...

Code Block
languagebash
themeEmacs
titleListing 1. Example Slurm batch script to run a solver with 1152 MPI tasks
#!/bin/bash --login
 
#SBATCH --job-name=[name_of_job]
#SBATCH --partition=work
#SBATCH --ntasks=1152
#SBATCH --ntasks-per-node=128
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --time=[neededTime]
 
module load openfoam-org-container/7

#--- Specific settings for the cluster you are on
#(Check the specific guide of the cluster for additional settings)

# ---
# Set MPI-related environment variables (not all of them need to be set).
# Main variables for multi-node jobs (active here because this example spans several nodes):
export MPICH_OFI_STARTUP_CONNECT=1
export MPICH_OFI_VERBOSE=1
#Ask MPI to provide useful runtime information (uncomment if debugging)
#export MPICH_ENV_DISPLAY=1
#export MPICH_MEMORY_REPORT=1


#--- Automating the list of IORANKS for collated fileHandler
echo "Setting the grouping ratio for collated fileHandling"
nProcs=$SLURM_NTASKS #Total number of processors in the decomposition for this case
mGroup=32            #Size of the groups for collated fileHandling (32 is the initial recommendation for Setonix)
of_ioRanks="0"
iC=$mGroup
while [ $iC -le $nProcs ]; do
   of_ioRanks="$of_ioRanks $iC"
   ((iC += $mGroup))
done
export FOAM_IORANKS="(${of_ioRanks})"
echo "FOAM_IORANKS=$FOAM_IORANKS"

#-- Execute the solver:
srun -N $SLURM_JOB_NUM_NODES -n $SLURM_NTASKS -c $SLURM_CPUS_PER_TASK \
     singularity exec $SINGULARITY_CONTAINER pimpleFoam -parallel
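
As a usage sketch (the script filename below is hypothetical and not part of the listing), the job can be submitted and monitored with standard Slurm commands:

Code Block
languagebash
themeEmacs
titleSubmitting and monitoring the job (sketch)
# Submit the batch script (the filename is only an illustration)
sbatch runPimpleFoam.slm

# Check the job state in the queue
squeue -u $USER

# Once the job starts, follow the solver output in the default Slurm output file
tail -f slurm-<jobid>.out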


...

Code Block
languagebash
themeEmacs
titleListing 2. Example Slurm batch script to run a user's own solver with 1152 MPI tasks
#!/bin/bash --login
 
#SBATCH --job-name=[name_of_job]
#SBATCH --partition=work
#SBATCH --ntasks=1152
#SBATCH --ntasks-per-node=128
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --time=[neededTime]
 
module load openfoam-org-container/8

#--- Specific settings for the cluster you are on
#(Check the specific guide of the cluster for additional settings)

# ---
# Set MPI-related environment variables (not all of them need to be set).
# Main variables for multi-node jobs (active here because this example spans several nodes):
export MPICH_OFI_STARTUP_CONNECT=1
export MPICH_OFI_VERBOSE=1
#Ask MPI to provide useful runtime information (uncomment if debugging)
#export MPICH_ENV_DISPLAY=1
#export MPICH_MEMORY_REPORT=1


#--- Automating the list of IORANKS for collated fileHandler
echo "Setting the grouping ratio for collated fileHandling"
nProcs=$SLURM_NTASKS #Total number of processors in the decomposition for this case
mGroup=32            #Size of the groups for collated fileHandling (32 is the initial recommendation for Setonix)
of_ioRanks="0"
iC=$mGroup
while [ $iC -le $nProcs ]; do
   of_ioRanks="$of_ioRanks $iC"
   ((iC += $mGroup))
done
export FOAM_IORANKS="(${of_ioRanks})"
echo "FOAM_IORANKS=$FOAM_IORANKS"

#-- Defining the binding paths:
wmpudInside=$(singularity exec $SINGULARITY_CONTAINER bash -c 'echo $WM_PROJECT_USER_DIR')
wmpudOutside=$MYSOFTWARE/OpenFOAM/$USER-8

#-- Execute user's own solver:
srun -N $SLURM_JOB_NUM_NODES -n $SLURM_NTASKS -c $SLURM_CPUS_PER_TASK \
     singularity exec -B $wmpudOutside:$wmpudInside \
     $SINGULARITY_CONTAINER yourSolverFoam -parallel
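
Before submitting, it can be worth confirming that the bind-mount works and that the user's compiled solver is visible inside the container. A minimal interactive sketch under the same assumptions as the listing (yourSolverFoam is a placeholder for the user's solver name):

Code Block
languagebash
themeEmacs
titleChecking the bind-mounted user directory (sketch)
module load openfoam-org-container/8

# Same bind paths as in the batch script above
wmpudInside=$(singularity exec $SINGULARITY_CONTAINER bash -c 'echo $WM_PROJECT_USER_DIR')
wmpudOutside=$MYSOFTWARE/OpenFOAM/$USER-8

# The outside directory must exist before it can be bind-mounted
ls -d $wmpudOutside

# Check that the user's compiled solver is found on the PATH inside the container
singularity exec -B $wmpudOutside:$wmpudInside $SINGULARITY_CONTAINER bash -c 'command -v yourSolverFoam'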


...

Code Block
languagebash
themeEmacs
titleListing 4. Example Slurm batch script to run a solver from a user-provided container image with 1152 MPI tasks
#!/bin/bash --login
 
#SBATCH --job-name=[name_of_job]
#SBATCH --partition=work
#SBATCH --ntasks=1152
#SBATCH --ntasks-per-node=128
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --time=[neededTime]

#-- Loading modules
module load singularity/<version>

#-- Defining the singularity image to use
export SINGULARITY_CONTAINER=path/To/The/Image/imageName.sif

#--- Specific settings for the cluster you are on
#(Check the specific guide of the cluster for additional settings)

# ---
# Set MPI-related environment variables (not all of them need to be set).
# Main variables for multi-node jobs (active here because this example spans several nodes):
export MPICH_OFI_STARTUP_CONNECT=1
export MPICH_OFI_VERBOSE=1
#Ask MPI to provide useful runtime information (uncomment if debugging)
#export MPICH_ENV_DISPLAY=1
#export MPICH_MEMORY_REPORT=1

#-- Execute the solver:
srun -N $SLURM_JOB_NUM_NODES -n $SLURM_NTASKS -c $SLURM_CPUS_PER_TASK \
     singularity exec $SINGULARITY_CONTAINER pimpleFoam -parallel
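
As a quick sanity check of a user-provided image (a sketch reusing the placeholders from the listing above), the image and the solver inside it can be tested interactively before submitting the batch script:

Code Block
languagebash
themeEmacs
titleSanity-checking a user-provided image (sketch)
module load singularity/<version>
export SINGULARITY_CONTAINER=path/To/The/Image/imageName.sif

# Confirm the image file exists and is readable
ls -lh $SINGULARITY_CONTAINER

# Confirm the solver is available inside the image (prints its usage/help message)
singularity exec $SINGULARITY_CONTAINER pimpleFoam -help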


...