Verified Commit e5758cb0 authored by Jannis Klinkenberg's avatar Jannis Klinkenberg

added Slurm examples for CLAIX-2023

parent 483882a9
File moved
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --partition=c23g # request partition with GPU nodes
#SBATCH --nodes=1 # request desired number of nodes
#SBATCH --ntasks-per-node=1 # request desired number of processes (or MPI tasks)
#SBATCH --cpus-per-task=24 # request desired number of CPU cores or threads per process (default: 1)
# Note: if not specified otherwise, the available main
#       memory also scales with the number of cores
# Note: On CLAIX-2023 each GPU can be used with 24 cores
#SBATCH --gres=gpu:1 # specify desired number of GPUs per node
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_1gpus # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
nvidia-smi
############################################################
### Execution / Commands
############################################################
# Example: Only a single GPU is used. However, due to the
# billing settings, up to 24 CPU cores can be requested
# and used at no additional cost.
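# execute your program (placeholder launch line; replace <prog> <params>
# with your GPU application and its arguments)
srun <prog> <params>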
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --partition=c23g # request partition with GPU nodes
#SBATCH --nodes=1 # request desired number of nodes
#SBATCH --ntasks-per-node=1 # request desired number of processes (or MPI tasks)
#SBATCH --cpus-per-task=48 # request desired number of CPU cores or threads per process (default: 1)
# Note: if not specified otherwise, the available main
#       memory also scales with the number of cores
# Note: On CLAIX-2023 each GPU can be used with 24 cores
#SBATCH --gres=gpu:2 # specify desired number of GPUs per node
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_job_gpus # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
nvidia-smi
############################################################
### Execution / Commands
############################################################
# Example: 1:2 mapping between MPI processes and GPUs
# The single process is intended to use both GPUs.
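# execute your program (placeholder launch line; the single task has
# access to both requested GPUs)
srun <prog> <params>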
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --partition=c23g # request partition with GPU nodes
#SBATCH --nodes=1 # request desired number of nodes
#SBATCH --ntasks-per-node=2 # request desired number of processes (or MPI tasks)
#SBATCH --cpus-per-task=24 # request desired number of CPU cores or threads per process (default: 1)
# Note: if not specified otherwise, the available main
#       memory also scales with the number of cores
# Note: On CLAIX-2023 each GPU can be used with 24 cores
#SBATCH --gres=gpu:2 # specify desired number of GPUs per node
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_job_gpus # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
nvidia-smi
############################################################
### Execution / Commands
############################################################
# Example: 1:1 mapping between MPI processes and GPUs
# Each process is intended to use 1 GPU.
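# execute your program (placeholder launch line; srun starts one process
# per task, and how each rank selects "its" GPU is application-specific,
# e.g. based on the local rank SLURM_LOCALID)
srun <prog> <params>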
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --partition=c23g # request partition with GPU nodes
#SBATCH --nodes=1 # request desired number of nodes
#SBATCH --ntasks-per-node=4 # request desired number of processes (or MPI tasks)
#SBATCH --cpus-per-task=24 # request desired number of CPU cores or threads per process (default: 1)
# Note: if not specified otherwise, the available main
#       memory also scales with the number of cores
# Note: On CLAIX-2023 each GPU can be used with 24 cores
#SBATCH --gres=gpu:4 # specify desired number of GPUs per node
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_job_gpus # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
nvidia-smi
############################################################
### Execution / Commands
############################################################
# Example: 1:1 mapping between MPI processes and GPUs
# Each process is intended to use 1 GPU.
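# execute your program (placeholder launch line; one process per GPU)
srun <prog> <params>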
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --partition=c23g # request partition with GPU nodes
#SBATCH --nodes=2 # request desired number of nodes
#SBATCH --ntasks-per-node=4 # request desired number of processes (or MPI tasks)
#SBATCH --cpus-per-task=24 # request desired number of CPU cores or threads per process (default: 1)
# Note: if not specified otherwise, the available main
#       memory also scales with the number of cores
# Note: On CLAIX-2023 each GPU can be used with 24 cores
#SBATCH --gres=gpu:4 # specify desired number of GPUs per node
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_job_gpus # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
nvidia-smi
############################################################
### Execution / Commands
############################################################
# Example: 1:1 mapping between MPI processes and GPUs
# Each process is intended to use 1 GPU.
# 2 full compute nodes are used.
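# execute your program (placeholder launch line; 8 processes in total,
# one per GPU across the 2 nodes)
srun <prog> <params>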
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --nodes=2 # request desired number of nodes
#SBATCH --ntasks-per-node=4 # request desired number of processes (or MPI tasks)
#SBATCH --cpus-per-task=24 # request desired number of CPU cores or threads per process (default: 1)
# Note: if not specified otherwise, the available main
#       memory also scales with the number of cores
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_job_hyb # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
############################################################
### Execution / Commands
############################################################
# Example: Hybrid MPI + OpenMP execution
# set number of threads
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} # usually automatically set by SLURM
# enable thread binding to physical CPU cores
export OMP_PLACES=cores
export OMP_PROC_BIND=spread # aiming to maximize memory bandwidth utilization
# export OMP_PROC_BIND=close # typically used in scenarios where neighboring threads need to communicate/synchronize a lot
# execute your program
srun <prog> <params>
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --nodes=1 # request desired number of nodes
#SBATCH --ntasks-per-node=96 # request desired number of processes (or MPI tasks)
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_job_mpi # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
############################################################
### Execution / Commands
############################################################
srun hostname
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --nodes=2 # request desired number of nodes
#SBATCH --ntasks-per-node=96 # request desired number of processes (or MPI tasks)
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_job_mpi # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
############################################################
### Execution / Commands
############################################################
srun hostname
@@ -3,13 +3,18 @@
### Slurm flags
############################################################
#SBATCH --ntasks=8                 # request desired number of processes (or MPI tasks)
#SBATCH --time=00:15:00            # max. run time of the job
#SBATCH --job-name=example_job_mpi # set the job name
#SBATCH --output=stdout_%j.txt     # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id>     # insert your project-id or delete this line
############################################################
### Execution / Commands
############################################################
# Note: it is not specified where the MPI tasks will be allocated.
#       Most likely they end up on the same node, but they could
#       in theory also be placed on different nodes.
srun hostname
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --nodes=1 # pure multi-threading restricted to single node
#SBATCH --ntasks-per-node=1 # pure multi-threading restricted to single process
#SBATCH --cpus-per-task=48 # request desired number of CPU cores or threads per process (default: 1)
# Note: if not specified otherwise, the available main
#       memory also scales with the number of cores
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_job_omp # set the job name
#SBATCH --output=stdout_%j.txt    # redirects stdout and stderr to stdout_<job-id>.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Current machine: $(hostname)"
############################################################
### Execution / Commands
############################################################
# Example: OpenMP
# set number of threads
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} # usually automatically set by SLURM
# enable thread binding to physical CPU cores
export OMP_PLACES=cores
export OMP_PROC_BIND=spread # aiming to maximize memory bandwidth utilization
# export OMP_PROC_BIND=close # typically used in scenarios where neighboring threads need to communicate/synchronize a lot
# execute your program
<prog> <params>