diff --git a/slurm/basic_mpi.sh b/slurm/basic_mpi.sh
deleted file mode 100644
index b1bc9cbd9335b3f5385caa4211a2cec65465f03d..0000000000000000000000000000000000000000
--- a/slurm/basic_mpi.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/zsh 
-############################################################
-### Slurm flags
-############################################################
-
-#SBATCH --ntasks=8              # Ask for 8 MPI tasks
-#SBATCH --time=00:15:00         # Run time of 15 minutes
-#SBATCH --job-name=example_job  # Sets the job name
-#SBATCH --output=stdout_%j.txt  # Redirects stdout and stderr to stdout.txt
-#SBATCH --account=<project-id>  # Insertg your project-id or delete this line
-
-############################################################
-### Execution / Commands
-############################################################
-srun hostname
\ No newline at end of file
diff --git a/slurm/beeond.sh b/slurm/beeond_job.sh
similarity index 100%
rename from slurm/beeond.sh
rename to slurm/beeond_job.sh
diff --git a/slurm/gpu_job_1gpu.sh b/slurm/gpu_job_1gpu.sh
new file mode 100644
index 0000000000000000000000000000000000000000..74a97a39f6f6507b2a7f8d1d9d6f996d24f8defe
--- /dev/null
+++ b/slurm/gpu_job_1gpu.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/zsh
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --partition=c23g            # request partition with GPU nodes
+#SBATCH --nodes=1                   # request desired number of nodes
+#SBATCH --ntasks-per-node=1         # request desired number of processes (or MPI tasks)
+
+#SBATCH --cpus-per-task=24          # request desired number of CPU cores or threads per process (default: 1)
+                                    # Note: the available main memory also scales with the
+                                    #       number of cores unless specified otherwise
+                                    # Note: On CLAIX-2023 each GPU can be used with 24 cores
+
+#SBATCH --gres=gpu:1                # specify desired number of GPUs per node
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_1gpu     # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+nvidia-smi
+
+############################################################
+### Execution / Commands
+############################################################
+
+# Example: Only a single GPU is used. However, due to the billing
+#          settings, the corresponding 24 CPU cores can be requested
+#          and used at no additional cost.
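+
+# Illustrative launch sketch: replace <prog> <params> with your
+# GPU-enabled program and its arguments.
+srun <prog> <params>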
\ No newline at end of file
diff --git a/slurm/gpu_job_2gpus-1proc.sh b/slurm/gpu_job_2gpus-1proc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..578136a5c36ff7bb6452ac4275c980fd0aa2801c
--- /dev/null
+++ b/slurm/gpu_job_2gpus-1proc.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/zsh
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --partition=c23g            # request partition with GPU nodes
+#SBATCH --nodes=1                   # request desired number of nodes
+#SBATCH --ntasks-per-node=1         # request desired number of processes (or MPI tasks)
+
+#SBATCH --cpus-per-task=48          # request desired number of CPU cores or threads per process (default: 1)
+                                    # Note: the available main memory also scales with the
+                                    #       number of cores unless specified otherwise
+                                    # Note: On CLAIX-2023 each GPU can be used with 24 cores
+
+#SBATCH --gres=gpu:2                # specify desired number of GPUs per node
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_gpus # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+nvidia-smi
+
+############################################################
+### Execution / Commands
+############################################################
+
+# Example: 1:2 mapping between MPI processes and GPUs
+#          The single process is intended to use both GPUs
+
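+# Illustrative launch sketch: a single process is started and is expected
+# to manage both GPUs itself; replace <prog> <params> accordingly.
+srun <prog> <params>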
diff --git a/slurm/gpu_job_2gpus-2procs.sh b/slurm/gpu_job_2gpus-2procs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ae9553df149019a9eb8d5ac90e5558bc3946efb2
--- /dev/null
+++ b/slurm/gpu_job_2gpus-2procs.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/zsh
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --partition=c23g            # request partition with GPU nodes
+#SBATCH --nodes=1                   # request desired number of nodes
+#SBATCH --ntasks-per-node=2         # request desired number of processes (or MPI tasks)
+
+#SBATCH --cpus-per-task=24          # request desired number of CPU cores or threads per process (default: 1)
+                                    # Note: the available main memory also scales with the
+                                    #       number of cores unless specified otherwise
+                                    # Note: On CLAIX-2023 each GPU can be used with 24 cores
+
+#SBATCH --gres=gpu:2                # specify desired number of GPUs per node
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_gpus # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+nvidia-smi
+
+############################################################
+### Execution / Commands
+############################################################
+
+# Example: 1:1 mapping between MPI processes and GPUs
+#          Each process is intended to use 1 GPU
+
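+# Illustrative launch sketch: srun starts 2 MPI ranks, each of which is
+# expected to select one of the 2 GPUs (e.g. via its local rank).
+srun <prog> <params>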
diff --git a/slurm/gpu_job_4gpus-4procs.sh b/slurm/gpu_job_4gpus-4procs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..08efaac9e92a76f5d2c56f527aa797b3bd030522
--- /dev/null
+++ b/slurm/gpu_job_4gpus-4procs.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/zsh
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --partition=c23g            # request partition with GPU nodes
+#SBATCH --nodes=1                   # request desired number of nodes
+#SBATCH --ntasks-per-node=4         # request desired number of processes (or MPI tasks)
+
+#SBATCH --cpus-per-task=24          # request desired number of CPU cores or threads per process (default: 1)
+                                    # Note: the available main memory also scales with the
+                                    #       number of cores unless specified otherwise
+                                    # Note: On CLAIX-2023 each GPU can be used with 24 cores
+
+#SBATCH --gres=gpu:4                # specify desired number of GPUs per node
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_gpus # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+nvidia-smi
+
+############################################################
+### Execution / Commands
+############################################################
+
+# Example: 1:1 mapping between MPI processes and GPUs
+#          Each process is intended to use 1 GPU
+
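+# Illustrative launch sketch: srun starts 4 MPI ranks, each of which is
+# expected to select one of the 4 GPUs (e.g. via its local rank).
+srun <prog> <params>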
diff --git a/slurm/gpu_job_8gpus-8procs.sh b/slurm/gpu_job_8gpus-8procs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..da90d9aa298e1c29e6056d9c70e252b07ee42a51
--- /dev/null
+++ b/slurm/gpu_job_8gpus-8procs.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/zsh
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --partition=c23g            # request partition with GPU nodes
+#SBATCH --nodes=2                   # request desired number of nodes
+#SBATCH --ntasks-per-node=4         # request desired number of processes (or MPI tasks)
+
+#SBATCH --cpus-per-task=24          # request desired number of CPU cores or threads per process (default: 1)
+                                    # Note: the available main memory also scales with the
+                                    #       number of cores unless specified otherwise
+                                    # Note: On CLAIX-2023 each GPU can be used with 24 cores
+
+#SBATCH --gres=gpu:4                # specify desired number of GPUs per node
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_gpus # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+nvidia-smi
+
+############################################################
+### Execution / Commands
+############################################################
+
+# Example: 1:1 mapping between MPI processes and GPUs
+#          Each process is intended to use 1 GPU.
+#          Two full compute nodes are used.
+
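+# Illustrative launch sketch: srun starts 8 MPI ranks across the 2 nodes,
+# each of which is expected to select one of the 4 GPUs on its node.
+srun <prog> <params>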
diff --git a/slurm/hybrid_mpi_openmp_job.sh b/slurm/hybrid_mpi_openmp_job.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fed587ebdb36a38949c511de37b0ec0e3978c76c
--- /dev/null
+++ b/slurm/hybrid_mpi_openmp_job.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/zsh 
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --nodes=2                   # request desired number of nodes
+#SBATCH --ntasks-per-node=4         # request desired number of processes (or MPI tasks)
+#SBATCH --cpus-per-task=24          # request desired number of CPU cores or threads per process (default: 1)
+                                    # Note: the available main memory also scales with the
+                                    #       number of cores unless specified otherwise
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_hyb  # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+
+############################################################
+### Execution / Commands
+############################################################
+
+# Example: Hybrid MPI + OpenMP execution
+
+# set number of threads
+export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}   # usually automatically set by SLURM
+
+# enable thread binding to physical CPU cores
+export OMP_PLACES=cores
+export OMP_PROC_BIND=spread     # aiming to maximize memory bandwidth utilization 
+# export OMP_PROC_BIND=close    # typically used in scenarios where neighboring threads need to communicate/synchronize a lot
+
+# execute your program
+srun <prog> <params>
\ No newline at end of file
diff --git a/slurm/mpi_job_1node.sh b/slurm/mpi_job_1node.sh
new file mode 100644
index 0000000000000000000000000000000000000000..95f81550b7db4939c97633c12e9a070c3852140e
--- /dev/null
+++ b/slurm/mpi_job_1node.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/zsh 
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --nodes=1                   # request desired number of nodes
+#SBATCH --ntasks-per-node=96        # request desired number of processes (or MPI tasks)
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_mpi  # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+
+############################################################
+### Execution / Commands
+############################################################
+srun hostname
\ No newline at end of file
diff --git a/slurm/mpi_job_2nodes.sh b/slurm/mpi_job_2nodes.sh
new file mode 100644
index 0000000000000000000000000000000000000000..46b566b96af3e272c3303f921b136f5500e18d44
--- /dev/null
+++ b/slurm/mpi_job_2nodes.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/zsh 
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --nodes=2                   # request desired number of nodes
+#SBATCH --ntasks-per-node=96        # request desired number of processes (or MPI tasks)
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_mpi  # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+
+############################################################
+### Execution / Commands
+############################################################
+srun hostname
\ No newline at end of file
diff --git a/slurm/mpi_job_basic.sh b/slurm/mpi_job_basic.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ceac829e82255921ef2e81ab28b0b2c5cec68cf6
--- /dev/null
+++ b/slurm/mpi_job_basic.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/zsh 
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --ntasks=8                  # request desired number of processes (or MPI tasks)
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_mpi  # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Execution / Commands
+############################################################
+
+# Note: It is not specified where the MPI tasks will be allocated.
+#       They will most likely end up on the same node, but they could
+#       in theory also be placed on different nodes.
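+#       To pin all tasks to a single node, a "#SBATCH --nodes=1" line
+#       could be added (compare mpi_job_1node.sh).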
+
+srun hostname
\ No newline at end of file
diff --git a/slurm/openmp_multi-threading_job.sh b/slurm/openmp_multi-threading_job.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0eda13169fdc0c99028378b7e4341b063d4bccd2
--- /dev/null
+++ b/slurm/openmp_multi-threading_job.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/zsh 
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --nodes=1                   # pure multi-threading is restricted to a single node
+#SBATCH --ntasks-per-node=1         # pure multi-threading is restricted to a single process
+#SBATCH --cpus-per-task=48          # request desired number of CPU cores or threads per process (default: 1)
+                                    # Note: the available main memory also scales with the
+                                    #       number of cores unless specified otherwise
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_omp  # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<jobid>.txt
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about the current system
+echo "Current machine: $(hostname)"
+
+############################################################
+### Execution / Commands
+############################################################
+
+# Example: OpenMP
+
+# set number of threads
+export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}   # usually automatically set by SLURM
+
+# enable thread binding to physical CPU cores
+export OMP_PLACES=cores
+export OMP_PROC_BIND=spread     # aiming to maximize memory bandwidth utilization 
+# export OMP_PROC_BIND=close    # typically used in scenarios where neighboring threads need to communicate/synchronize a lot
+
+# execute your program
+<prog> <params>
\ No newline at end of file