From af2fa2e0b39c7981b902f305bd7213af226b0820 Mon Sep 17 00:00:00 2001
From: Jannis Klinkenberg <j.klinkenberg@itc.rwth-aachen.de>
Date: Wed, 30 Apr 2025 15:40:08 +0200
Subject: [PATCH] added serial job and tried to preserve content from help.itc
 example pages for job allocation

---
 generic-job-scripts/README.md                 |  3 ++-
 generic-job-scripts/beeond_job.sh             |  6 ++---
 generic-job-scripts/gpu_job_1gpu.sh           |  7 +++++-
 generic-job-scripts/gpu_job_2gpus-1proc.sh    |  7 +++++-
 generic-job-scripts/gpu_job_2gpus-2procs.sh   |  6 ++++-
 generic-job-scripts/gpu_job_4gpus-4procs.sh   |  7 +++++-
 generic-job-scripts/gpu_job_8gpus-8procs.sh   |  4 +++
 generic-job-scripts/hybrid_mpi_openmp_job.sh  |  4 ++-
 generic-job-scripts/mpi_job_1node.sh          |  3 ++-
 generic-job-scripts/mpi_job_2nodes.sh         |  3 ++-
 generic-job-scripts/mpi_job_basic.sh          |  3 ++-
 .../openmp_multi-threading_job.sh             |  3 ++-
 generic-job-scripts/serial_job.sh             | 25 +++++++++++++++++++
 13 files changed, 68 insertions(+), 13 deletions(-)
 create mode 100644 generic-job-scripts/serial_job.sh

diff --git a/generic-job-scripts/README.md b/generic-job-scripts/README.md
index e2f3166..5ff7805 100644
--- a/generic-job-scripts/README.md
+++ b/generic-job-scripts/README.md
@@ -2,7 +2,7 @@
 
 This folder contains common job script examples and best practices.
 
-## 1. Asychronous jobs
+## 1. Asynchronous jobs
 
 The following table illustrates examples for asynchronous jobs that contain both:
 - The allocation requests for your job, e.g. in form of `#SBATCH` flags in your batch script
@@ -23,6 +23,7 @@ You can submit such jobs to the Slurm batch system via `sbatch <parameters> <scr
 | [mpi_job_1node.sh](mpi_job_1node.sh) | Runs an MPI job on a single node, demonstrating intra-node parallel processing with multiple processes per node. |
 | [mpi_job_2nodes.sh](mpi_job_2nodes.sh) | Runs an MPI job spanning 2 full compute nodes, demonstrating inter-node parallelism and distributed computing across multiple machines. |
 | [openmp_multi-threading_job.sh](openmp_multi-threading_job.sh) | Runs an multi-threaded (e.g. OpenMP) job, demonstrating inter-node shared-memory parallelism. |
+| [serial_job.sh](serial_job.sh) | A minimal job script that runs a serial job, requesting only a single CPU core. |
 
 ## 2. Interactive jobs
 
diff --git a/generic-job-scripts/beeond_job.sh b/generic-job-scripts/beeond_job.sh
index 6afac44..1980da5 100644
--- a/generic-job-scripts/beeond_job.sh
+++ b/generic-job-scripts/beeond_job.sh
@@ -3,7 +3,7 @@
 ### Slurm flags
 ############################################################
 
-# request Beeond
+# request BeeOND
 #SBATCH --beeond
 
 # specify other Slurm commands
@@ -13,10 +13,10 @@
 ### Execution / Commands
 ############################################################
 
-# copy files to Beeond
+# copy files to BeeOND mount
 cp -r $WORK/yourfiles $BEEOND
 
-# navigate to Beeond
+# navigate to BeeOND
 cd $BEEOND/yourfiles
 
 # perform your job, which has high I/O meta data and bandwidth demands
diff --git a/generic-job-scripts/gpu_job_1gpu.sh b/generic-job-scripts/gpu_job_1gpu.sh
index 5c651f4..9ff53cf 100644
--- a/generic-job-scripts/gpu_job_1gpu.sh
+++ b/generic-job-scripts/gpu_job_1gpu.sh
@@ -31,7 +31,12 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage, such as CUDA
+# module load CUDA
+
 # Example: Only a single GPU is used. However, due to billing
 #          settings, 24 CPU cores can be requested and used
 #          in conjunction with that GPU. That also enables
-#          multi-threaded preprocessing on the CPU side.
\ No newline at end of file
+#          multi-threaded pre-processing on the CPU side.
+
+<prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/gpu_job_2gpus-1proc.sh b/generic-job-scripts/gpu_job_2gpus-1proc.sh
index 578136a..88a6f8c 100644
--- a/generic-job-scripts/gpu_job_2gpus-1proc.sh
+++ b/generic-job-scripts/gpu_job_2gpus-1proc.sh
@@ -31,6 +31,11 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage, such as CUDA
+# module load CUDA
+
 # Example: 1:2 mapping between MPI processes and GPUs
-#          Process intened to use both GPUs 
+#          Process intended to use both GPUs. If your code is based on CUDA,
+#          you might internally need to use cudaSetDevice to target the individual GPUs.
 
+<prog> <params>
diff --git a/generic-job-scripts/gpu_job_2gpus-2procs.sh b/generic-job-scripts/gpu_job_2gpus-2procs.sh
index ae9553d..dcdd388 100644
--- a/generic-job-scripts/gpu_job_2gpus-2procs.sh
+++ b/generic-job-scripts/gpu_job_2gpus-2procs.sh
@@ -31,6 +31,10 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage, such as CUDA
+# module load CUDA
+
 # Example: 1:1 mapping between MPI processes and GPUs
-#          Each process intened to use 1 GPU 
+#          Each process intended to use 1 GPU
 
+srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/gpu_job_4gpus-4procs.sh b/generic-job-scripts/gpu_job_4gpus-4procs.sh
index 08efaac..9310e5f 100644
--- a/generic-job-scripts/gpu_job_4gpus-4procs.sh
+++ b/generic-job-scripts/gpu_job_4gpus-4procs.sh
@@ -31,6 +31,11 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage, such as CUDA
+# module load CUDA
+
 # Example: 1:1 mapping between MPI processes and GPUs
-#          Each process intened to use 1 GPU 
+#          Each process intended to use 1 GPU
+
+srun <prog> <params>
 
diff --git a/generic-job-scripts/gpu_job_8gpus-8procs.sh b/generic-job-scripts/gpu_job_8gpus-8procs.sh
index da90d9a..7497bd8 100644
--- a/generic-job-scripts/gpu_job_8gpus-8procs.sh
+++ b/generic-job-scripts/gpu_job_8gpus-8procs.sh
@@ -31,7 +31,11 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage, such as CUDA
+# module load CUDA
+
 # Example: 1:1 mapping between MPI processes and GPUs
 #          Each process intened to use 1 GPU.
 #          2 full compute nodes are used.
 
+srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/hybrid_mpi_openmp_job.sh b/generic-job-scripts/hybrid_mpi_openmp_job.sh
index fed587e..32d673f 100644
--- a/generic-job-scripts/hybrid_mpi_openmp_job.sh
+++ b/generic-job-scripts/hybrid_mpi_openmp_job.sh
@@ -27,8 +27,10 @@ echo "Current machine: $(hostname)"
 
 # Example: Hybrid MPI + OpenMP execution
 
-# set number of threads
+# set number of OpenMP threads to be used
 export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}   # usually automatically set by SLURM
+# Note: you can also use fewer OpenMP threads per process or experiment with a different number of OpenMP threads in the same job by manually setting OMP_NUM_THREADS such as:
+# export OMP_NUM_THREADS=4
 
 # enable thread binding to pyhsical CPU cores
 export OMP_PLACES=cores
diff --git a/generic-job-scripts/mpi_job_1node.sh b/generic-job-scripts/mpi_job_1node.sh
index 95f8155..35b7d15 100644
--- a/generic-job-scripts/mpi_job_1node.sh
+++ b/generic-job-scripts/mpi_job_1node.sh
@@ -21,4 +21,5 @@ echo "Current machine: $(hostname)"
 ############################################################
 ### Execution / Commands
 ############################################################
-srun hostname
\ No newline at end of file
+srun hostname
+# srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/mpi_job_2nodes.sh b/generic-job-scripts/mpi_job_2nodes.sh
index 46b566b..f63004d 100644
--- a/generic-job-scripts/mpi_job_2nodes.sh
+++ b/generic-job-scripts/mpi_job_2nodes.sh
@@ -21,4 +21,5 @@ echo "Current machine: $(hostname)"
 ############################################################
 ### Execution / Commands
 ############################################################
-srun hostname
\ No newline at end of file
+srun hostname
+# srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/mpi_job_basic.sh b/generic-job-scripts/mpi_job_basic.sh
index ceac829..bc345a1 100644
--- a/generic-job-scripts/mpi_job_basic.sh
+++ b/generic-job-scripts/mpi_job_basic.sh
@@ -17,4 +17,5 @@
 #       Most likely on the same node but could in theory
 #       also be placed on different nodes.
 
-srun hostname
\ No newline at end of file
+srun hostname
+# srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/openmp_multi-threading_job.sh b/generic-job-scripts/openmp_multi-threading_job.sh
index d5bed48..f06fd39 100644
--- a/generic-job-scripts/openmp_multi-threading_job.sh
+++ b/generic-job-scripts/openmp_multi-threading_job.sh
@@ -28,7 +28,8 @@ echo "Current machine: $(hostname)"
 
 # set number of OpenMP threads to be used
 export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}   # usually automatically set by SLURM
-# Note: you can also use less cores/threads or experiment with different number of cores/threads in the same job
+# Note: you can also use fewer OpenMP threads per process or experiment with a different number of OpenMP threads in the same job by manually setting OMP_NUM_THREADS such as:
+# export OMP_NUM_THREADS=4
 
 # enable thread binding to pyhsical CPU cores
 export OMP_PLACES=cores
diff --git a/generic-job-scripts/serial_job.sh b/generic-job-scripts/serial_job.sh
new file mode 100644
index 0000000..58ce12f
--- /dev/null
+++ b/generic-job-scripts/serial_job.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/zsh
+############################################################
+### Slurm flags
+############################################################
+
+# Note: If you do not specify any requirements, your job will request 1 CPU core only
+
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_ser  # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_<job-id>.txt (%j = job ID)
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about current system
+echo "Current machine: $(hostname)"
+
+############################################################
+### Execution / Commands
+############################################################
+
+# execute your program (utilizing 1 CPU core)
+<prog> <params>
\ No newline at end of file
-- 
GitLab