diff --git a/generic-job-scripts/README.md b/generic-job-scripts/README.md
index e2f3166f9daf6129134ba38fb5450675b24e3e9b..5ff78059e78466b52006448a64aa234670096dbf 100644
--- a/generic-job-scripts/README.md
+++ b/generic-job-scripts/README.md
@@ -2,7 +2,7 @@
 
 This folder contains common job script examples and best practices.
 
-## 1. Asychronous jobs
+## 1. Asynchronous jobs
 
 The following table illustrates examples for asynchronous jobs that contain both:
 - The allocation requests for your job, e.g. in form of `#SBATCH` flags in your batch script
@@ -23,6 +23,7 @@ You can submit such jobs to the Slurm batch system via `sbatch <parameters> <scr
 | [mpi_job_1node.sh](mpi_job_1node.sh) | Runs an MPI job on a single node, demonstrating intra-node parallel processing with multiple processes per node. |
 | [mpi_job_2nodes.sh](mpi_job_2nodes.sh) | Runs an MPI job spanning 2 full compute nodes, demonstrating inter-node parallelism and distributed computing across multiple machines. |
 | [openmp_multi-threading_job.sh](openmp_multi-threading_job.sh) | Runs an multi-threaded (e.g. OpenMP) job, demonstrating inter-node shared-memory parallelism. |
+| [serial_job.sh](serial_job.sh) | A minimal job script that runs a serial job, requesting only a single CPU core. |
 
 ## 2. Interactive jobs
 
diff --git a/generic-job-scripts/beeond_job.sh b/generic-job-scripts/beeond_job.sh
index 6afac44883645f43afeb9b3a2e44fbb953a04d22..1980da5af2ce72dc3870fb5d8f6cd149dd1e6c94 100644
--- a/generic-job-scripts/beeond_job.sh
+++ b/generic-job-scripts/beeond_job.sh
@@ -3,7 +3,7 @@
 ### Slurm flags
 ############################################################
 
-# request Beeond
+# request BeeOND
 #SBATCH --beeond
 
 # specify other Slurm commands
@@ -13,10 +13,10 @@
 ### Execution / Commands
 ############################################################
 
-# copy files to Beeond
+# copy files to BeeOND mount
 cp -r $WORK/yourfiles $BEEOND
 
-# navigate to Beeond
+# navigate to BeeOND
 cd $BEEOND/yourfiles
 
 # perform your job, which has high I/O meta data and bandwidth demands
diff --git a/generic-job-scripts/gpu_job_1gpu.sh b/generic-job-scripts/gpu_job_1gpu.sh
index 5c651f42578711abc2dc19de0f534c328dc11d45..9ff53cf48f3e25f27d6a47a5a83b688d6a86db35 100644
--- a/generic-job-scripts/gpu_job_1gpu.sh
+++ b/generic-job-scripts/gpu_job_1gpu.sh
@@ -31,7 +31,12 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage such as CUDA
+# module load CUDA
+
 # Example: Only a single GPU is used. However, due to billing
 #          settings, 24 CPU cores can be requested and used
 #          in conjunction with that GPU. That also enables
-#          multi-threaded preprocessing on the CPU side.
\ No newline at end of file
+#          multi-threaded pre-processing on the CPU side.
+
+<prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/gpu_job_2gpus-1proc.sh b/generic-job-scripts/gpu_job_2gpus-1proc.sh
index 578136a5c36ff7bb6452ac4275c980fd0aa2801c..88a6f8c9f1925b37cbe68b0125b6130ca0e59a2b 100644
--- a/generic-job-scripts/gpu_job_2gpus-1proc.sh
+++ b/generic-job-scripts/gpu_job_2gpus-1proc.sh
@@ -31,6 +31,11 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage such as CUDA
+# module load CUDA
+
 # Example: 1:2 mapping between MPI processes and GPUs
-#          Process intened to use both GPUs 
+#          Process intended to use both GPUs. If your code is based on CUDA,
+#          you might internally need to use cudaSetDevice to target the individual GPUs.
 
+<prog> <params>
diff --git a/generic-job-scripts/gpu_job_2gpus-2procs.sh b/generic-job-scripts/gpu_job_2gpus-2procs.sh
index ae9553df149019a9eb8d5ac90e5558bc3946efb2..dcdd388e533acf12cc1af82c0d444b19c8691350 100644
--- a/generic-job-scripts/gpu_job_2gpus-2procs.sh
+++ b/generic-job-scripts/gpu_job_2gpus-2procs.sh
@@ -31,6 +31,10 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage such as CUDA
+# module load CUDA
+
 # Example: 1:1 mapping between MPI processes and GPUs
-#          Each process intened to use 1 GPU 
+#          Each process intended to use 1 GPU
 
+srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/gpu_job_4gpus-4procs.sh b/generic-job-scripts/gpu_job_4gpus-4procs.sh
index 08efaac9e92a76f5d2c56f527aa797b3bd030522..9310e5f4acf0f9be5a3a6bd43d96e27e5378196c 100644
--- a/generic-job-scripts/gpu_job_4gpus-4procs.sh
+++ b/generic-job-scripts/gpu_job_4gpus-4procs.sh
@@ -31,6 +31,11 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage such as CUDA
+# module load CUDA
+
 # Example: 1:1 mapping between MPI processes and GPUs
-#          Each process intened to use 1 GPU 
+#          Each process intended to use 1 GPU
+
+srun <prog> <params>
 
diff --git a/generic-job-scripts/gpu_job_8gpus-8procs.sh b/generic-job-scripts/gpu_job_8gpus-8procs.sh
index da90d9aa298e1c29e6056d9c70e252b07ee42a51..7497bd832efd5cdb116556f9c79216bbd7e669cf 100644
--- a/generic-job-scripts/gpu_job_8gpus-8procs.sh
+++ b/generic-job-scripts/gpu_job_8gpus-8procs.sh
@@ -31,7 +31,11 @@ nvidia-smi
 ### Execution / Commands
 ############################################################
 
+# Optional: Load desired modules for GPU usage such as CUDA
+# module load CUDA
+
 # Example: 1:1 mapping between MPI processes and GPUs
 #          Each process intened to use 1 GPU.
 #          2 full compute nodes are used.
 
+srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/hybrid_mpi_openmp_job.sh b/generic-job-scripts/hybrid_mpi_openmp_job.sh
index fed587ebdb36a38949c511de37b0ec0e3978c76c..32d673f110b4e64364af59a2d20140bcabe6a1f0 100644
--- a/generic-job-scripts/hybrid_mpi_openmp_job.sh
+++ b/generic-job-scripts/hybrid_mpi_openmp_job.sh
@@ -27,8 +27,10 @@ echo "Current machine: $(hostname)"
 
 # Example: Hybrid MPI + OpenMP execution
 
-# set number of threads
+# set number of OpenMP threads to be used
 export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}   # usually automatically set by SLURM
+# Note: you can also use fewer OpenMP threads per process or experiment with a different number of OpenMP threads in the same job by manually setting OMP_NUM_THREADS, e.g.:
+# export OMP_NUM_THREADS=4
 
 # enable thread binding to pyhsical CPU cores
 export OMP_PLACES=cores
diff --git a/generic-job-scripts/mpi_job_1node.sh b/generic-job-scripts/mpi_job_1node.sh
index 95f81550b7db4939c97633c12e9a070c3852140e..35b7d159b98164ace39a407fe0300977cc9a50f7 100644
--- a/generic-job-scripts/mpi_job_1node.sh
+++ b/generic-job-scripts/mpi_job_1node.sh
@@ -21,4 +21,5 @@ echo "Current machine: $(hostname)"
 ############################################################
 ### Execution / Commands
 ############################################################
-srun hostname
\ No newline at end of file
+srun hostname
+# srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/mpi_job_2nodes.sh b/generic-job-scripts/mpi_job_2nodes.sh
index 46b566b96af3e272c3303f921b136f5500e18d44..f63004da8baa7af46ecdb17537af2b81d854d24c 100644
--- a/generic-job-scripts/mpi_job_2nodes.sh
+++ b/generic-job-scripts/mpi_job_2nodes.sh
@@ -21,4 +21,5 @@ echo "Current machine: $(hostname)"
 ############################################################
 ### Execution / Commands
 ############################################################
-srun hostname
\ No newline at end of file
+srun hostname
+# srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/mpi_job_basic.sh b/generic-job-scripts/mpi_job_basic.sh
index ceac829e82255921ef2e81ab28b0b2c5cec68cf6..bc345a132c745f6deb3c8575c1f6a1977e9becea 100644
--- a/generic-job-scripts/mpi_job_basic.sh
+++ b/generic-job-scripts/mpi_job_basic.sh
@@ -17,4 +17,5 @@
 #       Most likely on the same node but could in theory
 #       also be placed on different nodes.
 
-srun hostname
\ No newline at end of file
+srun hostname
+# srun <prog> <params>
\ No newline at end of file
diff --git a/generic-job-scripts/openmp_multi-threading_job.sh b/generic-job-scripts/openmp_multi-threading_job.sh
index d5bed48c7a328712e203f6128a9b93c6fe511a7a..f06fd394d73df38e50f0d1143fff624dd0cc6bba 100644
--- a/generic-job-scripts/openmp_multi-threading_job.sh
+++ b/generic-job-scripts/openmp_multi-threading_job.sh
@@ -28,7 +28,8 @@ echo "Current machine: $(hostname)"
 
 # set number of OpenMP threads to be used
 export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}   # usually automatically set by SLURM
-# Note: you can also use less cores/threads or experiment with different number of cores/threads in the same job
+# Note: you can also use fewer OpenMP threads per process or experiment with a different number of OpenMP threads in the same job by manually setting OMP_NUM_THREADS, e.g.:
+# export OMP_NUM_THREADS=4
 
 # enable thread binding to pyhsical CPU cores
 export OMP_PLACES=cores
diff --git a/generic-job-scripts/serial_job.sh b/generic-job-scripts/serial_job.sh
new file mode 100644
index 0000000000000000000000000000000000000000..58ce12f29de6f5e655d224fb3ed40f84fa43b189
--- /dev/null
+++ b/generic-job-scripts/serial_job.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/zsh 
+############################################################
+### Slurm flags
+############################################################
+
+# Note: If you do not specify any requirements, your job will request 1 CPU core only
+
+#SBATCH --time=00:15:00             # max. run time of the job
+#SBATCH --job-name=example_job_ser  # set the job name
+#SBATCH --output=stdout_%j.txt      # redirects stdout and stderr to stdout_%j.txt (%j = job ID)
+#SBATCH --account=<project-id>      # insert your project-id or delete this line
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about current system
+echo "Current machine: $(hostname)"
+
+############################################################
+### Execution / Commands
+############################################################
+
+# execute your program (utilizing 1 CPU core)
+<prog> <params>
\ No newline at end of file