diff --git a/machine-and-deep-learning/ollama/README.md b/machine-and-deep-learning/ollama/README.md
index f3f999514839e64038808f983bb38f69f1747fbe..3aea1a243db2e74ec72975fec7f9cd3b3440a595 100644
--- a/machine-and-deep-learning/ollama/README.md
+++ b/machine-and-deep-learning/ollama/README.md
@@ -12,15 +12,40 @@ Please find more information to Ollama in the following links:
 - https://github.com/ollama/ollama
 - https://github.com/ollama/ollama-python
 
-## 1. Running Ollama with the official container
+## 0. Prerequisites
 
-... follows soon ...
+To demonstrate how to use Ollama with the `ollama-python` library, you first need to create a Python virtual environment. Run the following commands **ONCE**:
+```bash
+# Specify the Ollama root directory, under which the binaries and the venv will be placed, e.g.:
+export OLLAMA_ROOT_DIR=${HOME}/ollama
+
+# initialize environment variables that refer to installation and virtual environment
+source set_paths.sh
+
+# create the venv
+zsh create_venv.sh
+```
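+
+To check that the venv was created correctly, you can activate it and import the library (a quick sanity check, assuming `create_venv.sh` installed the `ollama` package into the venv):
+```bash
+source ${OLLAMA_VENV_DIR}/bin/activate
+python3 -c "import ollama; print('ollama-python is available')"
+```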
+
+## 1. Running Ollama
+
+ℹ️ **Note:** The examples here run `ollama serve` and `ollama run` in the background so that everything can be demonstrated concisely from a single script or shell. The official examples instead run these commands in two separate shells on the same node, as sketched below.
+
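+The two-shell variant looks roughly like this (both shells on the same node):
+```bash
+# shell 1: start the server in the foreground
+ollama serve
+
+# shell 2: chat with the model interactively
+ollama run llama3.2
+```
+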
+### 1.1. Running Ollama with the official container
+
+An Ollama container will be centrally provided on our HPC system **very soon**. Once it is available, you can run the examples either in your current shell or by submitting a batch job that runs them on a backend node:
+```bash
+# run in the currently active shell
+zsh submit_job_container.sh
+
+# submit batch job
+sbatch submit_job_container.sh
+```
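+
+ℹ️ **Note:** Until the central image is available, you could build it yourself from the official Docker image (a sketch, assuming Apptainer is available and `set_paths.sh` has been sourced so that `OLLAMA_CONTAINER_IMAGE` is set):
+```bash
+apptainer pull ${OLLAMA_CONTAINER_IMAGE} docker://ollama/ollama
+```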
 
-## 2. Downloading and running Ollama manually
+### 1.2. Downloading and running Ollama manually
 
-Before beeing able to execute Ollama and run the exaples, you need to download Ollama and make it available to the upcoming workflow steps. Additionally, we use a Python virtual environment, to demonstrate how Ollama can be used via the `ollama-python` library.
+Before you can execute Ollama and run the examples, you need to download Ollama and make it available to the subsequent workflow steps.
 
-Execute the following instructions **ONCE** to download Ollama and create the virtual environment:
+Execute the following instructions **ONCE** to download Ollama:
 ```bash
 # Specify the Ollama root directory, under which the binaries and the venv will be placed, e.g.:
 export OLLAMA_ROOT_DIR=${HOME}/ollama
@@ -28,8 +53,8 @@ export OLLAMA_ROOT_DIR=${HOME}/ollama
 # initialize environment variables that refer to installation and virtual environment
 source set_paths.sh
 
-# Download the Ollama binaries and create the venv
-zsh download_and_create_venv.sh
+# download and extract the Ollama binaries
+zsh download_and_extract.sh
 ```
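+
+To verify the download, check that the binary is on your `PATH` (it is added by `set_paths.sh`):
+```bash
+ollama --version
+```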
 
 Now you can execute the examples, either in the current shell or by submitting a batch job that runs the examples on a backend node:
diff --git a/machine-and-deep-learning/ollama/download_and_create_venv.sh b/machine-and-deep-learning/ollama/create_venv.sh
similarity index 57%
rename from machine-and-deep-learning/ollama/download_and_create_venv.sh
rename to machine-and-deep-learning/ollama/create_venv.sh
index 9e61f7340850c6b86aa65c18d5544c06317b377f..f37f930ed1c23607da3cc55ff132c41842e1072d 100644
--- a/machine-and-deep-learning/ollama/download_and_create_venv.sh
+++ b/machine-and-deep-learning/ollama/create_venv.sh
@@ -2,12 +2,6 @@
 
 # create required directory
 mkdir -p ${OLLAMA_ROOT_DIR}
-mkdir -p ${OLLAMA_INSTALL_DIR}
-
-# download Ollama binaries
-cd ${OLLAMA_INSTALL_DIR}
-curl -L https://ollama.com/download/ollama-linux-amd64.tgz -o ollama-linux-amd64.tgz
-tar -xzf ollama-linux-amd64.tgz
 
 # create the Python virtual environment
 module load Python
diff --git a/machine-and-deep-learning/ollama/download_and_extract.sh b/machine-and-deep-learning/ollama/download_and_extract.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b0d774ed827c50d1a803472533578efe9bd2ef56
--- /dev/null
+++ b/machine-and-deep-learning/ollama/download_and_extract.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/zsh
+
+# create required directory and download Ollama binaries
+mkdir -p ${OLLAMA_INSTALL_DIR} && cd ${OLLAMA_INSTALL_DIR}
+curl -L https://ollama.com/download/ollama-linux-amd64.tgz -o ollama-linux-amd64.tgz
+tar -xzf ollama-linux-amd64.tgz
\ No newline at end of file
diff --git a/machine-and-deep-learning/ollama/set_paths.sh b/machine-and-deep-learning/ollama/set_paths.sh
index 9b23d6bbbf378f62e1288921f61a286030a05278..01704559c2f071a63680ac3b12b78c6c55c62929 100644
--- a/machine-and-deep-learning/ollama/set_paths.sh
+++ b/machine-and-deep-learning/ollama/set_paths.sh
@@ -2,7 +2,12 @@
 
 # path where Ollama binaries will be placed after download and extraction
 export OLLAMA_INSTALL_DIR=${OLLAMA_ROOT_DIR}/install
+
 # path to Python virtual environment
 export OLLAMA_VENV_DIR=${OLLAMA_ROOT_DIR}/venv_ollama
+
+# path to Ollama container image
+export OLLAMA_CONTAINER_IMAGE=${OLLAMA_ROOT_DIR}/ollama.sif
+
 # extend PATH so that the ollama binary can be executed in the shell
 export PATH="${OLLAMA_INSTALL_DIR}/bin:${PATH}"
\ No newline at end of file
diff --git a/machine-and-deep-learning/ollama/submit_job_container.sh b/machine-and-deep-learning/ollama/submit_job_container.sh
new file mode 100644
index 0000000000000000000000000000000000000000..89a6e33d109982b847fadc790ef3392450ef37e2
--- /dev/null
+++ b/machine-and-deep-learning/ollama/submit_job_container.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/zsh
+############################################################
+### Slurm flags
+############################################################
+
+#SBATCH --time=00:15:00
+#SBATCH --partition=c23g
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --cpus-per-task=24
+#SBATCH --gres=gpu:1
+
+############################################################
+### Load modules or software
+############################################################
+
+# specify your Ollama root directory
+export OLLAMA_ROOT_DIR=${HOME}/ollama
+
+# set dependent paths
+source set_paths.sh
+
+# load Python and activate venv
+module load Python
+source ${OLLAMA_VENV_DIR}/bin/activate
+
+############################################################
+### Parameters and Settings
+############################################################
+
+# print some information about current system
+echo "Job nodes: ${SLURM_JOB_NODELIST}"
+echo "Current machine: $(hostname)"
+nvidia-smi
+
+############################################################
+### Execution (Model Serving and Inference)
+############################################################
+
+# Note: setsid starts the command in a new process group so that it and all of its subprocesses can be stopped together during cleanup
+
+# start the container that serves and runs the model
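+# (inside: start the server in the background, wait briefly, load llama3.2, then keep the container alive via tail -f)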
+setsid apptainer exec -e --nv ${OLLAMA_CONTAINER_IMAGE} \
+    bash -c "OLLAMA_HOST=0.0.0.0 ollama serve & sleep 5 && ollama run llama3.2 && tail -f /dev/null" &> log_container.log &
+
+# remember ID of process that has just been started in background
+export proc_id_container=$!
+
+# wait until model is up
+sleep 5
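+# (a fixed delay is a simple heuristic; polling http://localhost:11434 until the server responds would be more robust)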
+
+# Example: prompt the LLM via the REST API (note: streaming is typically only useful with a chat frontend)
+echo "========== Example REST API =========="
+curl http://localhost:11434/api/generate -d '{"model": "llama3.2", "prompt":"Why is the sky blue?", "stream": false}'
+echo "\n"
+
+# Example: prompt the LLM through ollama-python
+echo "========== Example Python via ollama-python =========="
+python3 ollama-example.py
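+# (roughly equivalent inline, assuming the ollama package is installed in the venv:
+#  python3 -c "import ollama; print(ollama.generate(model='llama3.2', prompt='Why is the sky blue?')['response'])")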
+
+# cleanup: stop the container and the model (this also kills the serve and run processes)
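+# the negative PID targets the whole process group created by setsid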
+kill -9 -- -${proc_id_container}
diff --git a/machine-and-deep-learning/ollama/submit_job_venv.sh b/machine-and-deep-learning/ollama/submit_job_venv.sh
index f9ca578cad9e3c97ba9974d9d084fddc84a5a019..ef6521b56c542b69365e2df8cf231fdb462e8acc 100644
--- a/machine-and-deep-learning/ollama/submit_job_venv.sh
+++ b/machine-and-deep-learning/ollama/submit_job_venv.sh
@@ -37,8 +37,10 @@ nvidia-smi
 ### Execution (Model Serving and Inference)
 ############################################################
 
+# Note: setsid starts the command in a new process group so that it and all of its subprocesses can be stopped together during cleanup
+
 # run server in background and redirect output
-ollama serve &> log_ollama_serve.log &
+setsid ollama serve &> log_ollama_serve.log &
 # remember ID of process that has just been started in background
 export proc_id_serve=$!
 
@@ -61,7 +63,4 @@ echo "========== Example Python via ollama-python =========="
 python3 ollama-example.py
 
 # cleanup: stop model and kill serve and run processes
-ollama stop llama3.2
-kill -9 ${proc_id_serve}
-# kill remaining ollama procs if not already done
-ps aux | grep '[o]llama' | awk '{print $2}' | xargs -r kill -9
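+# the negative PID targets the whole process group created by setsid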
+kill -9 -- -${proc_id_serve}