From 1e62f288b6b937b7413a1b409e173958948088bd Mon Sep 17 00:00:00 2001
From: Jannis Klinkenberg <j.klinkenberg@itc.rwth-aachen.de>
Date: Mon, 26 May 2025 07:54:40 +0200
Subject: [PATCH] minor fixes

---
 machine-and-deep-learning/ollama/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/machine-and-deep-learning/ollama/README.md b/machine-and-deep-learning/ollama/README.md
index 24122ba..eaec1b6 100644
--- a/machine-and-deep-learning/ollama/README.md
+++ b/machine-and-deep-learning/ollama/README.md
@@ -18,12 +18,12 @@ To demonstrate how to use Ollama with the `ollama-python` library, you first nee
 ```bash
 # Specify the Ollama root directory
 export OLLAMA_ROOT_DIR=${HOME}/ollama
+mkdir -p ${OLLAMA_ROOT_DIR}
 # set further relative path variables
 source set_paths.sh
 
 # create the venv
 module load Python
-mkdir -p ${OLLAMA_ROOT_DIR}
 python -m venv ${OLLAMA_VENV_DIR}
 source ${OLLAMA_VENV_DIR}/bin/activate
 pip install ollama
@@ -33,7 +33,7 @@ pip install ollama
 
 ℹ️ **Note:** Examples here run `ollama serve` and `ollama run` in the background to enable concise demonstrations from a single script or shell. However, the official examples also show that these commands can instead be run in separate shells on the same node.
 
-## 1.1. Running Ollama with the official container
+## 1.1. Running Ollama with the official container (recommended)
 
 An Ollama container will be centrally provided on our HPC system **very soon**. However, for now, let's assume we have created one with the following command:
 ```bash
-- 
GitLab
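
For context, a minimal sketch of the single-script pattern that the patched note refers to: starting `ollama serve` in the background and issuing `ollama run` against it from the same shell. The model name `llama3`, the log location, and the fixed `sleep` are illustrative assumptions, not part of the patched README:

```bash
# Sketch only: assumes OLLAMA_ROOT_DIR is set as in the README snippet above.
ollama serve > ${OLLAMA_ROOT_DIR}/serve.log 2>&1 &   # start the server in the background
SERVE_PID=$!
sleep 5                                              # crude wait until the server is ready

# run a one-off prompt against the background server (model name is an assumption)
ollama run llama3 "Hello from the compute node"

kill ${SERVE_PID}                                    # shut the server down again
```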