Select Git revision
BP_GrabbableTestObject.uasset
gpu_job_1gpu.sh 1.75 KiB
#!/usr/bin/zsh
############################################################
### Slurm flags
############################################################
#SBATCH --partition=c23g # request partition with GPU nodes
#SBATCH --nodes=1 # request desired number of nodes
#SBATCH --ntasks-per-node=1 # request desired number of processes (or MPI tasks)
#SBATCH --cpus-per-task=24 # request desired number of CPU cores or threads per process (default: 1)
# Note: available main memory is also scaling with
# number of cores if not specified otherwise
# Note: On CLAIX-2023 each GPU can be used with 24 cores
#SBATCH --gres=gpu:1 # specify desired number of GPUs per node
#SBATCH --time=00:15:00 # max. run time of the job
#SBATCH --job-name=example_1gpus # set the job name
#SBATCH --output=stdout_%j.txt # redirects stdout and stderr to stdout.txt
#SBATCH --account=<project-id> # insert your project-id or delete this line
############################################################
### Parameters and Settings
############################################################
# print some information about current system
echo "Job nodes: ${SLURM_JOB_NODELIST}"
echo "Current machine: $(hostname)"
nvidia-smi
############################################################
### Execution / Commands
############################################################
# Example: Only a single GPU is used. However, due to billing
# settings, 24 CPU cores can be requested and used
# in conjunction with that GPU. That also enables
# multi-threaded preprocessing on the CPU side.