  • gpu_job_1gpu.sh
    #!/usr/bin/zsh
    ############################################################
    ### Slurm flags
    ############################################################
    
    #SBATCH --partition=c23g            # request partition with GPU nodes
    #SBATCH --nodes=1                   # request desired number of nodes
    #SBATCH --ntasks-per-node=1         # request desired number of processes (or MPI tasks)
    
    #SBATCH --cpus-per-task=24          # request desired number of CPU cores or threads per process (default: 1)
                                        # Note: available main memory also scales with the
                                        #       number of cores if not specified otherwise
                                        # Note: On CLAIX-2023 each GPU can be used with 24 cores
    
    #SBATCH --gres=gpu:1                # specify desired number of GPUs per node
    #SBATCH --time=00:15:00             # max. run time of the job
    #SBATCH --job-name=example_1gpus    # set the job name
    #SBATCH --output=stdout_%j.txt      # redirect stdout and stderr to stdout_<job-id>.txt
    #SBATCH --account=<project-id>      # insert your project-id or delete this line
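    
    # Usage note (not part of the original script): submit this job script with
    #     sbatch gpu_job_1gpu.sh
    # Slurm reads the #SBATCH lines above; options given to sbatch on the
    # command line override them.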
    
    ############################################################
    ### Parameters and Settings
    ############################################################
    
    # print some information about the current system
    echo "Job nodes: ${SLURM_JOB_NODELIST}"
    echo "Current machine: $(hostname)"
    nvidia-smi
    
    ############################################################
    ### Execution / Commands
    ############################################################
    
    # Example: only a single GPU is used. Since each GPU on CLAIX-2023 is
    #          billed together with 24 CPU cores, those cores can be requested
    #          and used in conjunction with that GPU, which also enables
    #          multi-threaded preprocessing on the CPU side.
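    
    # The commands below are an illustrative sketch, not part of the original
    # script: "train.py" is a placeholder for whatever GPU application you run.
    
    # Let CPU-side (pre)processing use all requested cores.
    export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
    
    # Run the placeholder GPU application on the allocated node.
    python3 train.py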