Commit 0683f48b authored by andres

adding configs to repo, and updating examples in readme

parent 3e068a39
@@ -4,7 +4,7 @@
 # don't commit stuff in data, it may be too large and it's better to move it in other ways.
 # except the configs of your experiments
 data/*
-!data/configs/
+!data/config/
 # Don't commit rclone in case you are using it
 rclone.exe
...
@@ -90,6 +90,15 @@ python 01_train_model.py +config=alternative.yaml
 ###############################
 python 01_train_model.py --multirun model.num_layers=1,2,3
+###############################
+# Executing multiple runs with launchers
+###############################
+python 01_train_model.py --multirun model.num_layers=1,2,3 +launcher=joblib
+# or
+python 01_train_model.py --multirun model.num_layers=1,2,3 +launcher=slurm
 ###############################
 # Using Hydra and Slurm for cluster job submissions
 ###############################
...
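
The new config files added in this commit (shown below) are what these commands compose. For orientation, here is a minimal sketch of a Hydra entry point that would consume them; the config_path, config_name, and field accesses are assumptions based on the configs in this commit, not the repo's actual 01_train_model.py:

import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(version_base=None, config_path="data/config", config_name="config")  # paths are assumptions
def main(cfg: DictConfig) -> None:
    # Hydra composes the model/ and training/ config groups into a single config object
    print(OmegaConf.to_yaml(cfg))
    print(f"layers={cfg.model.num_layers}, lr={cfg.training.lr}, epochs={cfg.training.epochs}")

if __name__ == "__main__":
    main()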

# New experiment config: multirun sweep over models and training hyperparameters
# @package _global_
defaults:
  - override /training: default
hydra:
  mode: MULTIRUN
  sweeper:
    params:
      model: net2,net5
      training.lr: 1.0,0.1,0.001
      training.epochs: 1
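
Because hydra.mode is set to MULTIRUN, selecting this file launches the whole sweep without passing --multirun on the command line. The config group and file name used below are assumptions for illustration, not the repo's actual names:

python 01_train_model.py +experiment=sweep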

# New experiment config: select the net2 model with the default training settings
# @package _global_
defaults:
  - override /model: net2
  - override /training: default

# New launcher config: run multirun jobs in parallel local processes via joblib
# @package _global_
defaults:
  - override /hydra/launcher: joblib
hydra:
  launcher: # https://hydra.cc/docs/plugins/joblib_launcher/
    n_jobs: 5 # run at most 5 jobs in parallel

# New launcher config: submit multirun jobs to a Slurm cluster via submitit
# @package _global_
defaults:
  - override /hydra/launcher: submitit_slurm
hydra:
  callbacks:
    log_job_return:
      _target_: hydra.experimental.callbacks.LogJobReturnCallback
  launcher:
    setup: [which python, echo 1] # shell commands run inside the job before the task starts
    submitit_folder: ${hydra.sweep.dir}/.submitit/%j

# New launcher config: run multirun jobs locally through submitit
# @package _global_
defaults:
  - override /hydra/launcher: submitit_local
hydra:
  callbacks:
    log_job_return:
      _target_: hydra.experimental.callbacks.LogJobReturnCallback
  launcher: # https://hydra.cc/docs/plugins/submitit_launcher/
    submitit_folder: ${hydra.sweep.dir}/.submitit/%j
    nodes: 1
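
Launcher parameters can also be overridden straight from the command line. For example, timeout_min and partition are standard submitit_slurm options; the partition name below is site-specific and only illustrative:

python 01_train_model.py --multirun model.num_layers=1,2,3 +launcher=slurm hydra.launcher.timeout_min=60 hydra.launcher.partition=gpu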

# New model configs: SimpleNet variants, one file per option in the model config group

# Model configuration for SimpleNet
num_layers: 3 # Number of layers in the neural network (int)

# Model configuration for SimpleNet
num_layers: 2 # Number of layers in the neural network (int)

# Model configuration for SimpleNet
num_layers: 5 # Number of layers in the neural network (int)

# Model configuration for SimpleNet
num_layers: 7 # Number of layers in the neural network (int)
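
Any of these variants can be selected through the model config group in the same style as the README examples; net5 presumably resolves to the 5-layer file above:

python 01_train_model.py model=net5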

# New top-level config: default composition and Hydra output directories
defaults:
  - model: default
  - training: default
hydra:
  run:
    dir: ./data/outputs/${now:%Y-%m-%d_%H-%M-%S}_${hydra.job.name}
  sweep:
    dir: ./data/multirun/${now:%Y-%m-%d_%H-%M-%S}_${hydra.job.name}
    subdir: ${hydra.job.num}
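
With these settings, single runs and sweeps land under data/, which the .gitignore change above keeps out of version control. An illustrative layout (timestamps made up; ${hydra.job.name} resolves to the script name, e.g. 01_train_model):

data/outputs/2025-01-01_12-00-00_01_train_model/        # one directory per single run
data/multirun/2025-01-01_12-00-00_01_train_model/0/     # one numbered subdir per sweep job
data/multirun/2025-01-01_12-00-00_01_train_model/1/
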
# Training configuration for MNIST model
batch_size: 512 # Input batch size for training (int)
test_batch_size: 1000 # Input batch size for testing (int)
epochs: 14 # Number of epochs to train (int)
lr: 1.0 # Learning rate (float)
gamma: 0.7 # Factor for the learning rate scheduler (float)
no_cuda: false # Disable CUDA (bool)
no_mps: false # Disable macOS MPS GPU support (bool)
dry_run: false # Perform a dry run (do not update weights) (bool)
seed: 1 # Seed for random number generation (int)
log_interval: 10 # How often to log progress (int)
save_model: true # Whether to save the model to disk (bool)
data_dir: './data/datasets' # Directory to store the dataset (str)
model_dir: './data/models' # Directory to save trained models (str)
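
For orientation, a minimal sketch of how fields like seed, lr, gamma, no_cuda, and save_model are typically consumed in a PyTorch MNIST-style script. This is an illustration only, not the repo's 01_train_model.py; the optimizer, placeholder model, and config path are assumptions:

import os

import torch
from omegaconf import OmegaConf

cfg = OmegaConf.load("training/default.yaml")  # hypothetical path to this file

torch.manual_seed(cfg.seed)
use_cuda = not cfg.no_cuda and torch.cuda.is_available()
use_mps = not cfg.no_mps and torch.backends.mps.is_available()
device = torch.device("cuda" if use_cuda else "mps" if use_mps else "cpu")

model = torch.nn.Linear(28 * 28, 10).to(device)  # stand-in for the real model
optimizer = torch.optim.Adadelta(model.parameters(), lr=cfg.lr)  # optimizer choice is an assumption
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=cfg.gamma)

for epoch in range(1, cfg.epochs + 1):
    # ... one pass over the training data, logging every cfg.log_interval batches
    #     and skipping weight updates when cfg.dry_run is true ...
    scheduler.step()

if cfg.save_model:
    os.makedirs(cfg.model_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(cfg.model_dir, "mnist_model.pt"))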