From acc3c2a356831f68c2b01c47f863ae64f0adb5a5 Mon Sep 17 00:00:00 2001
From: andres <andresfp14@gmail.com>
Date: Thu, 23 May 2024 15:02:08 +0200
Subject: [PATCH] Fix Hydra multirun examples and sweep experiment defaults

Update README multirun commands to use the correct override path
(model.object.num_layers instead of model.num_layers) and make the
sweep_models_lr experiment override the 'short' training preset
instead of 'default'.

---
 README.md                                   | 6 +++---
 data/config/experiment/sweep_models_lr.yaml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 13f848b..78af6b1 100644
--- a/README.md
+++ b/README.md
@@ -80,7 +80,7 @@ python runs/01_train_model.py training.epochs=2 training.seed=7
 ```bash
 # Execute multiple runs with different model sizes using Hydra's multirun feature
 # This command will run the script for each combination of the specified values
-python runs/01_train_model.py --multirun training.epochs=2 model.num_layers=1,2,3
+python runs/01_train_model.py --multirun training.epochs=2 model.object.num_layers=1,2,3
 
 # Execute multiple runs as defined in a configuration file
 python runs/01_train_model.py +experiment=sweep_models_lr
@@ -91,10 +91,10 @@ python runs/01_train_model.py +experiment=sweep_models_lr
 ```bash
 # Execute multiple runs with Hydra's joblib launcher
 # This will run the script for each combination of the specified values using joblib for parallel execution
-python runs/01_train_model.py --multirun training.epochs=2 model.num_layers=1,2,3 +launcher=joblib
+python runs/01_train_model.py --multirun training.epochs=2 model.object.num_layers=1,2,3 +launcher=joblib
 
 # Or use Hydra's slurm launcher for running on a Slurm-based cluster
-python runs/01_train_model.py --multirun training.epochs=2 model.num_layers=1,2,3 +launcher=slurm
+python runs/01_train_model.py --multirun training.epochs=2 model.object.num_layers=1,2,3 +launcher=slurm
 
 # Or use Slurm with GPU support, running the script with multiple seed values
 python runs/01_train_model.py --multirun training.epochs=2 training.seed=0,1,2,3,4 +launcher=slurmgpu
diff --git a/data/config/experiment/sweep_models_lr.yaml b/data/config/experiment/sweep_models_lr.yaml
index 4bda5f0..32478a1 100644
--- a/data/config/experiment/sweep_models_lr.yaml
+++ b/data/config/experiment/sweep_models_lr.yaml
@@ -1,6 +1,6 @@
 # @package _global_
 defaults:
-  - override /training: default
+  - override /training: short
 
 hydra: 
   mode: MULTIRUN
-- 
GitLab