Commit 4be2548b authored by Leon Michel Gorißen

chore: rename and delete commented-out code

Renamed file paths so that they match the data publication.
Deleted commented-out and unnecessary code.
parent 57458092
@@ -161,15 +161,15 @@ if __name__ == "__main__":
    elif (count == 1):
        continue
        model = load_model_from_binary_file(
-           "/app/dynamics_learning/Foundation_Model/models/Instance_model_ITA_2024-11-05_09-30-54_0.2551353871822357.h5"
+           "/app/dynamics_learning/Benchmark-models/pretrained/Instance_model_ITA.h5"
        )
        sweep_id, sweep_config = setup_sweep(create_sweep=True, from_model=True)
    elif (count == 2):
        model = load_model_from_binary_file(
-           "/app/dynamics_learning/Foundation_Model/models/Instance_model_ITA_2024-11-05_09-30-54_0.2551353871822357.h5"
+           "/app/dynamics_learning/Benchmark-model/pretrained/Instance_model_ITA.h5"
        )
        config_data = load_config(
-           "/app/dynamics_learning/Foundation_Model/hyperparameters/hyperparameters_Instance_model_ITA_2024-11-05_09-30-54_0.2551353871822357.json"
+           "/app/dynamics_learning/Benchmark-models/pretrained/hyperparameters_instance_model_ITA.json"
        )
        logger.info(f"Setting model to \n{model}\nand config_data to \n{config_data}")
        sweep_id, sweep_config = setup_sweep_from_hyperparameters(
@@ -177,10 +177,10 @@ if __name__ == "__main__":
        )
    elif (count == 3):
        model = load_model_from_binary_file(
-           "/app/dynamics_learning/Foundation_Model/models/Foundation_model_2024-11-04_01-12-00_2.492475748062134.h5"
+           "/app/dynamics_learning/Benchmark-models/pretrained/Foundation_model.h5"
        )
        config_data = load_config(
-           "/app/dynamics_learning/Foundation_Model/hyperparameters/hyperparameters_Foundation_model_2024-11-04_01-12-00_2.492475748062134.json"
+           "/app/dynamics_learning/Benchmark-models/pretrained/hyperparameters_foundation_model.json"
        )
        sweep_id, sweep_config = setup_sweep_from_hyperparameters(
            config_data=config_data, create_sweep=True
@@ -222,216 +222,3 @@ if __name__ == "__main__":
    logger.info(f"The fourth model using sweep {sweep_id4} was trained for {runs_model4} runs and reached a validation loss of {val_loss_model4}.")
    wandb.finish()
# ###############################################
# ####################Model 1####################
# ###############################################
# if model1:
# # LLT instance model trained from scratch
# robot_uuid = LLT_ROBOT_UUID
# directory = Path(
# f"/app/dynamics_learning/benchmark_trajectory_data/{robot_uuid}"
# )
# # Interpolate Training Data in UUID folders
# (
# attained_data,
# command_data,
# interpolated_command_data,
# q_qd_qdd_interpolated_command_input,
# tau_attained_input,
# ) = prepare_data(directory)
# # ensure that the sweep id is set correctly: g5qxvipa
# # assert SWEEP_ID == "g5qxvipa", "Sweep ID is not set correctly. Ensure that the sweep id is set to g5qxvipa"
# assert (
# robot_uuid == LLT_ROBOT_UUID
# ), "Robot UUID is not set correctly. Ensure that the robot uuid is set to LLT_ROBOT_UUID"
# sweep_id, sweep_config = setup_sweep(create_sweep=True)
# # reset runs counter
# runs = 0
# val_loss = 1000
# # Train the model until the threshold validation loss is reached
# train_until_threshold_val_loss(
# sweep_id=sweep_id,
# robot_uuid=robot_uuid,
# q_qd_qdd_interpolated_command_input=q_qd_qdd_interpolated_command_input,
# tau_attained_input=tau_attained_input,
# model=None,
# notes="Sweep to train model from scratch. 100 Trajectories are avaiulable for training. Training ist stoped when the validation loss is below 50.",
# )
# runs_model1 = runs
# val_loss_model1 = val_loss
# sweep_id1 = sweep_id
# ###############################################
# ####################Model 2####################
# ###############################################
# if model2:
# # LLT model based on ITA model without known hyperparameters
# robot_uuid = LLT_ROBOT_UUID
# directory = Path(
# f"/app/dynamics_learning/benchmark_trajectory_data/{robot_uuid}"
# )
# # Interpolate Training Data in UUID folders
# (
# attained_data,
# command_data,
# interpolated_command_data,
# q_qd_qdd_interpolated_command_input,
# tau_attained_input,
# ) = prepare_data(directory)
# # assert SWEEP_ID == "42d8t40t", "Sweep ID is not set correctly. Ensure that the sweep id is set to 42d8t40t"
# assert (
# robot_uuid == LLT_ROBOT_UUID
# ), "Robot UUID is not set correctly. Ensure that the robot uuid is set to LLT_ROBOT_UUID"
# sweep_id, sweep_config = setup_sweep(create_sweep=True)
# # reset runs counter
# runs = 0
# val_loss = 1000
# model = load_model_from_binary_file(
# "/app/dynamics_learning/models/99.99706268310547.h5"
# )
# # Train the model until the threshold validation loss is reached
# train_until_threshold_val_loss(
# sweep_id=sweep_id,
# robot_uuid=robot_uuid,
# q_qd_qdd_interpolated_command_input=q_qd_qdd_interpolated_command_input,
# tau_attained_input=tau_attained_input,
# model=model,
# notes="Sweep to train model based on ITA model. 100 Trajectories are avaiulable for training. Training is stoped when the validation loss is below 50.",
# )
# runs_model2 = runs
# val_loss_model2 = val_loss
# sweep_id2 = sweep_id
# ###############################################
# ####################Model 3####################
# ###############################################
# if model3:
# # LLT model based on ITA model with known hyperparameters
# robot_uuid = LLT_ROBOT_UUID
# directory = Path(f"/app/dynamics_learning/Trajectory Data/train/{robot_uuid}")
# # Interpolate Training Data in UUID folders
# (
# attained_data,
# command_data,
# interpolated_command_data,
# q_qd_qdd_interpolated_command_input,
# tau_attained_input,
# ) = prepare_data(directory)
# # assert SWEEP_ID == "42d8t40t", "Sweep ID is not set correctly. Ensure that the sweep id is set to 42d8t40t"
# assert (
# robot_uuid == LLT_ROBOT_UUID
# ), "Robot UUID is not set correctly. Ensure that the robot uuid is set to LLT_ROBOT_UUID"
# config_data = load_config(
# "/app/dynamics_learning/Foundation_Model/models/hyperparameters.json"
# )
# sweep_id, sweep_config = setup_sweep_from_hyperparameters(
# config_data=config_data, create_sweep=True
# )
# # reset runs counter
# runs = 0
# val_loss = 1000
# model = load_model_from_binary_file(
# "/app/dynamics_learning/models/99.99706268310547.h5"
# )
# # Train the model until the threshold validation loss is reached
# train_until_threshold_val_loss(
# sweep_id=sweep_id,
# robot_uuid=robot_uuid,
# q_qd_qdd_interpolated_command_input=q_qd_qdd_interpolated_command_input,
# tau_attained_input=tau_attained_input,
# model=model,
# notes="Sweep to train model based on ITA model with known hyperparameters. 50 Trajectories are avaiulable for training. Training ist stoped when the validation loss is below 50.",
# )
# runs_model3 = runs
# val_loss_model3 = val_loss
# sweep_id3 = sweep_id
# # assert (
# # SWEEP_ID == "fe3gjovo"
# # ), "Sweep ID is not set correctly. Ensure that the sweep id is set to fe3gjovo"
# # assert (
# # robot_uuid == LLT_ROBOT_UUID
# # ), "Robot UUID is not set correctly. Ensure that the robot uuid is set to LLT_ROBOT"
# ###############################################
# ####################Model 4####################
# ###############################################
# if model4:
# # LLT model based on foundation model
# # assert (
# # SWEEP_ID == "7tglijx8"
# # ), "Sweep ID is not set correctly. Ensure that the sweep id is set to 7tglijx8"
# # assert (
# # robot_uuid == LLT_ROBOT_UUID
# # ), "Robot UUID is not set correctly. Ensure that the robot uuid is set to LLT_ROBOT"
# robot_uuid = LLT_ROBOT_UUID
# directory = Path(f"/app/dynamics_learning/Trajectory Data/train/{robot_uuid}")
# # Interpolate Training Data in UUID folders
# (
# attained_data,
# command_data,
# interpolated_command_data,
# q_qd_qdd_interpolated_command_input,
# tau_attained_input,
# ) = prepare_data(directory)
# # assert SWEEP_ID == "42d8t40t", "Sweep ID is not set correctly. Ensure that the sweep id is set to 42d8t40t"
# assert (
# robot_uuid == LLT_ROBOT_UUID
# ), "Robot UUID is not set correctly. Ensure that the robot uuid is set to LLT_ROBOT_UUID"
# config_data = load_config(
# "/app/dynamics_learning/Foundation_Model/models/hyperparameters.json"
# )
# sweep_id, sweep_config = setup_sweep_from_hyperparameters(
# config_data=config_data, create_sweep=True
# )
# # reset runs counter
# runs = 0
# val_loss = 1000
# model = load_model_from_binary_file(
# "/app/dynamics_learning/Foundation_Model/models/Foundation_model.h5"
# )
# # Train the model until the threshold validation loss is reached
# train_until_threshold_val_loss(
# sweep_id=sweep_id,
# robot_uuid=robot_uuid,
# q_qd_qdd_interpolated_command_input=q_qd_qdd_interpolated_command_input,
# tau_attained_input=tau_attained_input,
# model=model,
# notes="Sweep to train model based on foundation model with known hyperparameters. 50 Trajectories are avaiulable for training. Training ist stoped when the validation loss is below 50.",
# )
# runs_model4 = runs
# val_loss_model4 = val_loss
# sweep_id4 = sweep_id
# logger.info(f"""Training concluded.
# The first model using sweep {sweep_id1} was trained for {runs_model1} runs and reached a validation loss of {val_loss_model1}.
# The second model using sweep {sweep_id2} was trained for {runs_model2} runs and reached a validation loss of {val_loss_model2}.
# The third model using sweep {sweep_id3} was trained for {runs_model3} runs and reached a validation loss of {val_loss_model3}.
# The fourth model using sweep {sweep_id4} was trained for {runs_model4} runs and reached a validation loss of {val_loss_model4}.
# """)