diff --git a/Optimization of control rules/Optimization_control_rules_load.ipynb b/Optimization of control rules/Optimization_control_rules_load.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..2530bd55358c66dc0e91f16968b3e566f2171f20 --- /dev/null +++ b/Optimization of control rules/Optimization_control_rules_load.ipynb @@ -0,0 +1,837 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "be61f6efdeea355d", + "metadata": {}, + "source": [ + "#### Import of libraries" + ] + }, + { + "cell_type": "markdown", + "id": "e20bdc73", + "metadata": {}, + "source": [ + "General libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "e120f145", + "metadata": {}, + "outputs": [], + "source": [ + "import typing\n", + "import datetime\n", + "import shutil\n", + "import subprocess\n", + "from multiprocessing.connection import Client\n", + "from time import sleep\n", + "from warnings import warn\n", + "from pathlib import Path\n", + "import functools\n", + "import threading\n", + "from multiprocessing.pool import ThreadPool\n", + "import traceback\n", + "import numpy as np\n", + "import pandas as pd\n", + "import nevergrad as ng\n", + "import matplotlib as mpl\n", + "import matplotlib.pyplot as plt\n", + "from scipy.ndimage import gaussian_filter1d\n", + "from IPython.utils import io as ipyio\n", + "import os" + ] + }, + { + "cell_type": "markdown", + "id": "13df68a6", + "metadata": {}, + "source": [ + "Libraries for the interface with MIKE+" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "initial_id", + "metadata": { + "ExecuteTime": { + "end_time": "2024-11-20T18:32:54.010553Z", + "start_time": "2024-11-20T18:32:51.575285Z" + } + }, + "outputs": [], + "source": [ + "from mikeplus import DataTableAccess\n", + "from mikeplus.engines import Engine1D\n", + "from mikeio1d import Res1D\n", + "from mikeio1d.query import QueryDataStructure\n", + "from mikeio1d.query import QueryDataNode" + ] + }, + { + "cell_type": "markdown", + "id": "d8ba7af66f30210", + "metadata": {}, + "source": [ + "#### Setting variables" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "7f4a620fa3107d75", + "metadata": {}, + "outputs": [], + "source": [ + "# the model_folder is the folder where the necessary input files are saved\n", + "# note: this should be at the same folder depth as the original folder of this script, so that relative paths resolve to the same files\n", + "model_folder = Path(\"statische Fracht Drosseloptimierung/\")\n", + "# the name of the model file\n", + "db_path_rel = \"240502_Modell Rodenkirchen.mupp\"\n", + "# set the number of optimization steps\n", + "optimization_budget = 60" + ] + }, + { + "cell_type": "markdown", + "id": "24d2e0b8", + "metadata": {}, + "source": [ + "Names of the input Excel files" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5275f8e7", + "metadata": {}, + "outputs": [], + "source": [ + "event_xlsx = \"config_events.xlsx\"\n", + "parameter_xlsx = \"config_parameter.xlsx\"\n", + "structure_xlsx = \"structures.xlsx\" # The pollution loads of the discharges are specified here as well." + ] + },
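+ { + "cell_type": "markdown", + "id": "c0ffee0000000001", + "metadata": {}, + "source": [ + "The three workbooks are expected to provide the columns used later in this notebook: config_events needs Event, Start, End, SimulationName and ResultFile; config_parameter needs Table, Column, Muid, Min, Max and Default; structures needs structures and fracht. The cell below is a minimal illustrative sketch of that layout with made-up values (only the MWP_0112 numbers are taken from the parameter bounds shown further down); the real files ship with the model folder." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c0ffee0000000002", + "metadata": {}, + "outputs": [], + "source": [ + "# Illustrative only: hypothetical one-row versions of the three workbooks,\n", + "# using the column names the rest of this notebook relies on.\n", + "display(pd.DataFrame({\"Event\": [\"E1\"], \"Start\": [pd.Timestamp(\"2024-01-01\")], \"End\": [pd.Timestamp(\"2024-01-02\")], \"SimulationName\": [\"Sim_E1\"], \"ResultFile\": [\"Sim_E1.res1d\"]}))\n", + "display(pd.DataFrame({\"Table\": [\"msm_Pump\"], \"Column\": [\"DutyPoint\"], \"Muid\": [\"MWP_0112\"], \"Min\": [0.001], \"Max\": [0.12], \"Default\": [0.009]}))\n", + "display(pd.DataFrame({\"structures\": [\"S1\"], \"fracht\": [1.0]}))" + ] + },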
+ { + "cell_type": "markdown", + "id": "e8f8ec05dab0280", + "metadata": {}, + "source": [ + "Further variables" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2512aea6d68be310", + "metadata": {}, + "outputs": [], + "source": [ + "# set the timezone\n", + "timezone = \"UTC+01:00\"\n", + "# number of seats for the engine; limits how many simulations we can run at once\n", + "engine_seats = 4\n", + "# choose optimizers from the nevergrad package\n", + "optimizers = [\"BayesOptimBO\"]\n", + "# delete result files (as specified by the event config) before a simulation; ensures that no old file is re-read if a simulation does not run\n", + "DELETE_RESFILE_BEFORE_SIM = True\n", + "# output heaps of additional data, may be useful for debugging, Default = False\n", + "VERBOSE_OUTPUT = False\n", + "# attempt a simulation at most this many times, used when a simulation fails\n", + "MAX_SIMULATION_TRY = 1\n", + "# consider a simulation as failed and retry if this ratio of values is missing from the result compared to the reference\n", + "MAX_MISSING_RATIO = 0.6\n", + "# delete the temp folder at the end, Default = True\n", + "DELETE_TEMP = True\n", + "# time string for the output folder\n", + "time_path_part = datetime.datetime.now().strftime(\"%Y-%m-%dT%H.%M.%S\")\n", + "# set path of temp and output folder\n", + "temp_folder = Path(\"tmp_\" + time_path_part)\n", + "# output folder\n", + "out_folder = Path(\"out_\" + time_path_part)\n", + "# whether output from the MIKE simulation gets shown on error\n", + "show_sim_out_on_error = True" + ] + }, + { + "cell_type": "markdown", + "id": "c621349d1a2515d3", + "metadata": {}, + "source": [ + "#### Preparation of the database\n" + ] + }, + { + "cell_type": "markdown", + "id": "4b52f384", + "metadata": {}, + "source": [ + "file_to_df: Reads a file as a pandas DataFrame; supports Excel, CSV and res1d files" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "f8156368dd44c1bd", + "metadata": {}, + "outputs": [], + "source": [ + "def file_to_df(path: Path) -> pd.DataFrame:\n", + "    if path.suffix == \".res1d\":\n", + "        res1d = Res1D(str(path))  # Create a Res1D instance with the file\n", + "        df = res1d.read_all()  # Read all data from the .res1d file\n", + "        return df\n", + "    elif path.suffix == \".xlsx\":\n", + "        return pd.read_excel(path)\n", + "    elif path.suffix == \".csv\":\n", + "        return pd.read_csv(path)\n", + "    else:\n", + "        raise NotImplementedError(f\"No method for {path.suffix} implemented\")" + ] + }, + { + "cell_type": "markdown", + "id": "a19c5fb4f02bff3e", + "metadata": {}, + "source": [ + "Creating the output folder for the results and a temp folder into which the model_folder is duplicated, to ensure that the original model is not damaged" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "460b9199eb10f580", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "WindowsPath('C:/Users/Schroeer/Python Skripte/out_2025-02-21T10.40.32')" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "out_folder.mkdir(exist_ok=True, parents=True)\n", + "assert not temp_folder.exists(), \"Make sure not to mess with existing folders\"\n", + "shutil.copytree(model_folder, temp_folder)\n", + "db_path = temp_folder / db_path_rel\n", + "\n", + "# show the paths of the temp and out folder (only the last expression is displayed)\n", + "temp_folder.absolute()\n", + "out_folder.absolute()" + ] + }, + { + "cell_type": "markdown", + "id": "503240fbd162ad3f", + "metadata": {}, + "source": [ + "Read the 
Excel file config_events" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "185e7a9554f35dc0", + "metadata": {}, + "outputs": [], + "source": [ + "events = file_to_df(temp_folder / event_xlsx).set_index('Event', drop=False)\n", + "# check that the start time of each event is before its end time\n", + "assert (events[\"Start\"] <= events[\"End\"]).all(\n", + "    axis=None\n", + "), \"Event end needs to be after start.\"\n", + "# add timezone info, using the timezone configured above\n", + "events[\"Start\"] = events[\"Start\"].dt.tz_localize(timezone)\n", + "events[\"End\"] = events[\"End\"].dt.tz_localize(timezone)\n", + "# check if there are duplicated events\n", + "assert events.index.is_unique, \"Need unique event\"\n", + "assert events.drop_duplicates(subset=\"Event\")[\"SimulationName\"].is_unique, \"Need exactly one simulation name per event\"\n", + "# conversion of the SimulationName column to the string data type, if it is not already\n", + "events[\"SimulationName\"] = events[\"SimulationName\"].astype(str)\n", + "events" + ] + }, + { + "cell_type": "markdown", + "id": "4edfb049ae1aa9bc", + "metadata": {}, + "source": [ + "Read the Excel file config_parameter" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2825065f30d0835", + "metadata": {}, + "outputs": [], + "source": [ + "param_specs = file_to_df(temp_folder / parameter_xlsx)\n", + "\n", + "# give param_specs a sensible string index; this is later used to pass optimization parameters as a dictionary\n", + "param_specs.index = pd.Index(\n", + "    param_specs[[\"Table\", \"Column\", \"Muid\"]].agg(\";\".join, axis=\"columns\"),\n", + "    name=\"ParamKey\",\n", + ")\n", + "assert (\n", + "    param_specs.index.is_unique\n", + "), \"Index needs to be unique. Otherwise this indicates duplicates in the parameters.\"\n", + "param_specs" + ] + }, + { + "cell_type": "markdown", + "id": "7fc5cf01", + "metadata": {}, + "source": [ + "Read the Excel file structures" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f364649", + "metadata": {}, + "outputs": [], + "source": [ + "structures = file_to_df(temp_folder / structure_xlsx)\n", + "structures_list = structures['structures'].tolist()\n", + "structures_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a6365d92", + "metadata": {}, + "outputs": [], + "source": [ + "fracht_list = structures['fracht'].tolist()\n", + "fracht_list" + ] + }, + { + "cell_type": "markdown", + "id": "e084bb2d61486c76", + "metadata": {}, + "source": [ + "Open the MIKE+ database and start the engine" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "506b4cc9696676fe", + "metadata": {}, + "outputs": [], + "source": [ + "dta = DataTableAccess(db_path)\n", + "# open the database from MIKE+\n", + "dta.open_database()\n", + "# check if the database is open\n", + "dta.is_database_open()\n", + "# create the Engine1D object that will be used to run MIKE 1D simulations\n", + "engine = Engine1D(dta.datatables)" + ] + },
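+ { + "cell_type": "markdown", + "id": "c0ffee0000000003", + "metadata": {}, + "source": [ + "Parameter values are later written into the open database via DataTableAccess.set_value, in the order table, MUID, column, value (see simulation_run below). The cell below is only a hedged sketch of that call, using one of the pump duty points from the parameter config; the value 0.05 is made up and the call is commented out so that the model stays unchanged here." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c0ffee0000000004", + "metadata": {}, + "outputs": [], + "source": [ + "# Illustrative sketch of the call used later in simulation_run:\n", + "# dta.set_value(<table>, <muid>, <column>, <value>)\n", + "# dta.set_value(\"msm_Pump\", \"MWP_0112\", \"DutyPoint\", 0.05)" + ] + },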
+ { + "cell_type": "markdown", + "id": "9defc99104ef7ae9", + "metadata": {}, + "source": [ + "Threading is used to run multiple simulations in parallel with different engines" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c7455cfdf9a4d80", + "metadata": {}, + "outputs": [], + "source": [ + "engines = {}\n", + "\n", + "def init_engine():\n", + "    engines[threading.current_thread()] = Engine1D(dta.datatables)\n", + "    print(f'initialized engine for {threading.current_thread()}')\n", + "\n", + "# use ThreadPool from the multiprocessing package; it has a rather simple interface\n", + "sim_executor = ThreadPool(processes=engine_seats, initializer=init_engine)" + ] + }, + { + "cell_type": "markdown", + "id": "873214d0391ede5b", + "metadata": {}, + "source": [ + "#### Functions for the Simulation" + ] + }, + { + "cell_type": "markdown", + "id": "679383a7d5714e0c", + "metadata": {}, + "source": [ + "calculate_discharge_for_structure: Calculates the discharge for a given structure from a given res1d object. Returns the total discharge in litres." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "9b28a84a6e9b2a97", + "metadata": {}, + "outputs": [], + "source": [ + "def calculate_discharge_for_structure(res1d, structure):\n", + "    try:\n", + "        values = res1d.structures[structure].Discharge.read()\n", + "        # convert to litres, assuming 1-minute result timesteps: m3/s * 60 s * 1000 l/m3\n", + "        total_discharge = np.sum(values) * 60 * 1000\n", + "        return total_discharge\n", + "    except KeyError:\n", + "        print(f'Structure {structure} not found in the Res1D data.')\n", + "        return None\n", + "    except AttributeError:\n", + "        print(f'Discharge data not available for structure {structure}.')\n", + "        return None\n", + "    except Exception as e:\n", + "        print(f'An error occurred while calculating discharge for structure {structure}: {e}')\n", + "        return None" + ] + }, + { + "cell_type": "markdown", + "id": "9f76cc017567a78b", + "metadata": {}, + "source": [ + "check_flood: For each node it is checked whether the value of WaterVolumeAboveGround is greater than zero, i.e. whether flooding is present.\n", + "To do this, the additional item 'Water volume above ground' must be selected in the MIKE+ result specification for the hydrodynamic result file." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "1d2b04e24d558655", + "metadata": {}, + "outputs": [], + "source": [ + "def check_flood(res1d) -> bool:\n", + "    flood = False\n", + "    node_names = res1d.nodes\n", + "    flooded_nodes = []\n", + "\n", + "    for node in node_names:\n", + "        query_node = QueryDataNode(quantity='WaterVolumeAboveGround', name=node)\n", + "        try:\n", + "            water_volume_values = query_node.get_values(res1d)\n", + "\n", + "            if water_volume_values is None or len(water_volume_values) == 0:\n", + "                raise ValueError(f'No WaterVolumeAboveGround found for node {node}')\n", + "            if (water_volume_values > 0).any():\n", + "                flood = True\n", + "                flooded_nodes.append(node)\n", + "                print(f'Flood detected for node {node}')\n", + "        except Exception as e:\n", + "            print(f'Error retrieving values for node {node}: {e}')\n", + "    if flood:\n", + "        print('Flood detected in the system!')\n", + "        print('Flooded nodes:', flooded_nodes)\n", + "    else:\n", + "        print('No flood detected in the system!')\n", + "    return flood" + ] + },
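+ { + "cell_type": "markdown", + "id": "c0ffee0000000005", + "metadata": {}, + "source": [ + "A quick worked example of the unit conversion in calculate_discharge_for_structure above: assuming discharge values in m3/s at 1-minute result timesteps (an assumption of this notebook, not something the result file enforces), each value contributes 60 s worth of volume in m3, and the factor 1000 converts m3 to litres. The numbers below are made up." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c0ffee0000000006", + "metadata": {}, + "outputs": [], + "source": [ + "# Worked example of the conversion above (made-up values):\n", + "# three 1-minute timesteps with a constant discharge of 0.5 m3/s\n", + "example_discharge = np.array([0.5, 0.5, 0.5])  # m3/s per timestep\n", + "total_litres = np.sum(example_discharge) * 60 * 1000  # 1.5 * 60 s * 1000 l/m3\n", + "total_litres  # 90000.0 l, i.e. 90 m3" + ] + },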
+ { + "cell_type": "markdown", + "id": "e4df0cdeab58794e", + "metadata": {}, + "source": [ + "single_sim_run: Runs a single simulation of an event. Returns a string message on successful finish." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "245c0c41a582d710", + "metadata": {}, + "outputs": [], + "source": [ + "def single_sim_run(ev):\n", + "    sim_name = events.loc[events['Event'] == ev, 'SimulationName'].iloc[0]\n", + "    print(f'Running simulation {sim_name} in {threading.current_thread()}.')\n", + "    engines[threading.current_thread()].run(sim_name)\n", + "    return f'Completed simulation of {ev} in {threading.current_thread()}.'" + ] + }, + { + "cell_type": "markdown", + "id": "8bc732ab8dce7aa9", + "metadata": {}, + "source": [ + "delete_result_file: deletes an existing result file corresponding to an event before a simulation." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "64ecc39a9f9b792e", + "metadata": {}, + "outputs": [], + "source": [ + "def delete_result_file(event):\n", + "    file_path = temp_folder / events.loc[event, \"ResultFile\"]\n", + "    # missing_ok=True covers the case that the file does not exist\n", + "    file_path.unlink(missing_ok=True)" + ] + }, + { + "cell_type": "markdown", + "id": "612b3e8081cacd16", + "metadata": {}, + "source": [ + "simulation_run: Takes parameters as keyword arguments, runs the simulations defined by events with them, saves a per-structure results table to Excel, and returns the total weighted load (the optimization loss)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66215a7ad8996ecb", + "metadata": {}, + "outputs": [], + "source": [ + "def simulation_run(**kwparams):\n", + "    assert set(kwparams.keys()) == set(param_specs.index)\n", + "\n", + "    timestamp = datetime.datetime.now().strftime(\"%Y-%m-%dT%H.%M.%S\")\n", + "    output_filename = f\"simulation_results_{timestamp}.xlsx\"\n", + "    result_output_path = Path(out_folder / output_filename)\n", + "    print(f\"Results will be saved to: {result_output_path}\")\n", + "\n", + "    # write the parameter values into the open MIKE+ database\n", + "    for k in kwparams.keys():\n", + "        tab = param_specs.loc[k, 'Table']\n", + "        col = param_specs.loc[k, 'Column']\n", + "        muid = param_specs.loc[k, 'Muid']\n", + "        val = kwparams[k]\n", + "        dta.set_value(tab, muid, col, val)\n", + "        print(f'Set value for {tab}.{muid}.{col} to value: {val}')\n", + "\n", + "    if DELETE_RESFILE_BEFORE_SIM:\n", + "        for l in events.index:\n", + "            delete_result_file(l)\n", + "\n", + "    finish_msgs = sim_executor.map(single_sim_run, events[\"Event\"].unique())\n", + "    print(finish_msgs)\n", + "\n", + "    results_table = pd.DataFrame(columns=['Event', 'Structure', 'Discharge', 'Load'])\n", + "    total_result = 0\n", + "\n", + "    for l in events.index:\n", + "        result_file_path = temp_folder / events.loc[l, \"ResultFile\"]\n", + "        result_file_path_str = str(result_file_path)\n", + "\n", + "        # check if the result file exists\n", + "        if not result_file_path.exists():\n", + "            print(f\"Result file does not exist: {result_file_path}\")\n", + "            continue\n", + "\n", + "        try:\n", + "            # create a Res1D instance for each file\n", + "            res1d = Res1D(result_file_path_str)\n", + "            flood = check_flood(res1d)\n", + "\n", + "            # if there is flooding on the roads, a large penalty (10e50) is issued as the result\n", + "            if flood:\n", + "                results_ev = 10e50\n", + "            else:\n", + "                discharge_values = []\n", + "                for structure, fracht in zip(structures_list, fracht_list):\n", + "                    discharge = calculate_discharge_for_structure(res1d, structure)\n", + "                    if discharge is not None:\n", + "                        weighted_discharge = discharge * fracht\n", + "                        discharge_values.append(weighted_discharge)\n", + "                        results_table = pd.concat([results_table, pd.DataFrame([{\n", + "                            'Event': events.loc[l, 
\"Event\"],\n", + " 'Structure': structure,\n", + " 'Discharge': discharge,\n", + " 'Load': weighted_discharge\n", + " }])], ignore_index=True)\n", + " \n", + " \n", + " results_ev = np.sum(discharge_values) if discharge_values else 0\n", + " print(f\"Result for result file {result_file_path}: {results_ev}\")\n", + " total_result += results_ev\n", + " \n", + " except Exception as e:\n", + " print(f\"Error processing result file {result_file_path}: {e}\")\n", + " traceback.print_exc()\n", + " results_ev = 0\n", + " \n", + " print(f\"Total result: {total_result}\") \n", + " print(results_table)\n", + " \n", + " results_table.to_excel(result_output_path, index=False)\n", + " print(f\"Results saved to {result_output_path}\")\n", + "\n", + " return total_result" + ] + }, + { + "cell_type": "markdown", + "id": "c6c65209016d9c72", + "metadata": {}, + "source": [ + "#### Test simulation run of all events with the default parameters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ba902b33d9088f9", + "metadata": {}, + "outputs": [], + "source": [ + "param_specs.dtypes\n", + "_test_res = simulation_run(**param_specs[\"Default\"])\n", + "display(\"_test_res:\")\n", + "display(_test_res)" + ] + }, + { + "cell_type": "markdown", + "id": "597bc0c3", + "metadata": {}, + "source": [ + "#### preparations for optimisation" + ] + }, + { + "cell_type": "markdown", + "id": "20ac8de37b5b66af", + "metadata": {}, + "source": [ + "Define optimization parameters as needed by nevergrade." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "b5506c04386e1a93", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Instrumentation(Tuple(),Dict(msm_Pump;DutyPoint;MWP_0112=Scalar{Cl(0.001,0.12,b)}[sigma=Scalar{exp=2.03}],msm_Pump;DutyPoint;MWP_0142=Scalar{Cl(0.001,0.04,b)}[sigma=Scalar{exp=2.03}],msm_Pump;DutyPoint;MWP_0314_I=Scalar{Cl(0.001,0.9500000000000001,b)}[sigma=Scalar{exp=2.03}],msm_Pump;DutyPoint;MWP_0932_I=Scalar{Cl(0.001,0.083,b)}[sigma=Scalar{exp=2.03}],msm_Pump;DutyPoint;MWP_0944_I=Scalar{Cl(0.001,0.098,b)}[sigma=Scalar{exp=2.03}],msm_Pump;DutyPoint;RWP_0302=Scalar{Cl(0.001,0.5,b)}[sigma=Scalar{exp=2.03}],msm_Pump;DutyPoint;RWP_0335=Scalar{Cl(0.001,0.065,b)}[sigma=Scalar{exp=2.03}],msm_RTCAction;PIDSetPoint;HUB0201_rain=Scalar{Cl(0.062,0.518,b)}[sigma=Scalar{exp=2.03}],msm_RTCAction;PIDSetPoint;HUB0301_rain=Scalar{Cl(0.185,0.405,b)}[sigma=Scalar{exp=2.03}],msm_RTCAction;PIDSetPoint;S_SKU0216=Scalar{Cl(0.005,0.112,b)}[sigma=Scalar{exp=2.03}])):((), {'msm_Pump;DutyPoint;MWP_0142': 0.003, 'msm_Pump;DutyPoint;MWP_0112': 0.009000000000000001, 'msm_RTCAction;PIDSetPoint;HUB0301_rain': 0.261, 'msm_Pump;DutyPoint;MWP_0944_I': 0.073, 'msm_Pump;DutyPoint;MWP_0314_I': 0.004, 'msm_RTCAction;PIDSetPoint;HUB0201_rain': 0.371, 'msm_Pump;DutyPoint;RWP_0335': 0.059000000000000004, 'msm_RTCAction;PIDSetPoint;S_SKU0216': 0.012, 'msm_Pump;DutyPoint;MWP_0932_I': 0.001, 'msm_Pump;DutyPoint;RWP_0302': 0.001})" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ng_params = {}\n", + "for i in param_specs.index:\n", + " vmin = param_specs.at[i, \"Min\"]\n", + " vmax = param_specs.at[i, \"Max\"]\n", + " vdef = param_specs.at[i, \"Default\"]\n", + " ngp = ng.p.Scalar(init=vdef, lower=vmin, upper=vmax)\n", + " ng_params[i] = ngp\n", + "instrumentation = ng.p.Instrumentation(**ng_params)\n", + "instrumentation" + ] + }, + { + "cell_type": "markdown", + "id": "4523f7b62a1787ff", + "metadata": {}, + "source": [ + 
"params_losses_callback: returns two lists and a callback function. When the callback function is executed it puts the parameters and loss into the lists.!!Do not register this function itself as a callback, but the third returned parameter!!" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "6ef898b0e0627565", + "metadata": {}, + "outputs": [], + "source": [ + "def params_losses_callback():\n", + " params = []\n", + " losses = []\n", + "\n", + " def callback(optim, par, loss):\n", + " params.append(par)\n", + " losses.append(loss)\n", + "\n", + " # returns two lists and the function\n", + " return params, losses, callback" + ] + }, + { + "cell_type": "markdown", + "id": "cb86d6a2f0ea8c61", + "metadata": {}, + "source": [ + "optim_run: Start the optimizer input = name of the optimizer output = dataframe with details from optimization model" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "a0e2e505ea5df65a", + "metadata": {}, + "outputs": [], + "source": [ + "def optim_run(optim_name):\n", + " # set the optimizer based on the optim_name and instrumentation from params_spec\n", + " optimizer = ng.optimizers.registry[optim_name](\n", + " parametrization=instrumentation, budget=optimization_budget, num_workers=1\n", + " )\n", + " # show progressbar. updated when the optimizer gets the result of a step\n", + " optimizer.register_callback(\"tell\", ng.callbacks.ProgressBar())\n", + " # put params and loss in list\n", + " param_list, loss_list, plcb = params_losses_callback()\n", + " optimizer.register_callback(\"tell\", plcb)\n", + " # get recommendation of parameters after run is finished\n", + " reccomendation = optimizer.minimize(simulation_run, verbosity=2)\n", + " optim_run_df = pd.concat(\n", + " [\n", + " pd.DataFrame(data=[p.value[1] for p in param_list]),\n", + " pd.DataFrame({\"Optimizer\": optimizer.name, \"Loss\": loss_list}),\n", + " ],\n", + " axis=\"columns\",\n", + " )\n", + " # also add a line for the reccomendation\n", + " rec_params = reccomendation.value[1]\n", + " optim_run_df.loc[\"reccomendation\", rec_params.keys()] = rec_params\n", + " optim_run_df.loc[\"reccomendation\", \"Loss\"] = reccomendation.loss\n", + " optim_run_df.loc[\"reccomendation\", \"Optimizer\"] = optimizer.name\n", + " # save the results of the optimization as a csv and excel file\n", + " optim_run_df.to_csv(out_folder / f\"optim_run{optimizer.name}.csv\")\n", + " optim_run_df.to_excel(out_folder / f\"optim_run{optimizer.name}.xlsx\")\n", + "\n", + " return optim_run_df" + ] + }, + { + "cell_type": "markdown", + "id": "28e37afd", + "metadata": {}, + "source": [ + "#### Run optimisation" + ] + }, + { + "cell_type": "markdown", + "id": "58dd27759c113b7e", + "metadata": {}, + "source": [ + " choose optimizers from nevergrad\n", + " https://en.wikipedia.org/wiki/CMA-ES\n", + " create a list with optimizers\n", + " it is also possible to use multiple optimizers and compare the results\n", + " create dictionary to store the rundata for each optimizer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d8d2932f77d3de2", + "metadata": {}, + "outputs": [], + "source": [ + "optim_run_dfs = {}\n", + "for o in optimizers:\n", + " # start the optimization\n", + " optim_run_dfs[o] = optim_run(o)\n", + " # display results\n", + " display(optim_run_dfs[o])\n", + " optim_run_dfs[o].plot(subplots=True, figsize=(5, 2 * len(param_specs)))\n", + "\n", + "# optim_run_df already has optimizer included, use .values instead of passing the dict, so that the optimizer 
+ { + "cell_type": "code", + "execution_count": null, + "id": "1d8d2932f77d3de2", + "metadata": {}, + "outputs": [], + "source": [ + "optim_run_dfs = {}\n", + "for o in optimizers:\n", + "    # start the optimization\n", + "    optim_run_dfs[o] = optim_run(o)\n", + "    # display results\n", + "    display(optim_run_dfs[o])\n", + "    optim_run_dfs[o].plot(subplots=True, figsize=(5, 2 * len(param_specs)))\n", + "\n", + "# each optim_run_df already has the optimizer included; use .values instead of passing the dict, so that the optimizer is not added again\n", + "optim_run_df = pd.concat(optim_run_dfs.values())\n", + "optim_run_df.to_csv(out_folder / \"optim_run.csv\")\n", + "\n", + "# set the index of the optim_run_df\n", + "optim_run_df.index.name = \"Step\"\n", + "optim_run_df = optim_run_df.reset_index().set_index([\"Optimizer\", \"Step\"])" + ] + }, + { + "cell_type": "markdown", + "id": "0bb04f20", + "metadata": {}, + "source": [ + "Print the Results of the Optimization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5a16015edf840d52", + "metadata": {}, + "outputs": [], + "source": [ + "for key, df in optim_run_df.groupby(level=\"Optimizer\"):\n", + "    display(key, df.loc[(key, \"recommendation\"), param_specs.index])" + ] + }, + { + "cell_type": "markdown", + "id": "1875e60c", + "metadata": {}, + "source": [ + "Close the database" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4ba189120eb70e3", + "metadata": {}, + "outputs": [], + "source": [ + "dta.close_database()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:anaconda3]", + "language": "python", + "name": "conda-env-anaconda3-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}