diff --git a/hydraulic_calibration/optimization_hydraulic.ipynb b/hydraulic_calibration/optimization_hydraulic.ipynb
index 9894e00ddffe3d12f8d3f38f4397614ab37fdabd..e1daeae5cb297ee2db19a678f9dc2a021d0222ca 100644
--- a/hydraulic_calibration/optimization_hydraulic.ipynb
+++ b/hydraulic_calibration/optimization_hydraulic.ipynb
@@ -4,22 +4,7 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "C:\\Users\\trojan\\AppData\\Local\\Temp\\ipykernel_14844\\805292381.py:21: DeprecationWarning: \n",
-      "Pyarrow will become a required dependency of pandas in the next major release of pandas (pandas 3.0),\n",
-      "(to allow more performant data types, such as the Arrow string type, and better interoperability with other libraries)\n",
-      "but was not found to be installed on your system.\n",
-      "If this would cause problems for you,\n",
-      "please provide us feedback at https://github.com/pandas-dev/pandas/issues/54466\n",
-      "        \n",
-      "  import pandas as pd\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "\"\"\"\n",
     "Optimization with mikepluspy\n",
@@ -59,7 +44,10 @@
    "outputs": [],
    "source": [
     "def file_to_df(path: Path) -> pd.DataFrame:\n",
-    "    \"\"\"Tries to read a file at path as a pandas DataFrame.\"\"\"\n",
+    "    \"\"\"\n",
+    "    Tries to read a file at path as a pandas DataFrame\n",
+    "    Can be used for Excel, CSV and MIKE-specific dfs0 files\n",
+    "    \"\"\"\n",
     "    if path.suffix == \".xlsx\":\n",
     "        return pd.read_excel(path)\n",
     "    elif path.suffix == \".dfs0\":\n",
@@ -77,16 +65,17 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Set variables for later use\n",
+    "'''Set system variables for later use''' \n",
+    "\n",
     "# delete  result files(as specified by event config) before a sim, assures that no old file is reread in the case that a simulation does not run\n",
     "DELETE_RESFILE_BEFORE_SIM = True\n",
-    "# output heaps of additional data, may be useful for debugging\n",
+    "# output heaps of additional data, may be useful for debugging. Default = False\n",
     "VERBOSE_OUTPUT = False\n",
     "# attempt simulation at most so many times, used when a simulation fails\n",
     "MAX_SUMILATION_TRY = 3\n",
     "# Consider a simulation as failed and retry if so many values in the result compared to the reference are missing\n",
     "MAX_MISSING_RATIO = 0.6\n",
-    "#delete the temp folder at end\n",
+    "# delete the temp folder at the end. Default = True\n",
     "DELETE_TEMP = True\n",
     "# time string for output folder\n",
     "time_path_part = datetime.datetime.now().strftime(\"%Y-%m-%dT%H.%M.%S\")\n",
@@ -104,7 +93,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# variables which need to be set before each optimization\n",
+    "'''variables which need to be set before each optimization''' \n",
+    "\n",
     "# Number of simulations allowed for optimization\n",
     "optimization_budget = 60\n",
     "# a folder where used files are saved\n",
@@ -149,7 +139,7 @@
     }
    ],
    "source": [
-    "#print the path of the temp folder\n",
+    "# print the path of the temp folder\n",
     "temp_folder.absolute()"
    ]
   },
@@ -182,9 +172,12 @@
     }
    ],
    "source": [
-    "# MIKE+Py import it's own version of mikeio1d, which does not work for our files and also has odd problems with mikeio.\n",
-    "# Therefore use a multiprocessing.Listener, which run in its own process with its own imports, and reads the file.\n",
-    "# the script \"mikeio_listener.py\" needs to be in the same folder, or has to be startd beforehand.\n",
+    "'''\n",
+    "MIKE+Py imports its own version of mikeio1d, which does not work for our files and also has odd problems with mikeio.\n",
+    "Therefore use a multiprocessing.Listener, which runs in its own process with its own imports and reads the file.\n",
+    "The script \"mikeio_listener.py\" needs to be in the same folder, or has to be started beforehand.\n",
+    "'''\n",
+    "\n",
     "reader_address = R\"\\\\.\\pipe\\mikeio_pipe\"\n",
     "auth_key = b\"res1dreader\"\n",
     "try:\n",
@@ -218,7 +211,7 @@
     }
    ],
    "source": [
-    "# open the MIKE+ database using MIKE+Py.\n",
+    "# open the MIKE+ database using MIKE+Py\n",
     "dta = DataTableAccess(db_path)\n",
     "# open the database from MIKE+\n",
     "dta.open_database()\n",
@@ -232,7 +225,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Create the Engine1D object that will be used to run MIKE 1D simulations.\n",
+    "# Create the Engine1D object that will be used to run MIKE 1D simulations\n",
     "engine = Engine1D(dta.datatables)"
    ]
   },
@@ -267,13 +260,16 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# specify which parameters are optimized and in which range they should be\n",
-    "# could make the table in python like this\n",
-    "# param_specs = pd.DataFrame([\n",
-    "#     {\"Table\":\"msm_HParA\",\"Muid\":DEF_STRING,\"Column\":\"RedFactor\",\"Min\":.4,\"Max\":1,\"Steps\":4},\n",
-    "#     {\"Table\":\"msm_HParA\",\"Muid\":DEF_STRING,\"Column\":\"InitLoss\",\"Min\":0,\"Max\":5*M_TO_MM,\"Steps\":4}\n",
-    "# ])\n",
-    "# or load from a file\n",
+    "'''\n",
+    "specify which parameters are optimized and in which range they should be\n",
+    "could make the table in python like this\n",
+    "param_specs = pd.DataFrame([\n",
+    "    {\"Table\":\"msm_HParA\",\"Muid\":DEF_STRING,\"Column\":\"RedFactor\",\"Min\":.4,\"Max\":1,\"Steps\":4},\n",
+    "    {\"Table\":\"msm_HParA\",\"Muid\":DEF_STRING,\"Column\":\"InitLoss\",\"Min\":0,\"Max\":5*M_TO_MM,\"Steps\":4}\n",
+    "])\n",
+    "or load from a file:\n",
+    "'''\n",
+    "\n",
     "param_specs = file_to_df(temp_folder / \"parameter_config.xlsx\")\n",
     "# give params_specs a sensible string index, this is later used to pass optimization parameters as a dictionary\n",
     "param_specs.index = pd.Index(\n",
@@ -707,19 +703,19 @@
     }
    ],
    "source": [
-    "# read the event_config.xlsx in which for each sim_muid=events variables are set.\n",
+    "# read the event_config.xlsx in which for each sim_muid=events variables are set\n",
     "events = file_to_df(temp_folder / \"event_config.xlsx\").set_index(\"Label\")\n",
-    "# make sure that empty strings are translated.\n",
+    "# make sure that empty strings are translated\n",
     "events[\"ResultReach\"] = events[\"ResultReach\"].fillna(\"\").astype(str)\n",
     "events[\"ResultNode\"] = events[\"ResultNode\"].fillna(\"\").astype(str)\n",
     "# check if start time of events is before the end time.\n",
     "assert (events[\"Start\"] <= events[\"End\"]).all(\n",
     "    axis=None\n",
     "), \"Event end needs to be after start.\"\n",
-    "# add timezone. Use same as sensor_date.\n",
+    "# add timezone. Use same as sensor_date\n",
     "events[\"Start\"]=events[\"Start\"].dt.tz_localize(timezone)\n",
     "events[\"End\"]=events[\"End\"].dt.tz_localize(timezone)\n",
-    "# check if threre are duplicated events.\n",
+    "# check if there are duplicated events\n",
     "assert events.index.is_unique, \"Need unique labels\"\n",
     "assert events.drop_duplicates(subset=\"Event\")[\"SimulationName\"].is_unique, \"Need exactly one simulation name per event\"\n",
     "events"
@@ -751,17 +747,17 @@
     "    reader_c.send(params)\n",
     "    # second to receive the data from the result file    \n",
     "    df = reader_c.recv()\n",
-    "    # reduce result to desired column.\n",
+    "    # reduce result to desired column\n",
     "    series = df[column]\n",
-    "    # add timezone to the index.\n",
+    "    # add timezone to the index\n",
     "    series.index = series.index.tz_localize(timezone)\n",
-    "    # print the data before processing.\n",
+    "    # print the data before processing\n",
     "    if VERBOSE_OUTPUT:\n",
     "        display(f\"{file_path}[{column}] before resample:\")\n",
     "        display(series)\n",
-    "    # resample to be comparable to results.\n",
+    "    # resample to be comparable to results\n",
     "    # Linear interpolation with pandas first throws away all values where the index does not match to the frequency,\n",
-    "    # therefore use nearest instead of interpolate.\n",
+    "    # therefore use nearest instead of interpolate\n",
     "    series = series.resample(timestep).nearest()\n",
     "    # reduce result to desired times\n",
     "    series = series.loc[(series.index >= start) & (series.index <= end)]\n",
@@ -803,10 +799,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# create a dictionary to save parts of the reference corresponding to label.\n",
+    "# create a dictionary to save parts of the reference corresponding to label\n",
     "ref_parts = {}\n",
     "\n",
-    "# read the data for each label.\n",
+    "# read the data for each label\n",
     "for label in events.index:\n",
     "    filepath = temp_folder / events.loc[label, \"ReferenceFile\"]\n",
     "    col = events.loc[label, \"ReferenceColumn\"]\n",
@@ -836,7 +832,7 @@
     "    ref_part = ref_part.resample(events.loc[label, \"Timestep\"]).nearest()\n",
     "    ref_part = ref_part*events.loc[label, \"ReferenceUnitFactor\"]\n",
     "    assert ref_part.isna().any(axis=None) == False\n",
-    "    # write in dictionary.\n",
+    "    # write in dictionary\n",
     "    ref_parts[label] = ref_part\n",
     "\n",
     "# write dictionary into a pandas series with index Label and Datetime    \n",
@@ -892,7 +888,7 @@
     "    res_volume = res.sum()  # Gesamtvolumen der Ergebnisdaten\n",
     "    dif = abs((res_volume - ref_volume))\n",
     "    # if reference volumne is 0 use replacement value. More useful for optimization\n",
-    "    # replacement value for the divisior when ref_volume is zero.\n",
+    "# replacement value for the divisor when ref_volume is zero\n",
     "    _vfp_divisor_on_zero = reference.groupby(level=\"Label\").sum().mean()\n",
     "    if ref_volume == 0:\n",
     "        ref_volume = _vfp_divisor_on_zero\n",
@@ -903,7 +899,7 @@
     "def weighted_vfp(ref: pd.Series, res: pd.Series) -> float:\n",
     "    '''Weighted Volume Error'''\n",
     "    labels = events.index.get_level_values(\"Label\")\n",
-    "    # read the event weights also put timestep into weights.\n",
+    "    # read the event weights also put timestep into weights\n",
     "    weights = events[\"EventWeight\"].to_numpy()\n",
     "    # calculate the vfp for each label\n",
     "    per_label = np.array(\n",
@@ -965,8 +961,8 @@
     "def single_sim_run(ev,**kwparams):\n",
     "    \"\"\"\n",
     "    Runs a single simulation for event ev.\n",
-    "    \n",
     "    \"\"\"\n",
+    "    \n",
     "    labels = events.index[events[\"Event\"]==ev]\n",
     "    # delete previous result file\n",
     "    if DELETE_RESFILE_BEFORE_SIM:\n",
@@ -1383,9 +1379,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "myenv",
    "language": "python",
-   "name": "python3"
+   "name": "myenv"
   },
   "language_info": {
    "codemirror_mode": {
@@ -1397,7 +1393,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.11"
+   "version": "3.9.18"
   }
  },
  "nbformat": 4,