diff --git a/District/database/to-do.txt b/District/database/to-do.txt deleted file mode 100644 index e05d2de1bd6b4bd58bb7dfdbc4ae3d04e65acf26..0000000000000000000000000000000000000000 --- a/District/database/to-do.txt +++ /dev/null @@ -1 +0,0 @@ -Add pre-configured districts diff --git a/District/main_district.py b/District/main_district.py new file mode 100644 index 0000000000000000000000000000000000000000..ccf0afc2198608a2018ae216c402d79d3acfba3f --- /dev/null +++ b/District/main_district.py @@ -0,0 +1,253 @@ +import time +import os +import math +import pandas as pd + +import Model_Library.District.model as comm_model +import Model_Library.District.scripts as scripts +from Model_Library.District.scripts.extract_inputs import DistrictExtractor + +class MainDistrict: + def __init__(self, prosumer_dict, comm_assets, time_steps, t_step, comm_dict, ca_strategy, comm_strategy, t_horizon): + start_comm_init = time.time() + + Extractor = DistrictExtractor() + # Read the community configuration + comm_config = Extractor.get_config(comm_dict['community']['config_path']) + + # Read time series data from path + comm_data_path_dict = {} + input_profiles = {} + + comm_data_path_dict['community'] = Extractor.get_path(comm_dict['community']['data_path']) + input_profiles['community'] = Extractor.run(comm_data_path_dict, 'community', t_step) + comm_config['injection_price'] = input_profiles['community']['elec_price'].iloc[:,0] + comm_config['elec_price'] = input_profiles['community']['elec_price'].iloc[:,0] + + comm_config['elec_price_int_low'] = (comm_config['elec_price'] + comm_config['network_usage_energy_fee_low'] + + comm_config['levies_int'] + comm_config['concession'] + comm_config[ 'electricity_tax_int']) \ * (1 + comm_config['VAT']) + comm_config['elec_price_int_high'] = (comm_config['elec_price'] + comm_config['network_usage_energy_fee_high'] + + comm_config['levies_int'] + comm_config['concession'] + comm_config[ 'electricity_tax_int']) \ * (1 + comm_config['VAT']) + comm_config['elec_price_ext_low'] = (comm_config['elec_price'] + comm_config['network_usage_energy_fee_low'] + + comm_config['levies_ext'] + comm_config['concession'] + comm_config[ 'electricity_tax_ext']) \ * (1 + comm_config['VAT']) + comm_config['elec_price_ext_high'] = (comm_config['elec_price'] + comm_config['network_usage_energy_fee_high'] + + comm_config['levies_ext'] + comm_config['concession'] + comm_config[ 'electricity_tax_ext']) \ * (1 + comm_config['VAT']) + + + community = comm_model.BaseComm('TestComm', {'Prosumer': prosumer_dict, 'Comm_Comps': comm_assets.comm_assets}, + time_steps, {'Prosumer': prosumer_dict.keys(), 'Comm_Comps': comm_assets.comm_assets.keys()}, + t_step, comm_config, ca_strategy) + end_comm_init = time.time() + + """---------------------ROLLING HORIZON (RH) ------------------------------------------------------------------------""" + """-----------------------------------------------------------------------------------------------------------------""" + + t_overlap_default = 0 # default: 0; number of time steps the starting point of each interval is shifted into the prior interval + t_interval_default = 4*24 # default: 4*24; number of time steps in one interval of the RH + + t_start_rh = time_steps[0] # start of the rolling horizon + t_last = 0 + + t_interval = t_interval_default / t_step + last_t_interval = t_interval_default / t_step + + last_t_overlap = t_overlap_default / t_step + t_overlap = t_overlap_default / t_step + + start_rh = time.time() + n_change = 0 +
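+ # Hedged sizing example for the defaults above: all interval lengths are + # counted in time steps, so with e.g. t_step = 0.25 h the default window of + # 4*24 h spans t_interval = 96 / 0.25 = 384 steps, of which t_fix = + # t_interval - t_overlap steps are committed per iteration before the window + # rolls forward.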
+ while t_last * t_step < t_horizon: + print('-------------------' + str(t_last) + '-----------------') + + # PREPARE INTERVAL + if (t_last + t_interval - t_overlap) * t_step < t_horizon and (t_last + t_interval) * t_step <= t_horizon: + # interval does not reach overall end + t_horizon_rh = int(t_interval) # number of time steps in this RH-interval + t_fix = max(t_horizon_rh - int(t_overlap), 1) # number of fixed time steps of this RH-interval + elif (t_last + t_interval - t_overlap) * t_step < t_horizon and (t_last + t_interval) * t_step > t_horizon: + # interval overshoots the overall horizon, but only because of the overlap + t_horizon_rh = int(len(time_steps) - t_last) # interval length is adapted + t_fix = max(int(t_interval) - int(t_overlap), 1) # but not the number of fixed time steps + else: + # fixed time steps hit overall time horizon --> adapt also the fixed time steps + t_horizon_rh = int(len(time_steps) - t_last) + t_fix = t_horizon_rh + + # extract the relevant time step values from the overall time series + time_steps_rh = time_steps[t_last:t_last + t_horizon_rh] # whole interval size + time_steps_fix = time_steps[t_last:t_last + t_fix] # without overlap + + # STEP 1 --> SCHEDULE EXTRACTION + start_schedule = time.time() + for ps in prosumer_dict.values(): + valid = ps.calc_rh_step_schedule(time_steps_rh) + if not valid: + n_change += 1 + t_interval = int(math.floor(t_interval / 2)) + if t_interval == 0: + t_interval = 1 + if t_interval < t_overlap: + t_overlap = t_interval - 1 + break + + if not valid: + continue + + for cc in community.comm_assets.values(): + valid = cc.calc_rh_step_schedule(time_steps_rh) + if not valid: + n_change += 1 + t_interval = int(math.floor(t_interval / 2)) + if t_interval == 0: + t_interval = 1 + if t_interval < t_overlap: + t_overlap = t_interval - 1 + break + + if not valid: + continue + + end_schedule = time.time() + + # STEP 2 --> DF IDENTIFICATION + start_flex_calc = time.time() + for ps in prosumer_dict.values(): + ps.calc_flex(time_steps_rh) + + for cc in community.comm_assets.values(): + cc.calc_flex(time_steps_rh) + + # aggregate prosumer DFs in community + community.aggregate_temp(time_steps_rh, prosumer_dict, community.comm_assets) + end_flex_calc = time.time() + + # COMMUNITY OPTIMIZATION + start_opt = time.time() + valid = community.run_optimization(time_steps_rh, comm_strategy, timelimit=200) + + if not valid: + n_change += 1 + t_interval = int(math.floor(t_interval / 2)) + if t_interval == 0: + t_interval = 1 + if t_interval < t_overlap: + t_overlap = t_interval - 1 + continue + end_opt = time.time() + + # VALIDATION + start_validation = time.time() + valid = community.validate_result(time_steps_fix) + if valid: + # fix results...
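+ # (hedged note: on success every prosumer and community asset commits the + # first t_fix steps below; on failure the interval is halved instead, e.g. + # 384 -> 192 -> 96 steps, until the problem solves or one step remains)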
+ for ps in prosumer_dict.values(): + ps.fix_results(time_steps_fix) + for cc in community.comm_assets.values(): + cc.fix_results(time_steps_fix) + community.aggregate_fix(time_steps_fix) + + # ...and go to next interval + t_last = int(t_last + max(t_interval - t_overlap, 1)) + t_interval = t_interval_default / t_step + t_overlap = t_overlap_default / t_step + + elif not valid: + # number of interval changes + n_change += 1 + t_interval = int(math.floor(t_interval / 2)) + if t_interval == 0: + t_interval = 1 + if t_interval < t_overlap: + t_overlap = t_interval - 1 + + if not valid and t_interval < 1: + print('Lower limit reached') + end_validation = time.time() + + end_rh = time.time() + + community.analyze_results_new() + + start_write = time.time() + folder_name = 'Community' + flag = True + counter = 0 + while flag: + # ToDo JBR: apply Felix changes + try: + os.mkdir('output_files/' + folder_name) + flag = False + except FileExistsError: + counter += 1 + folder_name = folder_name + '_' + str(counter) + + + self.show_results_agents(prosumer_dict, folder_name) + + self.show_results_agents(comm_assets.comm_assets, folder_name) + # Todo JBR: change to community main + self.export_comm(community, time_steps, folder_name) + + def export_comm(self, community, time_steps, folder): + """ + Community data is stored in an Excel sheet. + ToDo JBR: make it more dynamic and clean the data that is stored + + Parameters + ---------- + community: instance of Community-class + time_steps: time steps of the whole model + folder: name of folder where data is stored + """ + + comm_attr = community.show_yourself() + + p_int = comm_attr['config']['elec_price_int_low'][time_steps] + p_inj = comm_attr['config']['injection_price'][time_steps] + p_ext = comm_attr['config']['elec_price_ext_low'][time_steps] + + prices = pd.DataFrame({'internal buy': p_int, 'sell': p_inj, 'external buy': p_ext}) + + annuities_df = pd.DataFrame(comm_attr['annuity_dict']) + grd_exchange = comm_attr['community_power_data'] + rsl = comm_attr['_rsl'] + analysis_eco = pd.DataFrame(comm_attr['analysis']['economical']) + analysis_tech = pd.DataFrame(comm_attr['analysis']['technical']) + + with pd.ExcelWriter('output_files/' + folder + '/results_' + community.name + '.xlsx') as writer: + prices.to_excel(writer, sheet_name='Prices') + annuities_df.to_excel(writer, sheet_name='PS-Annuities') + for key, value in grd_exchange.items(): + value.to_excel(writer, sheet_name=str(key)) + + for key, value in rsl.items(): + value.to_excel(writer, sheet_name=str(key)) + analysis_eco.to_excel(writer, sheet_name='Analysis economical') + analysis_tech.to_excel(writer, sheet_name='Analysis technical') +
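+ # Hedged usage sketch: the per-sheet export above generalises to any dict of + # DataFrames, as long as the sheet names are unique strings, e.g. + # frames = {'Prices': prices, 'PS-Annuities': annuities_df} # illustrative + # with pd.ExcelWriter('output_files/demo.xlsx') as writer: + # for name, df in frames.items(): + # df.to_excel(writer, sheet_name=str(name)) +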
+ def show_results_agents(self, agents_data, folder): + """ + Stores the prosumers' results. The data includes information about the initially calculated schedule and the + deviations from CA usage or community-sharing. + + Parameters + ---------- + agents_data: dict with agent objects + folder: name of the folder where the data is stored + + """ + for each_agent in agents_data.keys(): + results = agents_data[each_agent].get_results() + with pd.ExcelWriter('output_files/' + folder + '/results_' + str(each_agent) + '.xlsx') as writer: + for rsl in results: + results[rsl].to_excel(writer, sheet_name=str(rsl)) \ No newline at end of file diff --git a/District/model/BaseComm.py b/District/model/BaseComm.py index c295ad0953779df0a30993211e447140afa581b8..c96802d9c44a17b7ff1ca2efce7e4620023332fc 100644 --- a/District/model/BaseComm.py +++ b/District/model/BaseComm.py @@ -37,8 +37,6 @@ from component_library.component_models.EMS_components.Coordinator import Coordi component_lib_path = os.path.dirname(os.path.abspath(__file__)) component_lib_path = os.path.join(os.path.dirname(os.path.dirname(component_lib_path)), 'community_library', 'model') -# TODO is not working yet -#components = scripts.get_all_class.run(component_lib_path) class BaseComm: @@ -53,8 +51,8 @@ class BaseComm: self.comm_assets = actors_dict['Comm_Comps'] self.flex = pd.DataFrame(index=self.T) self.community_power_data = dict() - self.__rsl = dict() - self.__var_dict_sizing = dict() + self._rsl = dict() + self._var_dict_sizing = dict() self.analysis = dict() self.ca_strategy = ca_strategy self.annuity_dict = dict() # stores the annuities of all prosumers @@ -187,7 +185,7 @@ class BaseComm: self.community_power_data['fixed']['agg_demand'] = 0.0 self.community_power_data['fixed']['internal_exchange'] = 0.0 self.flex_activations = pd.DataFrame(index=self.T) - self.__rsl['fixed_rsl'] = pd.DataFrame(data= self.temp_rsl, index=self.T) + self._rsl['fixed_rsl'] = pd.DataFrame(data= self.temp_rsl, index=self.T) for ps_name, ps in self.prosumer.items(): if t_fixed[0] == self.T[0]: @@ -233,7 +231,7 @@ class BaseComm: self.community_power_data['rescheduled'].loc[t_fixed, 'agg_generation'].combine( self.community_power_data['rescheduled'].loc[t_fixed, 'agg_demand'],min) - self.__rsl['fixed_rsl'].loc[t_fixed] = self.temp_rsl.loc[t_fixed] + self._rsl['fixed_rsl'].loc[t_fixed] = self.temp_rsl.loc[t_fixed] # UPDATE COORDINATOR #ToDO JBR: Update net_gen and net_demand as well @@ -293,8 +291,8 @@ class BaseComm: self.model_sizing.add_component('annual_cost_' + each_var[1] + '_' + cc_name, var_dict[(each_var[0], each_var[1], cc_name)]) self.model_sizing.cons.add(var_dict[(each_var[0], each_var[1], cc_name)] == var) - self.__var_dict_sizing = var_dict - self.coordinator.implement_strategy(time_steps, self.__var_dict_sizing, self.model_sizing, [self.ca_strategy]) + self._var_dict_sizing = var_dict + self.coordinator.implement_strategy(time_steps, self._var_dict_sizing, self.model_sizing, [self.ca_strategy]) solver = pyo.SolverFactory('gurobi') solver.options['MIPGap'] = 0.02 @@ -303,26 +301,26 @@ class BaseComm: if (self.__solver_result.solver.status == SolverStatus.ok) and ( self.__solver_result.solver.termination_condition == TerminationCondition.optimal): - if 'sizing' not in self.__rsl.keys(): - self.__rsl = {'sizing': pd.DataFrame(columns=list(self.__var_dict_sizing.keys()), index=self.T)} - self.__rsl = {'sizing': self.__extract_results(self.__var_dict_sizing, time_steps)} + if 0 not in self._rsl.keys(): + self._rsl = {0: pd.DataFrame(columns=list(self._var_dict_sizing.keys()), index=self.T)} + self._rsl = {0: self.__extract_results(self._var_dict_sizing, time_steps)} # add the result to the prosumer class ov = pyo.value(self.model_sizing.f1)
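+ # Note (hedged): the sizing results are now keyed by the integer 0 rather + # than the string 'sizing'; this mirrors BaseProsumer, which likewise stores + # its single-objective results under self._rsl[0]. print('OV: ' + 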
str(ov)) for cc_name, cc in self.comm_assets.items(): - cc.add_result(self.__rsl['sizing'], 'sizing') - for var in self.__var_dict_sizing.keys(): + cc.add_result(self._rsl[0], 0) + for var in self._var_dict_sizing.keys(): if 'cap' == var[0]: print('Finished comm comp sizing --> The size of the community component ' + str( - var[1]) + ' is ' + str(self.__rsl['sizing'][var][0])) + var[1]) + ' is ' + str(self._rsl[0][var][0])) else: - self.__rsl['post_sizing'] = self.__extract_results(self.__var_dict_sizing, time_steps) + self._rsl['post_sizing'] = self.__extract_results(self._var_dict_sizing, time_steps) - for var in self.__var_dict_sizing.keys(): + for var in self._var_dict_sizing.keys(): if 'cap' == var[0]: print('Finished comm comp post sizing --> The size of the community component ' + str( - var[1]) + ' is ' + str(self.__rsl['post_sizing'][var][0])) + var[1]) + ' is ' + str(self._rsl['post_sizing'][var][0])) return @@ -336,28 +334,28 @@ class BaseComm: # ADD TO COMMUNITY DATA self.community_power_data['comm_comps'] = pd.DataFrame(index=self.T) if self.ca_strategy == 'sizing_min_costs': - self.community_power_data['comm_comps']['residual_generation'] = self.__rsl['sizing']['P_opt_generation'] - self.__rsl['sizing']['P_opt_demand'] + self.community_power_data['comm_comps']['residual_generation'] = self._rsl[0]['P_opt_generation'] - self._rsl[0]['P_opt_demand'] elif self.ca_strategy == 'sizing_max_wholesale_profit': - self.community_power_data['comm_comps']['residual_generation'] = self.__rsl['sizing']['residual_supply'] + self.community_power_data['comm_comps']['residual_generation'] = self._rsl[0]['residual_supply'] elif self.ca_strategy == 'sizing_max_welfare' or self.ca_strategy == 'sizing_max_operational_profit': - ext_demand = self.__rsl['sizing']['P_external_demand'] - ext_supply = self.__rsl['sizing']['P_external_supply'] + ext_demand = self._rsl[0]['P_external_demand'] + ext_supply = self._rsl[0]['P_external_supply'] self.community_power_data['comm_comps']['residual_generation'] = copy.copy(ext_supply - ext_demand) elif self.ca_strategy == 'sizing_max_welfare_peak': - ext_demand = self.__rsl['sizing']['P_external_demand_low'] + self.__rsl['sizing']['P_external_demand_high'] - ext_supply = self.__rsl['sizing']['P_external_supply_low'] + self.__rsl['sizing']['P_external_supply_high'] + ext_demand = self._rsl[0]['P_external_demand_low'] + self._rsl[0]['P_external_demand_high'] + ext_supply = self._rsl[0]['P_external_supply_low'] + self._rsl[0]['P_external_supply_high'] self.community_power_data['comm_comps']['residual_generation'] = copy.copy(ext_supply - ext_demand) self.community_power_data['comm_comps']['agg_generation'] = copy.copy(self.community_power_data['initial']['agg_generation']) self.community_power_data['comm_comps']['agg_demand'] = copy.copy(self.community_power_data['initial']['agg_demand']) self.community_power_data['comm_comps']['internal_exchange'] = 0.0 - for var in self.__var_dict_sizing.keys(): + for var in self._var_dict_sizing.keys(): if isinstance(var, tuple): if var[0] == 'output': - self.community_power_data['comm_comps']['agg_generation'] += self.__rsl['sizing'][var] + self.community_power_data['comm_comps']['agg_generation'] += self._rsl[0][var] elif var[0] == 'input': - self.community_power_data['comm_comps']['agg_demand'] += self.__rsl['sizing'][var] + self.community_power_data['comm_comps']['agg_demand'] += self._rsl[0][var] self.community_power_data['comm_comps']['internal_exchange'] = self.community_power_data['comm_comps']['agg_demand'].combine( 
self.community_power_data['comm_comps']['agg_generation'], min) @@ -741,7 +739,7 @@ class BaseComm: return result_df def plot_results(self, strategy_list): - rsl = self.__rsl['fixed'] + rsl = self._rsl['fixed'] x_data = self.T fig, ax = plt.subplots() @@ -856,9 +854,9 @@ class BaseComm: inv_costs = 0 if len(self.comm_assets) > 0 and key != 'initial': - for each_var in self.__var_dict_sizing: + for each_var in self._var_dict_sizing: if each_var[0] == 'annual_cost' and each_var[-1] in self.comm_assets.keys(): - inv_costs += self.__rsl['sizing'][each_var][0] + inv_costs += self._rsl[0][each_var][0] annuity = -inv_costs + \ (internal_revenue.sum() + external_revenue.sum() diff --git a/District/scripts/extract_inputs.py b/District/scripts/extract_inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..51ed7e4f67d25900d376c74b52e74081875687b0 --- /dev/null +++ b/District/scripts/extract_inputs.py @@ -0,0 +1,6 @@ +from Model_Library.Prosumer.scripts.extract_inputs import ProsumerExtractor + + +class DistrictExtractor(ProsumerExtractor): + def __init__(self): + super().__init__() diff --git a/Prosumer/main.py b/Prosumer/main.py index a1095fecf788a87017d05e87e47524d39090d81f..e22fa3296a3332f5ccfcd7af3d03d341a48d9706 100644 --- a/Prosumer/main.py +++ b/Prosumer/main.py @@ -1,28 +1,6 @@ # main.py is the central script to execute the flexible modelling of prosumers within this simulation tool. # For further Information please have a closer look at the documentation in "README.txt" -""" -The FEN-Tool is an optimisation tool for prosumer, district, and interconnected city models. - -Copyright (C) 2022. Mauricio Celi Cortés, Jingyu Gong, Jonas van Ouwerkerk, Felix Wege, Nie Yi, Jonas Brucksch - -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU Lesser General Public License -as published by the Free Software Foundation; either version 3 of -the License, or (at your option) any later version. - -This program is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public -License along with this library; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA -02110-1301 USA. 
- -Project host: RWTH Aachen University, Aachen, Germany -Project Website: https://www.fenaachen.net/projekte/fen-ineed-dc -""" +# This work is published under the public license: XXX # Importing the necessary packages # from scripts import extract_inputs @@ -30,15 +8,18 @@ import Model_Library.Prosumer.scripts as scripts import Model_Library.Prosumer.model as ps_model -import Database_Connector.use_database as use_db +#import scripts.Database.use_database as use_db import time import pandas as pd from datetime import datetime import os +import numpy as np import Model_Library.Prosumer.scripts.time_series_processing as ts import Model_Library.Prosumer.scripts.calc_irradiance as irr + +from Model_Library.Prosumer.scripts.extract_inputs import ProsumerExtractor + class Main: def __init__(self, mode, *args, aggregation=False): # The number of elements in prosumer name defines the number of prosumers @@ -53,6 +34,8 @@ class Main: self.savings = {} self.total_costs = {} self.name_list = list(args[0].keys())[0] + + Extractor = ProsumerExtractor() # The Process can only run with one prosumer now. # In the next steps, multiple prosumers will be allowed if mode == 1: # connect with data bank @@ -105,7 +88,7 @@ class Main: self.hot_water_demand_yearly = args[0][self.name_list]['hot_water_demand'] # Create prosumer object - prosumer_config[self.name_list] = scripts.extract_inputs.get_config(configuration_path) + prosumer_config[self.name_list] = Extractor.get_config(configuration_path) self.prosumer[self.name_list] = ps_model.BaseProsumer( name=self.name_list, prosumer_configuration=prosumer_config[self.name_list]) @@ -121,7 +104,7 @@ class Main: print('The strategies added are: ' + str(self.prosumer[self.name_list].get_strategy())) # Extract sector matrices - matrices_path = scripts.extract_inputs.get_sector_matrices(topology_path) + matrices_path = Extractor.get_sector_matrices(topology_path) sector_matrices = {} for each_sector in matrices_path: # import csv matrices and save in a dictionary @@ -135,8 +118,8 @@ class Main: self.prosumer[self.name_list].set_components(sector_matrices, self.storage_states) # Read time series data from path - prosumer_data_path_dict[self.name_list] = scripts.extract_inputs.get_path(data_path) - input_profiles[self.name_list] = scripts.extract_inputs.run(prosumer_data_path_dict, self.name_list, + prosumer_data_path_dict[self.name_list] = Extractor.get_path(data_path) + input_profiles[self.name_list] = Extractor.run(prosumer_data_path_dict, self.name_list, self.t_step) #test=input_profiles['SCN2_CAT1_PV_3000_6000']['irradiance'].isnull().any() # Adjust irradiation for required tilt @@ -318,7 +301,7 @@ class Main: self.charge_status[name_list] = {} t1 = time.time() - self.prosumer[name_list].show_yourself()['_BaseProsumer__model'].write( + self.prosumer[name_list].show_yourself()['_model'].write( 'output_files/'+name_list+'/model.lp', io_options={'symbolic_solver_labels': True}) t2 = time.time() #print('show_yourself took [s]:\t' + str(t2 - t1)) @@ -431,3 +414,403 @@ class Main: # prosumer.plt_results(('H2_E', 'prss_st'), ('fc', 'elec_cns'), ('elec_cns', 'electroly'), ('inv', 'grd'), # name='hydrogen-elec_grid', dpi=100) # prosumer.plt_results(('H2_E', 'prss_st'), ('fc', 'elec_cns'), ('elec_cns', 'electroly'), ('inv', 'grd'), name='hydrogen-elec_grid', dpi=100) +
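+ # Hedged usage sketch for the helper below: after an optimization run, + # main.show_results_agents(['PS1'], 'Community') # names illustrative + # writes output_files/Community/results_PS1.xlsx with one sheet per result. +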
+ def show_results_agents(self, agents_list, folder): + """ + Stores the prosumers' results. The data includes information about the initially calculated schedule and the + deviations from CA usage or community-sharing. + + Parameters + ---------- + agents_list: list with agent names + folder: name of the folder where the data is stored + + """ + for each_agent in agents_list: + results = self.prosumer[each_agent].get_results() + with pd.ExcelWriter('output_files/' + folder + '/results_' + str(each_agent) + '.xlsx') as writer: + for rsl in results: + results[rsl].to_excel(writer, sheet_name=str(rsl)) + +class Main_CA: + def __init__(self, mode, *args, aggregation=False): + # The number of elements in prosumer name defines the number of prosumers + # self.prosumer stores all instantiated prosumers in a dict + # self.prosumer = {prosumer_name: prosumer obj} + self.charge_status = {} + self.interim_results = {} + self.comm_assets = {} + self.commentary = args[8] + self.storage_states = args[9] + self.t_shift = args[10] + self.name_list = args[0].keys() + + Extractor = ProsumerExtractor() + # The Process can only run with one prosumer now. + # In the next steps, multiple prosumers will be allowed + if mode == 1: # connect with data bank + # todo: adjust use_database get_scenario after the adjustment for + # mode 2 + start_db = time.perf_counter() + end_db = time.perf_counter() + + # create sector matrices for each prosumer + scenario = args[0] + sector_matrices, input_profiles, ca_config, scenario_time, comp_dict, name_list = \ + use_db.get_scenario(scenario) + + # Set the simulation time frame + t_st = datetime.strptime(scenario_time['t_start'][0], "%Y-%m-%dT%H:%M:%S") # needs to be string + t_end = datetime.strptime(scenario_time['t_end'][0], "%Y-%m-%dT%H:%M:%S") # "2019-07-25 00:00:00" + t_diff = t_end - t_st + self.t_start = str(t_st) + self.t_horizon = t_diff.days * 24 # time in [h] + self.t_step = scenario_time['t_step'][0] # time step in [h] + elif mode == 2: # use local data + start_db = 0 + end_db = 0 + # Set the simulation time frame for iteration + self.t_start = args[1] # "2019-07-25 00:00:00" + self.t_end = args[2] # "2019-07-25 00:00:00" + self.t_step = args[3] # time step in [h] + # Horizon for prediction + self.t_horizon = (pd.Timestamp(self.t_end) - pd.Timestamp(self.t_start)) / np.timedelta64(self.t_step, 'h') + 1 + self.prediction_methods = args[4] + self.t_current_value_length = args[5] + self.end_of_simulation = args[6] + self.t_history = args[7]
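+ # Hedged worked example of the horizon formula above: with + # t_start = '2019-07-25 00:00:00', t_end = '2019-07-26 00:00:00' and + # t_step = 1, t_horizon = 24 h / 1 h + 1 = 25 prediction steps. + # start date for profile generation (e.g. 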
demand) + self.t_begin_profile = self.t_start - pd.Timedelta(days=self.t_history) + # length of profile to be generated + self.t_profile_length = (pd.Timestamp(self.t_end) - pd.Timestamp(self.t_begin_profile)) / np.timedelta64(self.t_step, 'h') + 24 + + # Extraction of input files + input_profiles = dict.fromkeys(self.name_list) + ca_config = dict.fromkeys(self.name_list, {}) + ca_data_path_dict = dict.fromkeys(self.name_list, {}) + + for each_ca in self.name_list: + topology_path = args[0][each_ca]['topology_path'] + configuration_path = args[0][each_ca]['config_path'] + data_path = args[0][each_ca]['data_path'] + + # Create prosumer object + ca_config[each_ca] = Extractor.get_config(configuration_path) + self.comm_assets[each_ca] = ps_model.CommunityAsset( + name=each_ca, + ca_configuration=ca_config[each_ca]) + + # Set dates for optimization problem + self.comm_assets[each_ca].set_dates(self.t_start, self.t_horizon, t_step=self.t_step) + + # Add strategy of prosumer + self.comm_assets[each_ca].add_strategy( + ['variable_operation_costs', 'own_consumption', 'annuity', + 'co2']) + if self.commentary: + print('The strategies added are: ' + str(self.comm_assets[each_ca].get_strategy())) + + # Extract sector matrices + matrices_path = Extractor.get_sector_matrices(topology_path) + sector_matrices = {} + for each_sector in matrices_path: + # import csv matrices and save in a dictionary + sector_matrices.update( + {each_sector.replace('_matrix_path', ''): pd.read_csv(matrices_path[each_sector])}) + + # Set storage components from matrix to state of previous calculations if required + if self.storage_states: + self.comm_assets[each_ca].set_components(sector_matrices, self.storage_states[each_ca]) + else: + self.comm_assets[each_ca].set_components(sector_matrices, self.storage_states) + + # Read time series data from path + ca_data_path_dict[each_ca] = Extractor.get_path(data_path) + input_profiles[each_ca] = Extractor.run(ca_data_path_dict, each_ca, + self.t_step) + + # Adjust irradiation for required tilt + lambda_1 = 14.122 + lambda_st = 15 + phi = 52.21 + psi_f = 0 + beta = 30 + input_profiles[each_ca]['irradiance'] = \ + irr.generate_g_t_series(input_profiles[each_ca]['irradiance'], beta, psi_f, phi, + lambda_st, lambda_1, self.t_begin_profile, self.t_profile_length, t_step=self.t_step) + + # Generate demands with generic generator tools if required + # DIFFERENT DEMANDS THAN PROSUMERS + for path_item in ca_data_path_dict[each_ca]: + if ca_data_path_dict[each_ca][path_item] == 'generate': + if path_item == 'demand_electric': + input_profiles[each_ca]['demand_electricity'] = \ + ts.generate_profile(self.t_begin_profile, self.t_step, self.t_profile_length, + generate_elec_demand=('h0', 0)) + + elif path_item == 'demand_heat': + input_profiles[each_ca]['demand_heat'] = \ + ts.generate_profile(self.t_begin_profile, self.t_step, self.t_profile_length, + generate_therm_demand=('EFH', 0, input_profiles[each_ca]['temperature']), + generate_water_demand=('EFH', 0, input_profiles[each_ca]['temperature'])) + + # Generate prediction time series + # todo: nicer coding required. 
generic code for all input time series + for path_item in ca_data_path_dict[each_ca]: + if path_item == 'demand_electric': + predict_electric_demand = ts.generate_profile( + self.t_start, self.t_step, self.t_horizon, + elec_demand=input_profiles[each_ca]['demand_electricity']['elec_demand'], + prediction=self.prediction_methods['demand_electric']) + # Create final input time series by using current values + predict_electric_demand['elec_demand'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] = \ + input_profiles[each_ca]['demand_electricity']['elec_demand'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] + # Add time series to prosumer model + self.comm_assets[each_ca].add_profile(predict_electric_demand) + elif path_item == 'demand_heat': + predict_heat_demand = ts.generate_profile( + self.t_start, self.t_step, self.t_horizon, + therm_demand=input_profiles[each_ca]['demand_heat']['therm_demand'], + hot_water_demand=input_profiles[each_ca]['demand_heat']['hot_water_demand'], + prediction=self.prediction_methods['demand_heat']) + # Create final input time series by using current values + predict_heat_demand['therm_demand'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] = \ + input_profiles[each_ca]['demand_heat']['therm_demand'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] + # Create final input time series by using current values + predict_heat_demand['hot_water_demand'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] = \ + input_profiles[each_ca]['demand_heat']['hot_water_demand'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] + # Add time series to prosumer model + self.comm_assets[each_ca].add_profile(predict_heat_demand) + elif path_item == 'temperature': + predict_temperature = ts.generate_profile( + self.t_start, self.t_step, self.t_horizon, + air_temperature=input_profiles[each_ca]['temperature'], + prediction=self.prediction_methods['temperature']) + # Create final input time series by using current values + predict_temperature['air_temperature'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] = \ + input_profiles[each_ca]['temperature'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] + # Add time series to prosumer model + self.comm_assets[each_ca].add_profile(predict_temperature) + elif path_item == 'day_ahead_price': + predict_day_ahead = ts.generate_profile( + self.t_start, self.t_step, self.t_horizon, + elec_price=input_profiles[each_ca]['day_ahead_price'], + prediction=self.prediction_methods['day_ahead_price']) + # Create final input time series by using current values + predict_day_ahead['elec_price'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] = \ + input_profiles[each_ca]['day_ahead_price'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] + # Add time series to prosumer model + self.comm_assets[each_ca].add_profile(predict_day_ahead) + elif path_item == 'intraday_price': + predict_intraday = ts.generate_profile( + self.t_start, self.t_step, self.t_horizon, + elec_price=input_profiles[each_ca]['intraday_price'], + prediction=self.prediction_methods['intraday_price'])
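+ # (pattern note, hedged: for every series the first t_current_value_length + # hours of the forecast are overwritten with measured data, so the optimizer + # sees actual values near real time and pure predictions further out)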
+ # Create final input time series by using current values + predict_intraday['elec_price'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] = \ + input_profiles[each_ca]['intraday_price'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] + # Add time series to prosumer model + self.comm_assets[each_ca].add_profile(predict_intraday) + elif path_item == 'irradiance': + predict_irradiance = ts.generate_profile( + self.t_start, self.t_step, self.t_horizon, + irradiance=input_profiles[each_ca]['irradiance'], + prediction=self.prediction_methods['solar_radiation']) + # Create final input time series by using current values + predict_irradiance['irradiance'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] = \ + input_profiles[each_ca]['irradiance'][ + self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] + # Add time series to prosumer model + self.comm_assets[each_ca].add_profile(predict_irradiance) + + # Aggregate profiles before adding them to prosumer if mode is selected + if aggregation: + print("Started with (!!!) aggregation!") + + predict_electric_demand['elec_demand'][self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)]= \ + input_profiles[each_ca]['demand_electricity']['elec_demand'][self.t_start:self.t_start + pd.Timedelta(hours=self.t_current_value_length - 1)] + + ### + # Import aggregation module and define aggregation parameters + import temporal_aggregator.aggregation_module as am + cluster_method = 'hierarchical' + extreme_period_method = "replace_cluster_center" + num_of_typical_periods = 64 + # Note: if 24/hoursPerPeriod has more than 2 digits after the decimal point errors may occur + hoursPerPeriod = 24 + # Create aggregator object + aggregator = am.Aggregator( + {**predict_electric_demand, **predict_heat_demand, **predict_temperature, **predict_day_ahead, + **predict_intraday, **predict_irradiance}, clusterMethods=[cluster_method]) + # peakMin_list = aggregator.getInputKeys() + # peakMax_list = aggregator.getInputKeys() + peakMin_list = [] + peakMax_list = [] + meanMin_list = [] + meanMax_list = [] + # Aggregate time series + aggregator.aggregate(num_of_typical_periods, hoursPerPeriod, + _extremePeriodMethod=extreme_period_method, _peakMin=peakMin_list, \ + _peakMax=peakMax_list, _meanMin=meanMin_list, _meanMax=meanMax_list) + # Read aggregated time series + aggregated_profiles = aggregator.getAggregationsDict()[cluster_method] + + # Adapt time horizon + index = list(list(aggregated_profiles.values())[0].index) + self.t_start = index[0].strftime("%Y-%m-%d %H:%M:%S") + self.t_horizon = len(index) * 24 / hoursPerPeriod # time in [h] + self.t_step = 24 / hoursPerPeriod # time step in [h] + + self.comm_assets[each_ca].set_dates(self.t_start, self.t_horizon, t_step=self.t_step) + + # Add aggregated profiles to prosumer + self.comm_assets[each_ca].add_profile(aggregated_profiles) + else: # return error + raise ValueError('Mode can only be 1 or 2.') + self.connect_with_db = end_db - start_db + self.prosumer_name_list = self.name_list # save prosumer names in a list + + # Set components from matrix + if mode == 1: + self.prosumer[self.name_list[0]].set_components(sector_matrices[self.name_list[0]], self.storage_states, comp_dict[self.name_list[0]]) + global_irradiance, temperature = use_db.config_profiles(input_profiles[name_list[0]], self.t_step) + + def run_optimization(self, name_list): + # ToDo: callback, termination condition; maybe check the feasibility of the model before building, + # and catch common input errors, etc. + for 
each_prosumer in name_list: + self.comm_assets[each_prosumer].run_optimization(strategy_name=['annuity'], + solver_name='gurobi', + commentary=self.commentary) + + def show_results(self, name_list, inter_results, final_iteration): + if not os.path.exists('output_files'): + os.makedirs('output_files') + # Results + for each_prosumer in name_list: + self.charge_status[each_prosumer] = {} + t1 = time.time() + self.comm_assets[each_prosumer].show_yourself()['_model'].write( + 'output_files/model.lp', io_options={'symbolic_solver_labels': True}) + t2 = time.time() + #print('show_yourself took [s]:\t' + str(t2 - t1)) + + # ToDo: wrap errors, exceptions in all functions + # Results df + t3 = time.time() + results = self.comm_assets[each_prosumer].get_results() + all_results = results[0] + t4 = time.time() + #print('get_results took [s]:\t' + str(t4 - t3)) + + # calculate number of results that have to be extracted for current loop of rolling horizon + if final_iteration: + amount_results = int((self.t_end - self.t_start) / np.timedelta64(self.t_step, 'h')) + else: + amount_results = self.t_shift + # Check if interim results need to be merged + if inter_results: + # Results already exist - dictionaries need to be merged + self.interim_results[each_prosumer] = pd.concat([inter_results[each_prosumer], all_results.head(amount_results)], axis=0, ignore_index=True) + else: + # First results - no need to merge + self.interim_results[each_prosumer] = all_results.head(amount_results) + + # Only print results when end of simulation is reached (after last rolling horizon interval) + if final_iteration: + full_results = self.interim_results[each_prosumer] + t5 = time.time() + full_results.to_pickle('output_files/results_' + each_prosumer + '.pkl') + # todo: make it one excel file with several pages + with pd.ExcelWriter('output_files/results_' + each_prosumer + '.xlsx') as writer: + full_results.to_excel(writer) + t6 = time.time() + #print('write_excel took [s]:\t' + str(t6 - t5)) + + if self.commentary: + print(self.comm_assets[each_prosumer].get_payoff_table()) + print(self.comm_assets[each_prosumer].get_pareto_values()) + + self.comm_assets[each_prosumer].plt_pareto_front(dpi=100) + + # Save charge status of storage for next iteration of rolling horizon + # extract all storage components + all_comp = self.comm_assets[each_prosumer].get_components() + for components in all_comp['Components']: + comp_object = all_comp['Components'][components] + comp_type = comp_object.get_type() + if comp_type == 'Storage': + self.charge_status[each_prosumer][components[0]] = all_results.iloc[int(self.t_current_value_length)-1][('energy', components[0])]\ + /all_results.iloc[int(self.t_current_value_length)-1][('cap', components[0])]
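+ # Hedged worked example of the SOC carry-over above: if storage 'bat' holds + # 4.5 kWh at a capacity of 10 kWh in the last committed step, the ratio 0.45 + # is saved and re-used as 'init soc' when the next rolling horizon interval + # instantiates the component (values illustrative). +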
+ # prosumer.plt_results(('grd', 'elec_cns'), 'ElectricityPrice', plt_type='weekday') + # prosumer.plt_results(('ehp', 'water_tes'), 'ElectricityPrice', plt_type='weekday') + # prosumer.plt_results(('E', 'bat'), ('pv', 'inv'), ('inv', 'grd'), ('inv', 'bat'), ('inv', 'elec_cns'), + # ('excess', 'inv'), name='electrical_flows', dpi=100) + # prosumer.plt_results(('pv', 'invpv'), ('invpv', 'grd'), ('invbat', 'bat'), ('invpv', 'elec_cns'), + # ('grd', 'elec_cns')) # for the slide + # prosumer.plt_results(('ehp', 'water_tes'), ('st', 'water_tes'), plt_type='hour') + # prosumer.plt_results(('pv', 'inv'), ('grd', 'elec_cns'), plt_type='hour', name='pv_vs_power_injection', dpi=100) + # prosumer.plt_results(('pv', 'inv'), ('E', 'bat'), plt_type='hour') + # prosumer.get_pareto_values().to_csv('output_files/pareto_values.csv') + # prosumer.get_payoff_table().to_csv('output_files/payoff_table.csv') + # prosumer.plt_results(('pv', 'inv'), ('inv', 'grd'), ('excess', 'inv'), ('inv', 'elec_cns'), ('fc', 'elec_cns')) + # prosumer.plt_results(('pv', 'inv'), ('inv', 'grd'), ('elec_cns', 'electroly'), ('inv', 'elec_cns'), + # ('fc', 'elec_cns'), ('elec_cns', 'elec_dmd'), name='electrical components', dpi=100) + # prosumer.plt_results(('H2_E', 'prss_st'), ('elec_cns', 'electroly'), ('fc', 'elec_cns'), ('fc', 'water_tes'), + # name='hydrogen components', dpi=100) + # # prosumer.plt_results(('energy', 'water_tes'), ('gas_boi', 'water_tes'), ('fc', 'water_tes'), ('water_tes', 'therm_cns'), + # # ('water_tes', 'water_cns'), name='thermal components', dpi=100) + # prosumer.plt_results(('pv', 'inv'), ('inv', 'grd'), ('elec_cns', 'electroly'), ('gas_boi', 'water_tes'), + # ('grd', 'elec_cns')) + # prosumer.plt_results(('energy', 'water_tes'), ('gas_boi', 'water_tes'), ('fc', 'water_tes'), ('water_tes', 'therm_cns'), + # ('water_tes', 'water_cns'), ('grd', 'elec_cns')) + # prosumer.plt_results(('H2_E', 'prss_st'), ('fc', 'elec_cns'), ('elec_cns', 'electroly'), ('inv', 'grd'), + # name='hydrogen-elec_grid', dpi=100) + # prosumer.plt_results(('H2_E', 'prss_st'), ('fc', 'elec_cns'), ('elec_cns', 'electroly'), ('inv', 'grd'), name='hydrogen-elec_grid', dpi=100) + + def show_results_agents(self, agents_list, folder): + """ + Stores the agents' results. The data includes information about the initially calculated schedule and the + deviations from CA usage or community-sharing. + + Parameters + ---------- + agents_list: list with agent names + folder: name of the folder where the data is stored + + """ + for each_agent in agents_list: + results = self.comm_assets[each_agent].get_results() + with pd.ExcelWriter('output_files/' + folder + '/results_' + str(each_agent) + '.xlsx') as writer: + for rsl in results: + results[rsl].to_excel(writer, sheet_name=str(rsl))
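+ # Hedged sketch for the 'just one show_results method' ToDo below: the + # exporters differ only in the dict they read from, so one hypothetical + # helper could serve prosumers, community assets and CAs alike: + # def _export_results(self, agents, folder): + # # agents: dict mapping name -> object exposing get_results() + # for name, agent in agents.items(): + # results = agent.get_results() + # with pd.ExcelWriter('output_files/' + folder + '/results_' + str(name) + '.xlsx') as writer: + # for rsl in results: + # results[rsl].to_excel(writer, sheet_name=str(rsl))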
+ + def show_results_cc(self, ca_list, folder): + """ + Stores the community assets' results. The data includes information about schedules before and after community sharing + ToDo JBR: just one show_results method + + Parameters + ---------- + ca_list: list of objects of BaseProsumer class that represent community assets + folder: place to store data + """ + for each_ca in ca_list: + results = self.comm_assets[each_ca].get_results() + with pd.ExcelWriter('output_files/' + folder + '/results_' + str(each_ca) + '.xlsx') as writer: + for rsl in results: + results[rsl].to_excel(writer, sheet_name=str(rsl)) \ No newline at end of file diff --git a/Prosumer/model/BaseProsumer.py b/Prosumer/model/BaseProsumer.py index 0edd19d66495979a92f00a4c4e4e53abf108d40b..ab8ff7dc9639bdd6b27764feeaa9600012968281 100644 --- a/Prosumer/model/BaseProsumer.py +++ b/Prosumer/model/BaseProsumer.py @@ -52,27 +52,27 @@ class BaseProsumer: :param prosumer_configuration: dictionary containing the prosumer configuration can be formulated according to the needs of the EMS """ - self.__name = name - self.__configuration = prosumer_configuration - self.__flows = {'electricity': {}, 'heat': {}, 'gas': {}, 'hydrogen': {}, 'solar': {}} - self.__components = dict() - self.__plant_parameters = dict() - self.__var_dict = dict() - self.__connection_list = list() #to use it for reoptimization - self.__input_profiles = dict() - self.__strategy_list = list() - self.__time_steps = list() - self.__start_date = None - self.__time_horizon = None - self.__model_resolution = None - self.__model = None - self.__model_validation = None - self.__rsl = None - self.__solver_result = None - self.__payoff = None - self.__pareto_values = None - self.__graph = dict() - self.__component_properties = dict() + self._name = name + self._configuration = prosumer_configuration + self._flows = {'electricity': {}, 'heat': {}, 'gas': {}, 'hydrogen': {}, 'solar': {}} + self._components = dict() + self._plant_parameters = dict() + self._var_dict = dict() + self._connection_list = list() #to use it for reoptimization + self._input_profiles = dict() + self._strategy_list = list() + self._time_steps = list() + self._start_date = None + self._time_horizon = None + self._model_resolution = None + self._model = None + self._model_validation = None + self._rsl = None + self._solver_result = None + self._payoff = None + self._pareto_values = None + self._graph = dict() + self._component_properties = dict() def set_components(self, sector_matrices, initial_states, *args): """ @@ -95,7 +95,7 @@ class BaseProsumer: # todo jgn: the plant parameter dictionary should be removed and all parameters # should be stored in database and read automatically for i in range(len(sector_matrices[sector]['comp_name'])): - self.__plant_parameters[ + self._plant_parameters[ (sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] = {} df_holder = sector_matrices[sector].drop( ['comp_name', 'comp_type', 'model'] + [item for item in sector_matrices[sector]['comp_name']], @@ -103,7 +103,7 @@ class BaseProsumer: for j in range(df_holder.shape[1]): # check for nan, nan != nan if df_holder.iloc[i, j] == df_holder.iloc[i, j]: - self.__plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector] + self._plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector] ['comp_type'][i])][df_holder.columns[j]] = df_holder.iloc[i, j] # Instantiate the classes, there can be several components from the same type but different name @@ -120,23 +120,23 @@ class BaseProsumer: properties = args[0][sector_matrices[sector]['comp_name'][i]]
else: raise TypeError('Only the optional parameter is comp_dict is allowed for set_components.') - self.__component_properties[sector_matrices[sector]['comp_name'][i]] = properties + self._component_properties[sector_matrices[sector]['comp_name'][i]] = properties # todo jgn: this can be put into data pre-processing! (Feasibility check) - min_size_exist = 'min_size' in self.__plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] - max_size_exist = 'max_size' in self.__plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] - cur_size_exist = 'current_size' in self.__plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] + min_size_exist = 'min_size' in self._plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] + max_size_exist = 'max_size' in self._plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] + cur_size_exist = 'current_size' in self._plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] if (not hasattr(sector_matrices[sector]['comp_type'][i], 'name')) and min_size_exist and max_size_exist and cur_size_exist: # If previous rolling horizon exists replace initial storage SOC in properties if 'init soc' in properties.keys() and initial_states: component_name = sector_matrices[sector]['comp_name'][i] properties.loc[0, 'init soc'] = initial_states[component_name] # if the component objective is not instantiated yet, and the size variables all exist! - self.__components[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] = components[ + self._components[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])] = components[ sector_matrices[sector]['comp_type'][i]](comp_name=sector_matrices[sector]['comp_name'][i], - min_size=self.__plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])]['min_size'], - max_size=self.__plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])]['max_size'], - current_size=self.__plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])]['current_size'], - prosumer_config=self.__configuration, + min_size=self._plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])]['min_size'], + max_size=self._plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])]['max_size'], + current_size=self._plant_parameters[(sector_matrices[sector]['comp_name'][i], sector_matrices[sector]['comp_type'][i])]['current_size'], + prosumer_config=self._configuration, properties=properties) @@ -176,18 +176,18 @@ class BaseProsumer: # Assign variables to var_dict from local_var_dict, the distinction is needed in order to define exclusively # electrical or thermal flows - self.__var_dict.update(local_var_dict) + self._var_dict.update(local_var_dict) for key in local_var_dict.keys(): - self.__connection_list.append(key) + self._connection_list.append(key) # Assign flow - self.__flows[sector] = flows + self._flows[sector] = flows def get_components(self): """ The components and inner connections should be protected properties and only accessible through this method. 
:return: Dictionary with all component and flow attributes """ - return {'Components': self.__components, 'Plant Parameters': self.__plant_parameters} + return {'Components': self._components, 'Plant Parameters': self._plant_parameters} def remove_components(self, sector): """ @@ -196,7 +196,7 @@ class BaseProsumer: """ # ToDo: also flows and variables (unusual functionality, development put off) if sector == 'all': - self.__components = dict() + self._components = dict() def add_strategy(self, strategy): """ @@ -205,8 +205,8 @@ class BaseProsumer: designated when trying to run the model and the EMS knows what each strategy means. Only strategies in the list can be implemented """ - self.__strategy_list.extend(strategy) - self.__strategy_list = list(dict.fromkeys(self.__strategy_list)) # remove duplicates from the strategy list + self._strategy_list.extend(strategy) + self._strategy_list = list(dict.fromkeys(self._strategy_list)) # remove duplicates from the strategy list def get_strategy(self): """ @@ -214,17 +214,17 @@ class BaseProsumer: list can be implemented :return: The current _StrategyList """ - return self.__strategy_list + return self._strategy_list def remove_strategy(self, *args): """ The method remove_strategy removes all given strategies from the strategy list, if the strategy is in the list. :param args: Strategy names. 'all' to remove all strategies from the strategy list """ - self.__strategy_list = [item for item in self.__strategy_list if - item not in list(args)] + self._strategy_list = [item for item in self._strategy_list if + item not in list(args)] if 'all' in list(args): - self.__strategy_list = list() + self._strategy_list = list() def set_dates(self, t_start, t_horizon, t_step=1): """ @@ -237,18 +237,18 @@ class BaseProsumer: :param t_horizon: Time horizon for the optimization as an integer in hour [h] :param t_step: time steps, i.e. resolution of the model in [h] """ - self.__start_date = pd.Timestamp(t_start) - self.__time_horizon = t_horizon - self.__model_resolution = t_step + self._start_date = pd.Timestamp(t_start) + self._time_horizon = t_horizon + self._model_resolution = t_step time_steps = [] - for t in pd.date_range(self.__start_date, self.__start_date + - timedelta(hours=self.__time_horizon) - timedelta(hours=t_step), - freq=str(self.__model_resolution)+'H'): + for t in pd.date_range(self._start_date, self._start_date + + timedelta(hours=self._time_horizon) - timedelta(hours=t_step), + freq=str(self._model_resolution) + 'H'): time_steps.append(t) - self.__time_steps = time_steps + self._time_steps = time_steps # todo jgn: the plant parameter should be completely removed - self.__plant_parameters['t_step'] = t_step + self._plant_parameters['t_step'] = t_step # self.__plant_parameters['freq_factor'] = self.__freq_factor def add_profile(self, profile_dict): @@ -262,9 +262,9 @@ class BaseProsumer: # ToDo: We have to define the names of the possible profiles beforehand, otherwise we cannot assign them to the # component. 
We have been using the conventions 'irradiance', 'demand', 'prices' for profile in profile_dict: - self.__input_profiles[profile] = profile_dict[profile].squeeze() - print('The maximum ' + profile + ' is:' + str(self.__input_profiles[profile].max())) - print('The average ' + profile + ' is:' + str(self.__input_profiles[profile].mean())) + self._input_profiles[profile] = profile_dict[profile].squeeze() + print('The maximum ' + profile + ' is:' + str(self._input_profiles[profile].max())) + print('The average ' + profile + ' is:' + str(self._input_profiles[profile].mean())) print("==========================================") def get_profile(self, profile_name='all'): @@ -273,7 +273,7 @@ class BaseProsumer: method by giving the ProfileName, ProfileName="all" will return all profiles. :return: Input profiles """ - return self.__input_profiles[profile_name] if profile_name is not 'all' else self.__input_profiles + return self._input_profiles[profile_name] if profile_name is not 'all' else self._input_profiles def remove_profile(self, *args): """ @@ -298,37 +298,37 @@ class BaseProsumer: The building phase can fail if data is not complete. Raise Error in this case and report what is missing. :return: """ - self.__model = pyo.ConcreteModel(self.__name) - self.__model.cons = pyo.ConstraintList() + self._model = pyo.ConcreteModel(self._name) + self._model.cons = pyo.ConstraintList() # Add flow dependent decision variables - for var in self.__var_dict: + for var in self._var_dict: # Till here, var_dict only contains power flow variables, thus all time related - for t in self.__time_steps: - self.__var_dict[var][t] = pyo.Var(bounds=(0, None)) - self.__model.add_component(var[0] + '_' + var[1] + "_%s" % t, - self.__var_dict[var][t]) + for t in self._time_steps: + self._var_dict[var][t] = pyo.Var(bounds=(0, None)) + self._model.add_component(var[0] + '_' + var[1] + "_%s" % t, + self._var_dict[var][t]) # Add component dependent decision variables - for comp in self.__components: - self.__components[comp].add_variables(self.__input_profiles, - self.__plant_parameters, - self.__var_dict, - self.__flows, self.__model, - self.__time_steps) + for comp in self._components: + self._components[comp].add_variables(self._input_profiles, + self._plant_parameters, + self._var_dict, + self._flows, self._model, + self._time_steps) # Add component dependent constraints - for comp in self.__components: - self.__components[comp].add_all_constr(self.__model, self.__flows, - self.__var_dict, - self.__time_steps) + for comp in self._components: + self._components[comp].add_all_constr(self._model, self._flows, + self._var_dict, + self._time_steps) # Instantiate EMS component and implement the strategies - if set(strategy_name).issubset(self.__strategy_list): - ems = components_list['EnergyManagementSystem'](self.__name, self.__configuration, strategy_name, - self.__plant_parameters, self.__flows, self.__components, - self.__component_properties, self.__input_profiles) - ems.implement_strategy(self.__model, self.__var_dict, self.__time_steps) + if set(strategy_name).issubset(self._strategy_list): + ems = components_list['EnergyManagementSystem'](self._name, self._configuration, strategy_name, + self._plant_parameters, self._flows, self._components, + self._component_properties, self._input_profiles) + ems.implement_strategy(self._model, self._var_dict, self._time_steps) else: print('Not all strategies are defined for this prosumer. 
Use add_strategy to complete the strategy list.') @@ -343,29 +343,29 @@ class BaseProsumer: # ToDo: also extract non time dependent variables (cycle variable) issubclass pyomo # time variable can be overwritten if the reoptimization is done if not time: - time = self.__time_steps + time = self._time_steps results_lst = [] - columns = ['TimeStep'] + [item for item in self.__input_profiles.keys()] + list(self.__var_dict.keys()) + columns = ['TimeStep'] + [item for item in self._input_profiles.keys()] + list(self._var_dict.keys()) for t in time: profile_lst = [] - for profile in self.__input_profiles: - profile_lst.append(self.__input_profiles[profile][t]) + for profile in self._input_profiles: + profile_lst.append(self._input_profiles[profile][t]) local_lst = [t]+profile_lst # self.__input_profiles['prices'][t]] - for var in self.__var_dict: - if issubclass(type(self.__var_dict[var]), dict) and len(self.__var_dict[var]) > 0: - local_lst.append(pyo.value(self.__var_dict[var][pd.Timestamp(t)])) - elif issubclass(type(self.__var_dict[var]), pd.Series): - local_lst.append(self.__var_dict[var][t]) - elif issubclass(type(self.__var_dict[var]), pyo.Var): - local_lst.append(pyo.value(self.__var_dict[var])) + for var in self._var_dict: + if issubclass(type(self._var_dict[var]), dict) and len(self._var_dict[var]) > 0: + local_lst.append(pyo.value(self._var_dict[var][pd.Timestamp(t)])) + elif issubclass(type(self._var_dict[var]), pd.Series): + local_lst.append(self._var_dict[var][t]) + elif issubclass(type(self._var_dict[var]), pyo.Var): + local_lst.append(pyo.value(self._var_dict[var])) else: # issubclass(type(self.__var_dict[var]), float): - local_lst.append(self.__var_dict[var]) + local_lst.append(self._var_dict[var]) results_lst.append(local_lst) # self.__model.solutions.load_from(self.__solver_result) if save_local_file: f = open("output_files/variable_result.txt", "w") - for v in self.__model.component_objects(pyo.Var, active=True): + for v in self._model.component_objects(pyo.Var, active=True): f.write("Variable " + str(v)) for index in v: f.write(" " + str(index) + str(pyo.value(v[index]))) @@ -396,66 +396,66 @@ class BaseProsumer: # in order to install a new solver paste the .exe file in env. 
path 'C:\Users\User\anaconda3\envs\envINEED' if len(strategy_name) == 1: # solver.options['NonConvex'] = 2 # only for gurobi nlp - self.__solver_result = solver.solve(self.__model, tee=commentary) - if (self.__solver_result.solver.status == SolverStatus.ok) and ( - self.__solver_result.solver.termination_condition == TerminationCondition.optimal): - self.__rsl = {0: self._extract_results()} - self.__rsl[0] = self.__rsl[0].set_index(['TimeStep'], drop=False) - output_ps, input_ps = self.get_grd_exchange(self.__rsl[0], self.__time_steps) - self.__rsl[0]['grd_exchange'] = output_ps - input_ps + self._solver_result = solver.solve(self._model, tee=commentary) + if (self._solver_result.solver.status == SolverStatus.ok) and ( + self._solver_result.solver.termination_condition == TerminationCondition.optimal): + self._rsl = {0: self._extract_results()} + self._rsl[0] = self._rsl[0].set_index(['TimeStep'], drop=False) + output_ps, input_ps = self.get_grd_exchange(self._rsl[0], self._time_steps) + self._rsl[0]['grd_exchange'] = output_ps - input_ps else: print('ERROR: The model is infeasible or unbounded: no optimal solution found') elif len(strategy_name) > 1: # using Augmented Epsilon Constraint as presented in Mavrotas 2009: Effective implmentation of the e-constraint # method in Multi-Objective Mathematical Programming problems - self.__model.O_f2.deactivate() # deactivates second objective function + self._model.O_f2.deactivate() # deactivates second objective function # solve for first iteration of max f1 - solver.solve(self.__model, tee=commentary) + solver.solve(self._model, tee=commentary) print('Non pareto optimal solution of max f1') # print('( X1 , X2 ) = ( ' + str(pyo.value(model.X1)) + ' , ' + str(pyo.value(model.X2)) + ' )') - print('f1 = ' + str(pyo.value(self.__model.f1))) - print('f2 = ' + str(pyo.value(self.__model.f2))) + print('f1 = ' + str(pyo.value(self._model.f1))) + print('f2 = ' + str(pyo.value(self._model.f2))) - f1_max = pyo.value(self.__model.f1) + f1_max = pyo.value(self._model.f1) # max f2 - self.__model.O_f2.activate() # activate the second objective function - self.__model.O_f1.deactivate() # deactivate the first objective function + self._model.O_f2.activate() # activate the second objective function + self._model.O_f1.deactivate() # deactivate the first objective function ## restrict the first objective to be its maximum and solve the second - self.__model.C4 = pyo.Constraint(expr=self.__model.f1 == f1_max) + self._model.C4 = pyo.Constraint(expr=self._model.f1 == f1_max) - solver.solve(self.__model, tee=commentary) + solver.solve(self._model, tee=commentary) payoff_table = {'f1': [], 'f2': []} print('Pareto optimal (lexicographic) solution of max f1: payoff table') # print('( X1 , X2 ) = ( ' + str(value(model.X1)) + ' , ' + str(value(model.X2)) + ' )') - print('f1 = ' + str(pyo.value(self.__model.f1))) - payoff_table['f1'].append(pyo.value(self.__model.f1)) - print('f2 = ' + str(pyo.value(self.__model.f2))) - payoff_table['f2'].append(pyo.value(self.__model.f2)) - f2_min = pyo.value(self.__model.f2) + print('f1 = ' + str(pyo.value(self._model.f1))) + payoff_table['f1'].append(pyo.value(self._model.f1)) + print('f2 = ' + str(pyo.value(self._model.f2))) + payoff_table['f2'].append(pyo.value(self._model.f2)) + f2_min = pyo.value(self._model.f2) ## cancel the restriction and resolve the second objective function - self.__model.C4.deactivate() + self._model.C4.deactivate() - solver.solve(self.__model, tee=commentary) + solver.solve(self._model, tee=commentary) 
print('Optimal solution of max f2 (not necessarily Pareto optimal, but irrelevant since we only need the values of f2 for the ' 'Pareto set): payoff table') # print('( X1 , X2 ) = ( ' + str(value(model.X1)) + ' , ' + str(value(model.X2)) + ' )') - print('f1 = ' + str(pyo.value(self.__model.f1))) - payoff_table['f1'].append(pyo.value(self.__model.f1)) - print('f2 = ' + str(pyo.value(self.__model.f2))) - payoff_table['f2'].append(pyo.value(self.__model.f2)) + print('f1 = ' + str(pyo.value(self._model.f1))) + payoff_table['f1'].append(pyo.value(self._model.f1)) + print('f2 = ' + str(pyo.value(self._model.f2))) + payoff_table['f2'].append(pyo.value(self._model.f2)) - self.__payoff = pd.DataFrame(payoff_table, index=['maxf1', 'maxf2']) + self._payoff = pd.DataFrame(payoff_table, index=['maxf1', 'maxf2']) - f2_max = pyo.value(self.__model.f2) + f2_max = pyo.value(self._model.f2) if f2_min > f2_max: f2_max = f2_min @@ -474,28 +474,28 @@ class BaseProsumer: # max f1 + eps * s2 / r2 # s.t. f2 - s2 = e - self.__model.del_component(self.__model.O_f1) - self.__model.del_component(self.__model.O_f2) + self._model.del_component(self._model.O_f1) + self._model.del_component(self._model.O_f2) - self.__model.e = pyo.Param(initialize=0, mutable=True) + self._model.e = pyo.Param(initialize=0, mutable=True) - self.__model.eps = pyo.Param(initialize=0.00001) + self._model.eps = pyo.Param(initialize=0.00001) r2 = f2_max - f2_min if r2 == 0: r2 = 1 - self.__model.r2 = pyo.Param(initialize=r2) + self._model.r2 = pyo.Param(initialize=r2) # Define slack variable for f2 - self.__model.s2 = pyo.Var(bounds=(0, None)) + self._model.s2 = pyo.Var(bounds=(0, None)) - self.__model.O_f1 = pyo.Objective( - expr=self.__model.f1 + self.__model.eps * self.__model.s2 / self.__model.r2, + self._model.O_f1 = pyo.Objective( + expr=self._model.f1 + self._model.eps * self._model.s2 / self._model.r2, sense=pyo.maximize) - self.__model.C_e = pyo.Constraint( - expr=self.__model.f2 - self.__model.s2 == self.__model.e) + self._model.C_e = pyo.Constraint( + expr=self._model.f2 - self._model.s2 == self._model.e) f1_l = [] f2_l = [] @@ -503,34 +503,34 @@ class BaseProsumer: j = 0 for i in tqdm(steps): - self.__model.e = i - self.__solver_result = solver.solve(self.__model, - tee=commentary) - if (self.__solver_result.solver.status == SolverStatus.ok) and ( - self.__solver_result.solver.termination_condition == TerminationCondition.optimal): - f1_l.append(pyo.value(self.__model.f1)) - f2_l.append(pyo.value(self.__model.f2)) + self._model.e = i + self._solver_result = solver.solve(self._model, + tee=commentary) + if (self._solver_result.solver.status == SolverStatus.ok) and ( + self._solver_result.solver.termination_condition == TerminationCondition.optimal): + f1_l.append(pyo.value(self._model.f1)) + f2_l.append(pyo.value(self._model.f2)) pareto_set_rsl[j] = self._extract_results() j += 1 else: print( 'ERROR: The model is infeasible or unbounded: no optimal solution found') - self.__rsl = pareto_set_rsl - self.__pareto_values = pd.DataFrame({'f1': f1_l, 'f2': f2_l}) + self._rsl = pareto_set_rsl + self._pareto_values = pd.DataFrame({'f1': f1_l, 'f2': f2_l}) def plt_pareto_front(self, dpi=500): """ Plot the pareto optimal (efficient) solutions of the objective functions.
""" - if self.__pareto_values is None: + if self._pareto_values is None: pass # print('There are no pareto values to be plotted') else: fig, base_plt = plt.subplots(figsize=(16, 9)) # here f1 is set negative because we are maximizing the negative value of co2 emissions # todo jgn: this plot is hard coded for f1: CO2 optimization - base_plt.plot(-self.__pareto_values['f1'], self.__pareto_values['f2'], 'o-.', linewidth=0.5) + base_plt.plot(-self._pareto_values['f1'], self._pareto_values['f2'], 'o-.', linewidth=0.5) plt.title('Efficient Pareto-Front') plt.grid(True) plt.xlabel('f1: CO2 emissions in kg') # change to f1 and f2 later @@ -546,14 +546,14 @@ class BaseProsumer: Results are protected and not changeable. Can only be accessed through this method. :return: self.__pareto_values """ - return self.__pareto_values + return self._pareto_values def get_payoff_table(self): """ Results are protected and not changeable. Can only be accessed through this method. :return: self.__payoff """ - return self.__payoff + return self._payoff def get_battery_aging(self): # todo jgn: consider whether get_battery_aging should be a method of battery model @@ -562,11 +562,11 @@ class BaseProsumer: :return: aging dictionary """ aging = {} - for component in self.__components: + for component in self._components: if component[1] == 'LiionBattery': - if self.__plant_parameters[component]['min_size'] == self.__plant_parameters[component]['max_size']: - aging_f = self.__plant_parameters[component]['aging_factor'] - cycles = self.__rsl[0][('cum_cycle', component[0])][self.__time_horizon-1] + if self._plant_parameters[component]['min_size'] == self._plant_parameters[component]['max_size']: + aging_f = self._plant_parameters[component]['aging_factor'] + cycles = self._rsl[0][('cum_cycle', component[0])][self._time_horizon - 1] aging[component] = aging_f**cycles else: print('Aging could not be calculated since battery size is being optimized. To calculate battery ' @@ -578,7 +578,7 @@ class BaseProsumer: Results are protected and not changeable. Can only be accessed through this method. 
:return: self.__rsl """ - return self.__rsl + return self._rsl def get_correction_factors(self): """ @@ -607,8 +607,8 @@ class BaseProsumer: ------- two dicts with export and import values before and after DF activation """ - grd_export_before, grd_import_before = self.get_grd_exchange(self.__rsl['rescheduled'].loc[t_fixed], t_fixed) - grd_export_after, grd_import_after = self.get_grd_exchange(self.__rsl['fixed'].loc[t_fixed], t_fixed) + grd_export_before, grd_import_before = self.get_grd_exchange(self._rsl['rescheduled'].loc[t_fixed], t_fixed) + grd_export_after, grd_import_after = self.get_grd_exchange(self._rsl['fixed'].loc[t_fixed], t_fixed) return {'export': grd_export_before, 'import': grd_import_before}, {'export': grd_export_after, 'import': grd_import_after} @@ -630,9 +630,9 @@ class BaseProsumer: in_flows = pd.Series(data=0, index=time_steps) out_flows = pd.Series(data=0, index=time_steps) try: - for in_flow in self.__flows['electricity'][name][0]: + for in_flow in self._flows['electricity'][name][0]: in_flows = in_flows + rsl[in_flow] - for out_flow in self.__flows['electricity'][name][1]: + for out_flow in self._flows['electricity'][name][1]: out_flows = out_flows + rsl[out_flow] except KeyError: pass @@ -661,7 +661,7 @@ class BaseProsumer: # get the grid name try: - grd_name = [comp for comp in self.__components if comp[1] == 'StandardACGrid'][0][0] + grd_name = [comp for comp in self._components if comp[1] == 'StandardACGrid'][0][0] except: print('The grid for starting the dfs search was not found') return @@ -670,11 +670,11 @@ class BaseProsumer: visited = set() results = self.temp_schedule - init_results = self.__rsl[0] + init_results = self._rsl[0] # make the DFS first for the electricity side, afterwards for heat/demand side try: - elec_flex_from_inv, c_f_inv = self.dfs(results, visited, self.__graph, grd_name, time_steps, init_results, act_path=[], type='electricity') + elec_flex_from_inv, c_f_inv = self.dfs(results, visited, self._graph, grd_name, time_steps, init_results, act_path=[], type='electricity') except TypeError: print('There is no flexibility from Storage or PV') @@ -699,12 +699,12 @@ class BaseProsumer: self.temp_flex['p_flex_neg_dec_inv'] += elec_flex_from_inv['p_flex_neg_dec'][time_steps] # reset the grid values so that the additional flexibility is seen separately - grd_comp = self.__components[(grd_name, 'StandardACGrid')] + grd_comp = self._components[(grd_name, 'StandardACGrid')] for col in grd_comp.flex.columns: grd_comp.flex[col].values[:] = 0.0 try: - elec_flex_from_dmd, c_f_dmd = self.dfs(results, visited, self.__graph, grd_name, time_steps, init_results, act_path=[], type='heat') + elec_flex_from_dmd, c_f_dmd = self.dfs(results, visited, self._graph, grd_name, time_steps, init_results, act_path=[], type='heat') flex_dmd = 1 except TypeError: flex_dmd = 0 @@ -713,10 +713,10 @@ class BaseProsumer: # combine correction factors in one dict if not flex_dmd: c_f_dmd = dict() - c_f_dmd['c_f_dch'] = pd.Series(data=0, index=self.__time_steps) - c_f_dmd['c_static_dch'] = pd.Series(data=0, index=self.__time_steps) - c_f_dmd['c_f_cha'] = pd.Series(data=0, index=self.__time_steps) - c_f_dmd['c_static_cha'] = pd.Series(data=0, index=self.__time_steps) + c_f_dmd['c_f_dch'] = pd.Series(data=0, index=self._time_steps) + c_f_dmd['c_static_dch'] = pd.Series(data=0, index=self._time_steps) + c_f_dmd['c_f_cha'] = pd.Series(data=0, index=self._time_steps) + c_f_dmd['c_static_cha'] = pd.Series(data=0, index=self._time_steps) self.__c_f_dict = {'elec': c_f_inv, 'heat':
c_f_dmd} @@ -739,18 +739,18 @@ class BaseProsumer: """ # transform dict flows = dict() - for key in self.__flows.keys(): - for comp in self.__flows[key]: + for key in self._flows.keys(): + for comp in self._flows[key]: if comp not in flows: flows[comp] = [0, 1] flows[comp][0] = list() flows[comp][1] = list() - for in_flow in self.__flows[key][comp][0]: + for in_flow in self._flows[key][comp][0]: if in_flow not in flows[comp][0]: flows[comp][0].append(in_flow) - for out_flow in self.__flows[key][comp][1]: + for out_flow in self._flows[key][comp][1]: if out_flow not in flows[comp][1]: flows[comp][1].append(out_flow) @@ -771,7 +771,7 @@ class BaseProsumer: if pot_neigh not in graph[comp_name]['neigh_all']: graph[comp_name]['neigh_all'].append(pot_neigh) - self.__graph = graph + self._graph = graph def dfs(self, results, visited, graph, node, time_steps, init_results, act_path, type): """ @@ -800,8 +800,8 @@ class BaseProsumer: if node not in visited: # get current node component - node_key = [comp for comp in self.__components.keys() if comp[0] == node][0] - node_comp = self.__components[node_key] + node_key = [comp for comp in self._components.keys() if comp[0] == node][0] + node_comp = self._components[node_key] # initialize flexibility df in first iteration columns_list = ['e_dch', 'p_flex_pos', 'p_flex_pos_inc', 'p_flex_pos_dec', @@ -822,7 +822,7 @@ class BaseProsumer: if not node_comp.flexible: if type == 'electricity': for neighbour in graph[node]['neigh_in']: - if neighbour not in visited and neighbour in self.__graph.keys(): + if neighbour not in visited and neighbour in self._graph.keys(): flex_pre_comp, c_f_pre_comp = self.dfs(results, visited, graph, neighbour, time_steps, init_results, act_path, type) node_comp.temp_flex += flex_pre_comp # adding the flexibility from the previous object to this component # choose the worst (DCH --> highest CHA--> lowest) correction factor of all neighbours so @@ -840,7 +840,7 @@ class BaseProsumer: elif type == 'heat': for neighbour in graph[node]['neigh_out']: - if neighbour not in visited and neighbour in self.__graph.keys(): + if neighbour not in visited and neighbour in self._graph.keys(): flex_pre_comp, c_f_pre_comp = self.dfs(results, visited, graph, neighbour, time_steps, init_results, act_path, type) node_comp.temp_flex += flex_pre_comp # adding the flexibility from the previous object to this component # choose the worst (DCH --> highest CHA--> lowest) correction factor of all neighbours so @@ -857,19 +857,19 @@ class BaseProsumer: c_f_df['c_static_cha'] = new_c_f.combine(c_f_df['c_static_cha'], max) # adjust the correction factors according to components efficiencies - c_f_df = node_comp.calc_correction_factors(self.__time_steps, c_f_df, results, self.__input_profiles) + c_f_df = node_comp.calc_correction_factors(self._time_steps, c_f_df, results, self._input_profiles) # when there are no neighbours anymore to check, calculate the flexibility of this comp itself if node_comp.flexible: # calculate the MAXIMAL THEORETICAL FLEXIBILITY in the component class - node_comp.calc_flex_comp(self.__flows, results, time_steps, init_results) + node_comp.calc_flex_comp(self._flows, results, time_steps, init_results) # check limits of non flexible comp because flexible comps intrinsically respect their # limits from flexibility calculation else: input_flows, output_flows = self.get_planned_flows(results, node_comp.name, time_steps) # Transform the flexibilities to the input side of the inflexible component - 
node_comp.adjust_flex_with_efficiency(results, self.__flows, self.__input_profiles, time_steps) + node_comp.adjust_flex_with_efficiency(results, self._flows, self._input_profiles, time_steps) # Check if the maximal available flexibility could hit the power limits of this component node_comp.check_limits(input_flows, output_flows, results, time_steps) @@ -887,19 +887,19 @@ class BaseProsumer: supply: amount of electricity fed into public grid """ - flows = self.__flows['electricity'] + flows = self._flows['electricity'] - results = copy.deepcopy(self.__rsl[0]) + results = copy.deepcopy(self._rsl[0]) results.set_index(keys='TimeStep', drop=False) try: - grd_name = [comp for comp in self.__components if comp[1] == 'StandardACGrid'][0][0] + grd_name = [comp for comp in self._components if comp[1] == 'StandardACGrid'][0][0] except KeyError: print('The grid for starting the dfs search was not found') return - demand = pd.Series(data=0, index=self.__time_steps) - supply = pd.Series(data=0, index=self.__time_steps) + demand = pd.Series(data=0, index=self._time_steps) + supply = pd.Series(data=0, index=self._time_steps) for in_flow in flows[grd_name][0]: rsl_data = results[in_flow] @@ -926,12 +926,12 @@ class BaseProsumer: solver_name = 'gurobi' # different origin of results depending on whether the prosumer is a community component or not - rsl_init = self.__rsl[0] + rsl_init = self._rsl[0] - if t_validation[0] == self.__time_steps[0]: + if t_validation[0] == self._time_steps[0]: fixed_rsl = 0 else: - fixed_rsl = self.__rsl['fixed'].iloc[:self.__time_steps.index(t_validation[0])] + fixed_rsl = self._rsl['fixed'].iloc[:self._time_steps.index(t_validation[0])] temp_rsl = self.temp_schedule @@ -952,10 +952,10 @@ class BaseProsumer: # in order to install a new solver, paste the .exe file into the env path, e.g. 'C:\Users\User\anaconda3\envs\envINEED'
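The fixed_rsl branch just above follows a simple rolling-horizon rule; a hedged, minimal sketch (index and column names are illustrative): results of intervals that were already validated are frozen, and everything before the current interval start is passed along to anchor storage levels.

import pandas as pd

time_steps = list(pd.date_range('2021-01-01', periods=96, freq='H'))
fixed = pd.DataFrame({'grd_exchange': 0.0}, index=time_steps)  # already fixed intervals

t_validation = time_steps[24:48]        # interval currently being validated
if t_validation[0] == time_steps[0]:
    fixed_rsl = 0                       # first interval: nothing is fixed yet
else:
    # all steps before the interval start are already fixed; they anchor the
    # storage levels of the new interval (cf. add_storage_cons below)
    fixed_rsl = fixed.iloc[:time_steps.index(t_validation[0])]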
if len(strategy_name) == 1: # solver.options['NonConvex'] = 2 # only for gurobi nlp - self.__solver_result = solver.solve(self.__model_validation, tee=False) + self._solver_result = solver.solve(self._model_validation, tee=False) - if (self.__solver_result.solver.status == SolverStatus.ok) and ( - self.__solver_result.solver.termination_condition == TerminationCondition.optimal): + if (self._solver_result.solver.status == SolverStatus.ok) and ( + self._solver_result.solver.termination_condition == TerminationCondition.optimal): new_validated_results = self._extract_results(time=t_validation) new_validated_results = new_validated_results.set_index('TimeStep', drop=True) @@ -973,16 +973,16 @@ class BaseProsumer: new_validated_results['grd_exchange'] = grid_exchange_validation # override the initial validation df so that the size fits - if t_validation[0] == self.__time_steps[0]: + if t_validation[0] == self._time_steps[0]: self.temp_validation = pd.DataFrame(columns=new_validated_results.columns, index=t_validation) self.temp_validation = new_validated_results return True else: - filename = os.path.join(os.path.dirname(__file__), 'analyze_' + self.__name + '.lp') - self.__model_validation.write(filename, io_options={'symbolic_solver_labels': True}) + filename = os.path.join(os.path.dirname(__file__), 'analyze_' + self._name + '.lp') + self._model_validation.write(filename, io_options={'symbolic_solver_labels': True}) return False - log_infeasible_constraints(self.__model_validation, log_expression=True, log_variables=True) + log_infeasible_constraints(self._model_validation, log_expression=True, log_variables=True) logging.basicConfig(filename='example.log', level=logging.INFO) print('ERROR: The model is infeasible or unbounded: no optimal solution found') @@ -1004,54 +1004,54 @@ class BaseProsumer: """ t_model = t_validation - self.__model_validation = pyo.ConcreteModel(self.__name + '_validation') - self.__model_validation.cons = pyo.ConstraintList() + self._model_validation = pyo.ConcreteModel(self._name + '_validation') + self._model_validation.cons = pyo.ConstraintList() # new empty var_dict - self.__var_dict = dict.fromkeys(self.__connection_list) - copy_flow = copy.deepcopy(self.__flows) # --> to not change the flows - for key in self.__var_dict.keys(): - self.__var_dict[key] = dict() + self._var_dict = dict.fromkeys(self._connection_list) + copy_flow = copy.deepcopy(self._flows) # --> to not change the flows + for key in self._var_dict.keys(): + self._var_dict[key] = dict() # Add flow dependent decision variables - for var in self.__var_dict: + for var in self._var_dict: # Till here, var_dict only contains power flow variables, thus all time related for t in t_model: - self.__var_dict[var][t] = pyo.Var(bounds=(0, None)) - self.__model_validation.add_component(var[0] + '_' + var[1] + "_%s" % t, - self.__var_dict[var][t]) + self._var_dict[var][t] = pyo.Var(bounds=(0, None)) + self._model_validation.add_component(var[0] + '_' + var[1] + "_%s" % t, + self._var_dict[var][t]) # Add component dependent decision variables - for comp in self.__components: - self.__components[comp].add_variables(self.__input_profiles, - self.__plant_parameters, - self.__var_dict, - copy_flow, self.__model_validation, - t_model) + for comp in self._components: + self._components[comp].add_variables(self._input_profiles, + self._plant_parameters, + self._var_dict, + copy_flow, self._model_validation, + t_model) # Add component dependent constraints - for comp in
self.__components: - self.__components[comp].add_all_constr(self.__model_validation, self.__flows, - self.__var_dict, - t_model) + for comp in self._components: + self._components[comp].add_all_constr(self._model_validation, self._flows, + self._var_dict, + t_model) # Add reoptimization constraints - for comp in self.__components: - self.__components[comp].add_reopt_constr(self.__model_validation, self.__flows, - self.__var_dict, - t_model, rsl_init) + for comp in self._components: + self._components[comp].add_reopt_constr(self._model_validation, self._flows, + self._var_dict, + t_model, rsl_init) # connecting the energy level of the first time step of this RH-iteration with the last time step of the last RH-iteration - self.__components[comp].add_storage_cons(self.__model_validation, self.__var_dict, self.__flows, fixed_rsl, rsl_init, t_validation, self.__time_steps) + self._components[comp].add_storage_cons(self._model_validation, self._var_dict, self._flows, fixed_rsl, rsl_init, t_validation, self._time_steps) # constraining the grid interaction so that the flexibility is always given - self.add_grd_cons(self.__model_validation, temp_rsl, t_validation, output_power, input_power) + self.add_grd_cons(self._model_validation, temp_rsl, t_validation, output_power, input_power) - ems = components_list['EnergyManagementSystem'](self.__name, self.__configuration, strategy_name, - self.__plant_parameters, self.__flows, self.__components, - self.__component_properties, self.__input_profiles) + ems = components_list['EnergyManagementSystem'](self._name, self._configuration, strategy_name, + self._plant_parameters, self._flows, self._components, + self._component_properties, self._input_profiles) - ems.implement_strategy(self.__model_validation, self.__var_dict, t_model) + ems.implement_strategy(self._model_validation, self._var_dict, t_model) def calc_rh_step_schedule(self, t_interval, *cc_rsl): """ @@ -1073,12 +1073,12 @@ class BaseProsumer: solver_name = 'gurobi' # different origin of results depending on whether the prosumer is a community component or not - rsl_init = self.__rsl[0] + rsl_init = self._rsl[0] - if t_interval[0] == self.__time_steps[0]: + if t_interval[0] == self._time_steps[0]: fixed_rsl = 0 else: - fixed_rsl = self.__rsl['fixed'].iloc[:self.__time_steps.index(t_interval[0])] + fixed_rsl = self._rsl['fixed'].iloc[:self._time_steps.index(t_interval[0])] # build the mathematical validation model in the same way as before but with small changes (e.g. self._flows) self.build_rh_step_model(strategy_name, components, t_interval, rsl_init, fixed_rsl) @@ -1096,10 +1096,10 @@ class BaseProsumer: # in order to install a new solver paste the .exe file in env.
path 'C:\Users\User\anaconda3\envs\envINEED' if len(strategy_name) == 1: # solver.options['NonConvex'] = 2 # only for gurobi nlp - self.__solver_result = solver.solve(self.__model_rh_step, tee=False) + self._solver_result = solver.solve(self.__model_rh_step, tee=False) - if (self.__solver_result.solver.status == SolverStatus.ok) and ( - self.__solver_result.solver.termination_condition == TerminationCondition.optimal): + if (self._solver_result.solver.status == SolverStatus.ok) and ( + self._solver_result.solver.termination_condition == TerminationCondition.optimal): rh_step_schedule = self._extract_results(time=t_interval) rh_step_schedule = rh_step_schedule.set_index('TimeStep', drop=True) try: @@ -1113,12 +1113,12 @@ class BaseProsumer: rh_step_schedule['grd_exchange'] = grid_exchange_temp_df # override the initial validation df so that the size fits - if t_interval[0] == self.__time_steps[0]: + if t_interval[0] == self._time_steps[0]: self.temp_schedule = pd.DataFrame(columns=rh_step_schedule.columns, index=t_interval) self.temp_schedule = rh_step_schedule return True else: - filename = os.path.join(os.path.dirname(__file__), 'analyze_' + self.__name + '.lp') + filename = os.path.join(os.path.dirname(__file__), 'analyze_' + self._name + '.lp') self.__model_rh_step.write(filename, io_options={'symbolic_solver_labels': True}) return False @@ -1136,38 +1136,38 @@ class BaseProsumer: t_model = t_interval - self.__model_rh_step = pyo.ConcreteModel(self.__name + '_rh_schedule') + self.__model_rh_step = pyo.ConcreteModel(self._name + '_rh_schedule') self.__model_rh_step.cons = pyo.ConstraintList() # new empty var_dict - self.__var_dict = dict.fromkeys(self.__connection_list) - copy_flow = copy.deepcopy(self.__flows) # --> to not change the flows - for key in self.__var_dict.keys(): - self.__var_dict[key] = dict() + self._var_dict = dict.fromkeys(self._connection_list) + copy_flow = copy.deepcopy(self._flows) # --> to not change the flows + for key in self._var_dict.keys(): + self._var_dict[key] = dict() # Add flow dependent decision variables - for var in self.__var_dict: + for var in self._var_dict: # Till here, var_dict only contains power flow variables, thus all time related for t in t_model: - self.__var_dict[var][t] = pyo.Var(bounds=(0, None)) - self.__model_rh_step.add_component(var[0] + '_' + var[1] + "_%s" % t, self.__var_dict[var][t]) + self._var_dict[var][t] = pyo.Var(bounds=(0, None)) + self.__model_rh_step.add_component(var[0] + '_' + var[1] + "_%s" % t, self._var_dict[var][t]) # Add component dependent decision variables - for comp in self.__components: - self.__components[comp].add_variables(self.__input_profiles, self.__plant_parameters, - self.__var_dict, copy_flow, self.__model_rh_step, t_model) + for comp in self._components: + self._components[comp].add_variables(self._input_profiles, self._plant_parameters, + self._var_dict, copy_flow, self.__model_rh_step, t_model) # Add component dependent constraints - for comp in self.__components: - self.__components[comp].add_all_constr(self.__model_rh_step, self.__flows, self.__var_dict, t_model) + for comp in self._components: + self._components[comp].add_all_constr(self.__model_rh_step, self._flows, self._var_dict, t_model) # Add reoptimization constraints - for comp in self.__components: - self.__components[comp].add_reopt_constr(self.__model_rh_step, self.__flows, - self.__var_dict, t_model, rsl_init) + for comp in self._components: + self._components[comp].add_reopt_constr(self.__model_rh_step, self._flows, + self._var_dict, 
t_model, rsl_init) # connecting the energy level of the first time step of this RH-iteration with the last time step of the last RH-iteration - self.__components[comp].add_storage_cons(self.__model_rh_step, self.__var_dict, self.__flows, fixed_rsl, - rsl_init, t_model, self.__time_steps) + self._components[comp].add_storage_cons(self.__model_rh_step, self._var_dict, self._flows, fixed_rsl, + rsl_init, t_model, self._time_steps) @@ -1175,11 +1175,11 @@ class BaseProsumer: #p_flex = pd.Series(data=float(0), index=t_model) self.add_grd_cons(self.__model_rh_step, rsl_init, t_interval) - ems = components_list['EnergyManagementSystem'](self.__name, self.__configuration, strategy_name, - self.__plant_parameters, self.__flows, self.__components, - self.__component_properties, self.__input_profiles) + ems = components_list['EnergyManagementSystem'](self._name, self._configuration, strategy_name, + self._plant_parameters, self._flows, self._components, + self._component_properties, self._input_profiles) - ems.implement_strategy(self.__model_rh_step, self.__var_dict, t_model) + ems.implement_strategy(self.__model_rh_step, self._var_dict, t_model) def get_grd_exchange(self, result, time_steps): """ @@ -1197,10 +1197,10 @@ class BaseProsumer: #ToDo Decide which exchange method you want to work with elec_grid_imports = [] elec_grid_exports = [] - for component, comp_type in self.__components: + for component, comp_type in self._components: if comp_type == 'StandardACGrid': - elec_grid_exports += self.__flows['electricity'][component][0] - elec_grid_imports += self.__flows['electricity'][component][1] + elec_grid_exports += self._flows['electricity'][component][0] + elec_grid_imports += self._flows['electricity'][component][1] scheduled_grid_import = pd.Series(data=0.0, index=time_steps) scheduled_grid_export = pd.Series(data=0.0, index=time_steps) @@ -1232,50 +1232,50 @@ class BaseProsumer: elec_grid_imports = [] elec_grid_exports = [] - for component, comp_type in self.__components: + for component, comp_type in self._components: if comp_type == 'StandardACGrid': - elec_grid_exports += self.__flows['electricity'][component][0] - elec_grid_imports += self.__flows['electricity'][component][1] + elec_grid_exports += self._flows['electricity'][component][0] + elec_grid_imports += self._flows['electricity'][component][1] grd_export, grd_import = self.get_grd_exchange(baseline_rsl, t_model) grid_exchange = grd_export - grd_import - self.__var_dict['slack_pos_import'] = dict() - self.__var_dict['slack_pos_export'] = dict() - self.__var_dict['slack_neg_import'] = dict() - self.__var_dict['slack_neg_export'] = dict() + self._var_dict['slack_pos_import'] = dict() + self._var_dict['slack_pos_export'] = dict() + self._var_dict['slack_neg_import'] = dict() + self._var_dict['slack_neg_export'] = dict() for t in t_model: # export and import slack variables are necessary to prevent the optimization from using export and import # at the same time. The OF does not consider any costs, therefore the optimizer will not see the downside # of using export and import at the same time. However, this is not realistic
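A minimal standalone sketch of the slack pattern that the comment above describes (toy model, all names and numbers illustrative): the rescheduled exchange may deviate from the baseline only through the two slack variables, which can then be bounded or tied to zero as done in the validation branch below.

import pyomo.environ as pyo

m = pyo.ConcreteModel('toy_slack')
m.cons = pyo.ConstraintList()
m.grd_export = pyo.Var(bounds=(0, None))
m.slack_pos_export = pyo.Var(bounds=(0, None))
m.slack_neg_export = pyo.Var(bounds=(0, None))

baseline_export = 3.0  # kW from the baseline schedule (illustrative)
# deviation from the baseline is only possible via the slacks
m.cons.add(m.grd_export == baseline_export + m.slack_pos_export - m.slack_neg_export)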
- self.__var_dict['slack_pos_import'][t] = pyo.Var(bounds=(0, None)) - self.__var_dict['slack_pos_export'][t] = pyo.Var(bounds=(0, None)) - self.__var_dict['slack_neg_import'][t] = pyo.Var(bounds=(0, None)) - self.__var_dict['slack_neg_export'][t] = pyo.Var(bounds=(0, None)) + self._var_dict['slack_pos_import'][t] = pyo.Var(bounds=(0, None)) + self._var_dict['slack_pos_export'][t] = pyo.Var(bounds=(0, None)) + self._var_dict['slack_neg_import'][t] = pyo.Var(bounds=(0, None)) + self._var_dict['slack_neg_export'][t] = pyo.Var(bounds=(0, None)) - model.add_component('slack_pos_import_' + "%s" % t, self.__var_dict['slack_pos_import'][t]) - model.add_component('slack_pos_export_' + "%s" % t, self.__var_dict['slack_pos_export'][t]) - model.add_component('slack_neg_import_' + "%s" % t, self.__var_dict['slack_neg_import'][t]) - model.add_component('slack_neg_export_' + "%s" % t, self.__var_dict['slack_neg_export'][t]) + model.add_component('slack_pos_import_' + "%s" % t, self._var_dict['slack_pos_import'][t]) + model.add_component('slack_pos_export_' + "%s" % t, self._var_dict['slack_pos_export'][t]) + model.add_component('slack_neg_import_' + "%s" % t, self._var_dict['slack_neg_import'][t]) + model.add_component('slack_neg_export_' + "%s" % t, self._var_dict['slack_neg_export'][t]) if 'validation' in model.name or 'flex_error' in model.name: if len(elec_grid_exports) > 0: - model.cons.add(pyo.quicksum(self.__var_dict[var_export][t] for var_export in elec_grid_exports) == output_power[t]) - model.cons.add(pyo.quicksum(self.__var_dict[var_import][t] for var_import in elec_grid_imports) == input_power[t]) + model.cons.add(pyo.quicksum(self._var_dict[var_export][t] for var_export in elec_grid_exports) == output_power[t]) + model.cons.add(pyo.quicksum(self._var_dict[var_import][t] for var_import in elec_grid_imports) == input_power[t]) - model.cons.add(self.__var_dict['slack_pos_import'][t] == 0.0) - model.cons.add(self.__var_dict['slack_neg_import'][t] == 0.0) - model.cons.add(self.__var_dict['slack_pos_export'][t] == 0.0) - model.cons.add(self.__var_dict['slack_neg_export'][t] == 0.0) + model.cons.add(self._var_dict['slack_pos_import'][t] == 0.0) + model.cons.add(self._var_dict['slack_neg_import'][t] == 0.0) + model.cons.add(self._var_dict['slack_pos_export'][t] == 0.0) + model.cons.add(self._var_dict['slack_neg_export'][t] == 0.0) else: if len(elec_grid_exports) > 0: - model.cons.add(pyo.quicksum(self.__var_dict[var_export][t] for var_export in elec_grid_exports) - == grd_export[t] + self.__var_dict['slack_pos_export'][t] - self.__var_dict['slack_neg_export'][t]) + model.cons.add(pyo.quicksum(self._var_dict[var_export][t] for var_export in elec_grid_exports) + == grd_export[t] + self._var_dict['slack_pos_export'][t] - self._var_dict['slack_neg_export'][t]) - model.cons.add(pyo.quicksum(self.__var_dict[var_import][t] for var_import in elec_grid_imports) - == grd_import[t] + self.__var_dict['slack_pos_import'][t] - self.__var_dict['slack_neg_import'][t]) + model.cons.add(pyo.quicksum(self._var_dict[var_import][t] for var_import in elec_grid_imports) + == grd_import[t] + self._var_dict['slack_pos_import'][t] - self._var_dict['slack_neg_import'][t]) def fix_results(self, t_fix): """ @@ -1285,14 +1285,14 @@ class BaseProsumer: t_fix: time steps to fix (considering the overlap) """ - if t_fix[0] == self.__time_steps[0]: - self.__flex = pd.DataFrame(data=self.temp_flex.loc[t_fix], index=self.__time_steps) - self.__rsl['rescheduled'] =
pd.DataFrame(data=self.temp_schedule.loc[t_fix], index=self.__time_steps) - self.__rsl['fixed'] = pd.DataFrame(data=self.temp_validation.loc[t_fix], index=self.__time_steps) + if t_fix[0] == self._time_steps[0]: + self.__flex = pd.DataFrame(data=self.temp_flex.loc[t_fix], index=self._time_steps) + self._rsl['rescheduled'] = pd.DataFrame(data=self.temp_schedule.loc[t_fix], index=self._time_steps) + self._rsl['fixed'] = pd.DataFrame(data=self.temp_validation.loc[t_fix], index=self._time_steps) else: self.__flex.loc[t_fix] = self.temp_flex.loc[t_fix] - self.__rsl['rescheduled'].loc[t_fix] = self.temp_schedule.loc[t_fix] - self.__rsl['fixed'].loc[t_fix] = self.temp_validation.loc[t_fix] + self._rsl['rescheduled'].loc[t_fix] = self.temp_schedule.loc[t_fix] + self._rsl['fixed'].loc[t_fix] = self.temp_validation.loc[t_fix] def add_result(self, rsl, rsl_name): """ @@ -1304,10 +1304,10 @@ class BaseProsumer: rsl: df with the results to be stored rsl_name: key for dict """ - if self.__rsl is None: - self.__rsl = dict() + if self._rsl is None: + self._rsl = dict() - self.__rsl[rsl_name] = rsl + self._rsl[rsl_name] = rsl def get_min_cap(self): """ @@ -1320,13 +1320,13 @@ class BaseProsumer: min_cap: minimal capacity in the prosumer """ min_cap = 10000 - for var in self.__var_dict: + for var in self._var_dict: if var[0] == 'cap': #ToDo: correct that and make it more dynamic if var[1] == 'water_tes': min_cap = min(self.temp_flex['e_dch_dmd'].max(), min_cap) else: - min_cap = min(self.__rsl['sizing'][var][0], min_cap) + min_cap = min(self._rsl['sizing'][var][0], min_cap) return min_cap @@ -1346,38 +1346,40 @@ class BaseProsumer: annuity_community: prosumer's annuity with community conditions either with or without DF activations """ # The factor that converts the simulation to ONE year - annual_factor = timedelta(days=365) / (self.__time_steps[-1] - self.__time_steps[0] + timedelta(hours=1)) + annual_factor = timedelta(days=365) / (self._time_steps[-1] - self._time_steps[0] + timedelta(hours=1)) - grd_export, grd_import = self.get_grd_exchange(self.__rsl[0], self.__time_steps) + grd_export, grd_import = self.get_grd_exchange(self._rsl[0], self._time_steps) - if not isinstance(self.__configuration['elec_price'], float): - annuity_init = (+ grd_export * self.__model_resolution * self.__configuration['injection_price'] - - grd_import * self.__model_resolution * self.__configuration['elec_price'][self.__time_steps]) + if not isinstance(self._configuration['elec_price'], float): + annuity_init = (+ grd_export * self._model_resolution * self._configuration['injection_price'] + - grd_import * self._model_resolution * self._configuration['elec_price'][self._time_steps]) annuity_init = (annuity_init.sum() * annual_factor - 12*4.58 -49.56) # 12* 4,13 for grid usage else: - annuity_init = (+ grd_export * self.__model_resolution * self.__configuration['injection_price'] - - grd_import * self.__model_resolution * self.__configuration['elec_price']) + annuity_init = (+ grd_export * self._model_resolution * self._configuration['injection_price'] + - grd_import * self._model_resolution * self._configuration['elec_price']) annuity_init = annuity_init.sum() * annual_factor # for INITIAL SCHEDULE if not df: - annuity_community = (+ grd_export * self.__model_resolution * price_injection[self.__time_steps] - - grd_import * self.__model_resolution * price_ext) + annuity_community = (+ grd_export * self._model_resolution * price_injection[self._time_steps] + - grd_import * self._model_resolution * price_ext)
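As a side note, the annuity bookkeeping in calc_annuity can be sketched standalone: export revenues minus import costs per time step, summed and scaled from the simulated span to one full year via annual_factor. All numbers below are illustrative, not repository data.

from datetime import timedelta
import pandas as pd

time_steps = pd.date_range('2021-01-01', periods=48, freq='H')
t_step = 1.0                                   # model resolution in hours
grd_export = pd.Series(0.5, index=time_steps)  # kW fed into the grid
grd_import = pd.Series(1.2, index=time_steps)  # kW drawn from the grid
injection_price, elec_price = 0.08, 0.32       # EUR/kWh, illustrative

annual_factor = timedelta(days=365) / (time_steps[-1] - time_steps[0] + timedelta(hours=1))
cash_flow = grd_export * t_step * injection_price - grd_import * t_step * elec_price
annuity = cash_flow.sum() * annual_factor      # scaled to one year
print(f'annuity: {annuity:.2f} EUR/a')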
annuity_community = annuity_community.sum() * annual_factor - print('Annuity init ' + self.__name + ': ' + str(annuity_init)) - print('Annuity agg ' + self.__name + ': ' + str(annuity_community)) + print('Annuity init ' + self._name + ': ' + str(annuity_init)) + print('Annuity agg ' + self._name + ': ' + str(annuity_community)) else: # for FIXED SCHEDULE - grd_export, grd_import = self.get_grd_exchange(self.__rsl['fixed'], self.__time_steps) + grd_export, grd_import = self.get_grd_exchange(self._rsl['fixed'], self._time_steps) - annuity_community = (+ grd_export * self.__model_resolution * price_injection[self.__time_steps] - - grd_import * self.__model_resolution * price_ext) + annuity_community = (+ grd_export * self._model_resolution * price_injection[self._time_steps] + - grd_import * self._model_resolution * price_ext) annuity_community = annuity_community.sum() * annual_factor - print('Annuity with df ' + self.__name + ': ' + str(annuity_community)) + print('Annuity with df ' + self._name + ': ' + str(annuity_community)) return annuity_init, annuity_community + + diff --git a/Prosumer/model/CommunityAsset.py b/Prosumer/model/CommunityAsset.py index 07a03a8d5da459194f52b06ca84a81340d4cf05b..2a2413b5170dc44b0b3bcbed135bb04cac08044b 100644 --- a/Prosumer/model/CommunityAsset.py +++ b/Prosumer/model/CommunityAsset.py @@ -21,8 +21,13 @@ components = component_loader.run(component_lib_path) class CommunityAsset(BaseProsumer): def __init__(self, name, ca_configuration): + """ + It is still to be decided whether this class should be a child class of BaseProsumer or stand independently. + If we keep the inheritance, the protection level of the BaseProsumer variables should be changed. + """ super().__init__(name, ca_configuration) + def add_ca_cons(self, time_steps, model, var_dict): energy_storage = pd.Series(data=0, index=time_steps) @@ -37,18 +42,18 @@ class CommunityAsset(BaseProsumer): for t in time_steps: if t == time_steps[0]: if energy_storage[t] > init_soc * cap: - model.cons.add(var_dict['P_flex_' + self._BaseProsumer__name][t] <= 0) + model.cons.add(var_dict['P_flex_' + self._name][t] <= 0) elif energy_storage[t] < init_soc * cap: - model.cons.add(var_dict['P_flex_' + self._BaseProsumer__name][t] >= 0) + model.cons.add(var_dict['P_flex_' + self._name][t] >= 0) else: - model.cons.add(var_dict['P_flex_' + self._BaseProsumer__name][t] == 0) + model.cons.add(var_dict['P_flex_' + self._name][t] == 0) else: if energy_storage.iloc[time_steps.index(t)] > energy_storage.iloc[time_steps.index(t)-1]: - model.cons.add(var_dict['P_flex_' + self._BaseProsumer__name][t] <= 0) + model.cons.add(var_dict['P_flex_' + self._name][t] <= 0) elif energy_storage.iloc[time_steps.index(t)] < energy_storage.iloc[time_steps.index(t)-1]: - model.cons.add(var_dict['P_flex_' + self._BaseProsumer__name][t] >= 0) + model.cons.add(var_dict['P_flex_' + self._name][t] >= 0) else: - model.cons.add(var_dict['P_flex_' + self._BaseProsumer__name][t] == 0) + model.cons.add(var_dict['P_flex_' + self._name][t] == 0) def build_sizing_model_new(self, model): """ @@ -60,8 +65,8 @@ class CommunityAsset(BaseProsumer): ---------------------------------------- model: model of all community components together """ - temp_var_dict = copy.deepcopy(self._BaseProsumer__var_dict) - copy_flow = copy.deepcopy(self._BaseProsumer__flows) # --> to not change the flows + temp_var_dict = copy.deepcopy(self._var_dict) + copy_flow = copy.deepcopy(self._flows) # --> to not change the flows # a model for the BaseProsumer object is built for later use in
recalculation super()._build_math_model('annuity', components) @@ -69,23 +74,23 @@ class CommunityAsset(BaseProsumer): # Add flow dependent decision variables for var in temp_var_dict: # Till here, var_dict only contains power flow variables, thus all time related - for t in self._BaseProsumer__time_steps: + for t in self._time_steps: temp_var_dict[var][t] = pyo.Var(bounds=(0, None)) model.add_component(var[0] + '_' + var[1] + "_%s" % t, temp_var_dict[var][t]) # Add component dependent decision variables - for comp in self._BaseProsumer__components: - self._BaseProsumer__components[comp].add_variables(self._BaseProsumer__input_profiles, - self._BaseProsumer__plant_parameters, + for comp in self._components: + self._components[comp].add_variables(self._input_profiles, + self._plant_parameters, temp_var_dict, copy_flow, model, - self._BaseProsumer__time_steps) + self._time_steps) # Add component dependent constraints - for comp in self._BaseProsumer__components: - self._BaseProsumer__components[comp].add_all_constr(model, copy_flow, + for comp in self._components: + self._components[comp].add_all_constr(model, copy_flow, temp_var_dict, - self._BaseProsumer__time_steps) + self._time_steps) return model, temp_var_dict @@ -101,9 +106,9 @@ class CommunityAsset(BaseProsumer): elec_grid_inputs = [] elec_grid_outputs = [] - for component, comp_type in self.__components: + for component, comp_type in self._components: if comp_type == 'StandardACGrid': - elec_grid_inputs += self.__flows['electricity'][component][0] - elec_grid_outputs += self.__flows['electricity'][component][1] + elec_grid_inputs += self._flows['electricity'][component][0] + elec_grid_outputs += self._flows['electricity'][component][1] return elec_grid_inputs, elec_grid_outputs \ No newline at end of file diff --git a/Prosumer/model/__init__.py b/Prosumer/model/__init__.py index 3f519599f9fbdfe579b04adda14d061fbcc30a2e..53344a2ec52d33a0f4c263ca990897244a9802e4 100644 --- a/Prosumer/model/__init__.py +++ b/Prosumer/model/__init__.py @@ -3,3 +3,4 @@ # Created by jgn on 30.11.2020. from .BaseProsumer import BaseProsumer +from .CommunityAsset import CommunityAsset diff --git a/Prosumer/scripts/__init__.py b/Prosumer/scripts/__init__.py index cf243b06ca693bd694f7c41ae859419f92f4cc5b..5bdafc4a3529c4cf03dce4a26ffb3ee2610e8f58 100644 --- a/Prosumer/scripts/__init__.py +++ b/Prosumer/scripts/__init__.py @@ -1,2 +1,2 @@ from .get_all_class import run -from .extract_inputs import run \ No newline at end of file +#from .extract_inputs import run \ No newline at end of file diff --git a/Prosumer/scripts/extract_inputs.py b/Prosumer/scripts/extract_inputs.py index f7e8c16e244f8159082082c528a13f420028f47b..aa19a61033ffea88eb1d01a3f439e2a89dca5b53 100644 --- a/Prosumer/scripts/extract_inputs.py +++ b/Prosumer/scripts/extract_inputs.py @@ -1,7 +1,7 @@ """ -The FEN-Tool is an optimization tool for prosumer, district, and interconnected city models. +The FEN-Tool is an optimisation tool for prosumer, district, and interconnected city models. -Copyright (C) 2022. Mauricio Celi Cortés, Jingyu Gong, Jonas van Ouwerkerk, Felix Wege, Nie Yi, Jonas Brucksch +Copyright (C) 2022. 
Mauricio Celi Cortés, Jingyu Gong, Jonas van Ouwerkerk, Felix Wege, Nie Yi, Jonas Brucksch This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License @@ -26,100 +26,100 @@ import os import pandas as pd import numpy as np -prices_2021_path = 'input_files/spot_price_2021.pkl' # in €/kWh -def get_config(config_csv_path): - """ - The method XYZ adds ... - :param - XYZ: - :returns - XYZ - """ - - config_df = pd.read_csv(config_csv_path) - config_dict = config_df.to_dict(orient='list') - - for k in config_dict: - config_dict[k] = config_dict[k][0] - return config_dict - -def get_path(data_path): - """ - The method XYZ adds ... - :param - XYZ: - :returns - XYZ - """ - - data_df = pd.read_csv(data_path) - data_dict = data_df.to_dict(orient='list') - path_dict = {} - ctr_path = 0 - - for k in data_dict['type']: - path_dict[k] = data_dict['path'][ctr_path] - ctr_path = ctr_path + 1 - return path_dict - - -def prepare_time_series_data(data_path, t_step): - """ - The method XYZ adds ... - :param - XYZ: - :returns - XYZ - """ - - data_df = pd.read_csv(data_path, index_col=0) - try: - data_df.index = pd.to_datetime(data_df.index, format='%d-%m-%Y %H:%M:%S') - except ValueError: - data_df.index = pd.to_datetime(data_df.index, format='%Y-%m-%d %H:%M:%S') - data_df = data_df.resample(str(t_step) + 'H').mean().interpolate('linear') - return data_df - - -def get_sector_matrices(scenario_path): - """ - The method XYZ adds ... - :param - XYZ: - :returns - XYZ - """ - - all_files = os.listdir(scenario_path) - matrices_path = {} - for this_file in all_files: - if this_file.find('matrix') and this_file.endswith('.csv'): - if this_file.find('elec') is not -1: - matrices_path.update({'electricity_matrix_path': os.path.join(scenario_path, this_file)}) - elif this_file.find('therm') is not -1: - matrices_path.update({'heat_matrix_path': os.path.join(scenario_path, this_file)}) - elif this_file.find('gas') is not -1: - matrices_path.update({'gas_matrix_path': os.path.join(scenario_path, this_file)}) - elif this_file.find('hydro') is not -1: - matrices_path.update({'hydrogen_matrix_path': os.path.join(scenario_path, this_file)}) - return matrices_path - - -def run(data_path_dict, prosumer, t_step): - """ - The method XYZ adds ... - :param - XYZ: - :returns - XYZ - """ - - # Loop through dictionary and extract all data - all_path_data = {} - for data_type in data_path_dict[prosumer]: - test = data_path_dict[prosumer][data_type] - if data_path_dict[prosumer][data_type] != 'generate': - temp_data = prepare_time_series_data(data_path_dict[prosumer][data_type], t_step) - all_path_data[data_type] = temp_data - # Return loaded time series data - return all_path_data +class ProsumerExtractor: + def get_config(self, config_csv_path): + """ + Read a single-row configuration csv and return it as a dictionary. + :param + config_csv_path: path to the configuration csv + :returns + config_dict: one scalar value per configuration key + """ + + config_df = pd.read_csv(config_csv_path) + config_dict = config_df.to_dict(orient='list') + + for k in config_dict: + config_dict[k] = config_dict[k][0] + return config_dict + + def get_path(self, data_path): + """ + Read a csv with 'type' and 'path' columns and map each time series type to its file path. + :param + data_path: path to the csv listing the input files + :returns + path_dict: dictionary mapping data type to file path + """ + + data_df = pd.read_csv(data_path) + data_dict = data_df.to_dict(orient='list') + path_dict = {} + ctr_path = 0 + + for k in data_dict['type']: + path_dict[k] = data_dict['path'][ctr_path] + ctr_path = ctr_path + 1 + return path_dict + + + def prepare_time_series_data(self, data_path, t_step):
+ """ + Read a time series csv, parse its datetime index and resample it to the model resolution. + :param + data_path: path to the time series csv; t_step: model resolution in hours + :returns + data_df: resampled and linearly interpolated dataframe + """ + + data_df = pd.read_csv(data_path, index_col=0) + try: + data_df.index = pd.to_datetime(data_df.index, format='%d-%m-%Y %H:%M:%S') + except ValueError: + data_df.index = pd.to_datetime(data_df.index, format='%Y-%m-%d %H:%M:%S') + data_df = data_df.resample(str(t_step) + 'H').mean().interpolate('linear') + return data_df + + + def get_sector_matrices(self, scenario_path): + """ + Collect the sector matrix csv files of a scenario folder. + :param + scenario_path: folder that contains the matrix csv files + :returns + matrices_path: dictionary with one matrix path per energy sector + """ + + all_files = os.listdir(scenario_path) + matrices_path = {} + for this_file in all_files: + if this_file.find('matrix') != -1 and this_file.endswith('.csv'): + if this_file.find('elec') != -1: + matrices_path.update({'electricity_matrix_path': os.path.join(scenario_path, this_file)}) + elif this_file.find('therm') != -1: + matrices_path.update({'heat_matrix_path': os.path.join(scenario_path, this_file)}) + elif this_file.find('gas') != -1: + matrices_path.update({'gas_matrix_path': os.path.join(scenario_path, this_file)}) + elif this_file.find('hydro') != -1: + matrices_path.update({'hydrogen_matrix_path': os.path.join(scenario_path, this_file)}) + return matrices_path + + + def run(self, data_path_dict, prosumer, t_step): + """ + Load all time series referenced for the given prosumer, skipping entries marked 'generate'. + :param + data_path_dict: nested dictionary of file paths; prosumer: key of the prosumer to extract; t_step: model resolution in hours + :returns + all_path_data: dictionary of prepared dataframes per data type + """ + + # Loop through dictionary and extract all data + all_path_data = {} + for data_type in data_path_dict[prosumer]: + if data_path_dict[prosumer][data_type] != 'generate': + temp_data = self.prepare_time_series_data(data_path_dict[prosumer][data_type], t_step) + all_path_data[data_type] = temp_data + # Return loaded time series data + return all_path_data
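
A short usage sketch for the ProsumerExtractor class introduced above; the file names, the prosumer key, and the csv layouts (one-row config; a type/path table) are assumptions for illustration only.

from Prosumer.scripts.extract_inputs import ProsumerExtractor

extractor = ProsumerExtractor()
config = extractor.get_config('input_files/prosumer_config.csv')              # one-row csv -> flat dict
paths = {'prosumer_1': extractor.get_path('input_files/prosumer_data.csv')}  # type/path table -> {type: path}
profiles = extractor.run(paths, 'prosumer_1', t_step=1)                       # dict of resampled DataFrames
print(config, list(profiles))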