Skip to content
Snippets Groups Projects
Commit de2686f8 authored by Jonas Brucksch's avatar Jonas Brucksch
Browse files

- adds .lp files to gitignore

- changes according to my Todos
- deletes old "runme_community" file and renames the "runme_community_new" file to "runme_community"
parent e84aaa90
No related branches found
No related tags found
No related merge requests found
...@@ -22,7 +22,7 @@ var/ ...@@ -22,7 +22,7 @@ var/
*.egg-info/ *.egg-info/
.installed.cfg .installed.cfg
*.egg *.egg
.lp
# PyInstaller # PyInstaller
# Usually these files are written by a python script from a template # Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it. # before PyInstaller builds the exe, so as to inject date/other infos into it.
......
Subproject commit 58a920d79c827c5a9da7203ba3ebe95cbac64608 Subproject commit e865185fd95a787af218873e1f58b23fc9f3f849
# runme.py is the central script to execute the optimization of the community. # runme.py is the central script to execute the optimization of the community.
"""
The FEN-Tool is an optimisation tool for prosumer, district, and interconnected city models.
Copyright (C) 2022. Mauricio Celi Cortés, Jingyu Gong, Jonas van Ouwerkerk, Felix Wege, Nie Yi, Jonas Brucksch
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation; either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
Project host: RWTH Aachen University, Aachen, Germany
Project Website: https://www.fenaachen.net/projekte/fen-ineed-dc
"""
# Importing the necessary files # Importing the necessary files
import numpy as np import numpy as np
import Model_Library.District.scripts as scripts
from datetime import timedelta from datetime import timedelta
import math import math
import time import time
import pandas as pd import pandas as pd
import Model_Library.Prosumer.scripts as scripts import Model_Library.Prosumer.scripts as scripts
import Model_Library.Prosumer.main as main import Model_Library.Prosumer.main as main
import Model_Library.Prosumer.scripts.extract_inputs as extract_inputs
from functools import partial from functools import partial
from multiprocessing import Pool from multiprocessing import Pool
from tqdm import tqdm from tqdm import tqdm
...@@ -39,8 +19,7 @@ import argparse ...@@ -39,8 +19,7 @@ import argparse
from Model_Library.Prosumer.scripts.results_evaluation.results_evaluation import Plot_savings from Model_Library.Prosumer.scripts.results_evaluation.results_evaluation import Plot_savings
import Model_Library.Prosumer.main_ca as main_ca import Model_Library.District.main_district as main_district
import Model_Library.District.model as comm_model
def process_each_prosumer(prosumer_name, prosumer_dict, data_source, commentary, no_process_bar_rh): def process_each_prosumer(prosumer_name, prosumer_dict, data_source, commentary, no_process_bar_rh):
...@@ -109,7 +88,6 @@ def process_each_prosumer(prosumer_name, prosumer_dict, data_source, commentary, ...@@ -109,7 +88,6 @@ def process_each_prosumer(prosumer_name, prosumer_dict, data_source, commentary,
# Calculate number of rolling horizon intervals and loop through them # Calculate number of rolling horizon intervals and loop through them
for t in tqdm(pd.date_range(t_start, t_end - pd.Timedelta(hours=t_rh_shift + 1), freq=str(t_rh_shift) + 'H'), disable=no_process_bar_rh): for t in tqdm(pd.date_range(t_start, t_end - pd.Timedelta(hours=t_rh_shift + 1), freq=str(t_rh_shift) + 'H'), disable=no_process_bar_rh):
# ToDo: replace first value with perfect value (can be done in runme)
# set end date for current loop # set end date for current loop
t_end_loop = t + pd.Timedelta(hours=t_rh_horizon) t_end_loop = t + pd.Timedelta(hours=t_rh_horizon)
...@@ -181,8 +159,26 @@ if __name__ == "__main__": ...@@ -181,8 +159,26 @@ if __name__ == "__main__":
'data_path': topology_path[i] + '/data_path.csv', 'data_path': topology_path[i] + '/data_path.csv',
'rolling_horizon': rolling_horizon[i]}""" 'rolling_horizon': rolling_horizon[i]}"""
prosumer_dict={'SCN2_CAT1_PV14_HP_3000_6000_1': {'elec_demand': 3000, 'therm_demand': 6000, 'hot_water_demand': 1500, 'topology_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV14_HP', 'config_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV14_HP/config.csv', 'data_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV14_HP/data_path.csv', 'rolling_horizon': False}, prosumer_dict={'SCN2_CAT1_PV11_3000_6000': {'elec_demand': 3000, 'therm_demand': 6000, 'hot_water_demand': 1500,
'SCN2_CAT1_PV14_HP_3000_6000_2': {'elec_demand': 1500, 'therm_demand': 6000, 'hot_water_demand': 1500, 'topology_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV14_HP', 'config_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV14_HP/config.csv', 'data_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV14_HP/data_path.csv', 'rolling_horizon': False}} 'topology_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV11',
'config_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV11/config.csv',
'data_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV11/data_path.csv',
'rolling_horizon': False},
'SCN2_CAT1_PV12_BA_6000_6000': {'elec_demand': 6000, 'therm_demand': 6000, 'hot_water_demand': 1500,
'topology_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV12_BA',
'config_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV12_BA/config.csv',
'data_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV12_BA/data_path.csv',
'rolling_horizon': False},
'SCN0_CAT1_6000_6000': {'elec_demand': 6000, 'therm_demand': 6000, 'hot_water_demand': 1500,
'topology_path': 'input_files/models/prosumer_models/SCN0_CAT1',
'config_path': 'input_files/models/prosumer_models/SCN0_CAT1/config.csv',
'data_path': 'input_files/models/prosumer_models/SCN0_CAT1/data_path.csv',
'rolling_horizon': False},
'SCN0_CAT1_3000_6000': {'elec_demand': 3000, 'therm_demand': 6000, 'hot_water_demand': 1500,
'topology_path': 'input_files/models/prosumer_models/SCN0_CAT1',
'config_path': 'input_files/models/prosumer_models/SCN0_CAT1/config.csv',
'data_path': 'input_files/models/prosumer_models/SCN0_CAT1/data_path.csv',
'rolling_horizon': False}}
# PLEASE CHANGE HERE # PLEASE CHANGE HERE
# Select data source # Select data source
# Options: '1': import from database, '2': import from local folder # Options: '1': import from database, '2': import from local folder
...@@ -234,18 +230,25 @@ data_source = 2 # [1]: datasource from data bank; [2]: datasource from local ...@@ -234,18 +230,25 @@ data_source = 2 # [1]: datasource from data bank; [2]: datasource from local
commentary = False commentary = False
t_start = pd.Timestamp("2019-07-01 00:00:00") t_start = pd.Timestamp("2019-07-01 00:00:00")
t_end = pd.Timestamp("2019-7-10 23:00:00") + pd.Timedelta(hours=1) t_end = pd.Timestamp("2019-07-10 23:00:00") + pd.Timedelta(hours=1)
t_step = 1 t_step = 1
t_rh_horizon = (t_end - t_start) / pd.Timedelta(hours=1) t_rh_horizon = (t_end - t_start) / pd.Timedelta(hours=1)
t_horizon = (pd.Timestamp(t_end) - pd.Timestamp(t_start)) / np.timedelta64(t_step, 'h') t_horizon = (pd.Timestamp(t_end) - pd.Timestamp(t_start)) / np.timedelta64(t_step, 'h')
"------------------------Communnity Asset---------------------------"
"-------------------------------------------------------------------"
topology_path = 'input_files/district_models/jbr_test_ca' topology_path = 'input_files/models/district_models/example_CA'
config_path = topology_path + '/config.csv' config_path = topology_path + '/config.csv'
data_path = topology_path + '/data_path.csv' data_path = topology_path + '/data_path.csv'
ca_dict = {'ca_bat': {'topology_path': topology_path, 'config_path': config_path, 'data_path': data_path}} ca_dict = {'ca_bat': {'elec_demand': 0,
ca_dict = {} 'therm_demand': 0,
'hot_water_demand': 0,
'topology_path': topology_path,
'config_path': config_path,
'data_path': data_path}}
#ca_dict = {}
# PLEASE CHANGE HERE # PLEASE CHANGE HERE
# Prediction settings # Prediction settings
predictions = {'demand_electric': 'SameHourYesterday', predictions = {'demand_electric': 'SameHourYesterday',
...@@ -263,14 +266,8 @@ storage_states = {} ...@@ -263,14 +266,8 @@ storage_states = {}
interim_results = {} interim_results = {}
final_iteration = False final_iteration = False
# ca_strategy = 'sizing_min_costs'
# ca_strategy = 'sizing_max_wholesale_profit'
ca_strategy = 'sizing_max_operational_profit' ca_strategy = 'sizing_max_operational_profit'
comm_strategy_list = ['max_operational_profit']
tax_model = 'no internal taxes' # no internal taxes
# create time steps list # create time steps list
time_steps = [] time_steps = []
for t in pd.date_range(pd.Timestamp(t_start), for t in pd.date_range(pd.Timestamp(t_start),
...@@ -284,255 +281,32 @@ t_rh_shift = 0 ...@@ -284,255 +281,32 @@ t_rh_shift = 0
t_current_value_length = (t_end-t_start)/np.timedelta64(t_step, 'h') t_current_value_length = (t_end-t_start)/np.timedelta64(t_step, 'h')
t_history = t_horizon/24 t_history = t_horizon/24
"""---------------------------COMMUNITY-LEVEL----------------------"""
"""----------------------------------------------------------------"""
# initialize community component in the same way prosumers are. # initialize community component in the same way prosumers are.
# The difference is that they are not directly optimized # The difference is that they are not directly optimized
start_ca_init = time.time() comm_assets = main.Main_CA(data_source, ca_dict, t_start, t_end, t_step, predictions, t_current_value_length, t_end,
comm_assets = main_ca.Main(data_source, ca_dict, t_start, t_end, t_step, predictions, t_current_value_length, t_end,
t_history, commentary, storage_states, t_rh_shift, aggregation=False) t_history, commentary, storage_states, t_rh_shift, aggregation=False)
end_ca_init = time.time()
topology_path = 'input_files/models/district_models/jbr_test_comm'
"""---------------------------COMMUNITY-LEVEL----------------------"""
"""----------------------------------------------------------------"""
topology_path = 'input_files/models/district_models/example_community'
config_path = topology_path + '/config.csv' config_path = topology_path + '/config.csv'
data_path_comm = topology_path + '/data_path.csv' data_path_comm = topology_path + '/data_path.csv'
comm_dict = {'community': {'topology_path': topology_path, 'config_path': config_path, 'data_path': data_path_comm}} comm_dict = {'community': {'topology_path': topology_path, 'config_path': config_path, 'data_path': data_path_comm}}
# Initialize community comm_strategy = ['max_operational_profit']
# injection price: 2019 # changes with size!
# bis10kwp: 8.16 (01.01.21), bis 40: 7.93
# (https://www.solaranlagen-portal.com/photovoltaik/wirtschaftlichkeit/einspeiseverguetung)
# network usage fees: all from Regionetz
# levy reduction: just 40% of EEG is calculated for self consumed electricity
# (as in https://www.buendnis-buergerenergie.de/fileadmin/user_upload/2020-03-06_EnergyBrainpool_Impulspapier-Energy-Sharing.pdf)
# general levies: https://www.netztransparenz.de/
# consession fees: city of Aachen ( < 500000 people) # (ct/kWh)
# https://www.acteno.de/ecms/de/energieanwendungen-4-0/konzessionsabgabe.html
# electrixity tax reduction: assumption from https://www.buendnis-buergerenergie.de/fileadmin/user_upload/2020-03-06_EnergyBrainpool_Impulspapier-Energy-Sharing.pdf
comm_config = {'injection_price': pd.Series(data=0.0793, index=time_steps),
'injection/pvpeak': 0.7,
'gas_price': 0.0606, # 2019 https://www.verivox.de/gas/verbraucherpreisindex/
'network_usage_capacity_fee_low': 14.79, # in €/kWa if yearly full load hours are less than 2500 h
'network_usage_energy_fee_low': 0.0506, # in €/kWh if yearly full load hours are less than 2500 h
# high tariffs are not used for now.
# The program does not know if the community has less or more than 2500 full load hours
'network_usage_capacity_fee_high': 50.54, # in €/kWa if yearly full load hours are more than 2500 h
'network_usage_energy_fee_high': 0.0363, # in €/kWh if yearly full load hours are more than 2500 h
'levies_int': 0.0276,
'levies_ext': 0.0496, # in ct/kWh (KWK Umlage + Offshore Umlage + AbLV Umlage + StromNEV Umlage + EEG)
'concession': 0.0199,
'electricity_tax_int': 0,
'electricity_tax_ext': 0.0205, # in ct/kWh if not self consumed
'VAT': 0.19, # in %
'heat_price': 0,
'cooling_price': 0,
'injection_price_gas': 0,
'injection_price_heat': 0,
'injection_price_cooling': 0,
'elec_emission': 0.401, # 2019
'gas_emission': 0.21,
'yearly_interest': 0.03, # letmathe
'planning_horizon': 20}
comm_data_path_dict = dict.fromkeys('community')
comm_data_path_dict['community'] = scripts.extract_inputs.get_path(data_path_comm)
comm_data = scripts.extract_inputs.run(comm_data_path_dict, 'community', 1)
elec_price = comm_data['elec_price'].iloc[:,0]
comm_config['spot_price'] = elec_price
comm_config['injection_price'] = elec_price
if tax_model == 'no internal taxes':
comm_config['elec_price_int_low'] = comm_config['spot_price']
comm_config['elec_price_int_high'] = comm_config['spot_price']
elif tax_model == 'internal taxes':
comm_config['elec_price_int_low'] = (comm_config['spot_price'] + comm_config['network_usage_energy_fee_low']
+ comm_config['levies_int'] + comm_config['concession'] + comm_config[
'electricity_tax_int']) \
* (1 + comm_config['VAT'])
comm_config['elec_price_int_high'] = (comm_config['spot_price'] + comm_config['network_usage_energy_fee_high']
+ comm_config['levies_int'] + comm_config['concession'] + comm_config[
'electricity_tax_int']) \
* (1 + comm_config['VAT'])
else: community_main = main_district.MainDistrict(final_prosumer_dict,
print('No valid tax model') comm_assets,
time_steps,
comm_config['elec_price_ext_low'] = (comm_config['spot_price'] + comm_config['network_usage_energy_fee_low'] t_step,
+ comm_config['levies_ext'] + comm_config['concession'] + comm_config[ comm_dict,
'electricity_tax_ext']) \ ca_strategy,
* (1 + comm_config['VAT']) comm_strategy,
comm_config['elec_price_ext_high'] = (comm_config['spot_price'] + comm_config['network_usage_energy_fee_high'] t_horizon)
+ comm_config['levies_ext'] + comm_config['concession'] + comm_config[
'electricity_tax_ext']) \
* (1 + comm_config['VAT'])
# Create object of BaseCommunity-class
#ToDo JBR: has to happen in community main
start_comm_init = time.time()
community = comm_model.BaseComm('TestComm', {'Prosumer': final_prosumer_dict, 'Comm_Comps': comm_assets.comm_assets},
time_steps, {'Prosumer': final_prosumer_dict.keys(), 'Comm_Comps': comm_assets.comm_assets.keys()},
t_step, comm_config, ca_strategy)
end_comm_init = time.time()
"""---------------------ROLLING HORIZON (RH) ------------------------------------------------------------------------"""
"""-----------------------------------------------------------------------------------------------------------------"""
t_overlap_default = 0 # deafult: 0; nr of timesteps the starting point of each interval is shifted into the prior interval
t_interval_default = 4*24 # default: 4*24; number of time steps in one interval of the RH
t_start_rh = time_steps[0] # start of
t_last = 0
t_interval = t_interval_default / t_step
last_t_interval = t_interval_default / t_step
last_t_overlap = t_overlap_default / t_step
t_overlap = t_overlap_default / t_step
start_rh = time.time()
n_change = 0
while t_last * t_step < t_horizon:
print('-------------------' + str(t_last) + '-----------------')
# PREPARE INTERVAL
if (t_last + t_interval - t_overlap) * t_step < t_horizon and (t_last + t_interval) * t_step <= t_horizon:
# interval does not reach overall end
t_horizon_rh = int(t_interval) # number of time steps in this RH-interval
t_fix = max(t_horizon_rh - int(t_overlap), 1) # number of fixed time steps of this RH-interval
elif (t_last + t_interval - t_overlap) * t_step < t_horizon and (t_last + t_interval) * t_step > t_horizon:
# interval reaches end of horizon, but just without overlap
t_horizon_rh = int(len(time_steps) - t_last) # interval length is adapted
t_fix = max(int(t_interval) - int(t_overlap), 1) # but not the number of fixed time steps
else:
# fixed time steps hit overall time horizon --> adapt also the fixed time steps
t_horizon_rh = int(len(time_steps) - t_last)
t_fix = t_horizon_rh
# extract relevant timesteps values from overall time series
time_steps_rh = time_steps[t_last:t_last + t_horizon_rh] # whole interval size
time_steps_fix = time_steps[t_last:t_last + t_fix] # without overlap
# STEP 1 --> SCHEDULE EXTRACTION
start_schedule = time.time()
for ps in final_prosumer_dict.values():
valid = ps.calc_rh_step_schedule(time_steps_rh)
if not valid:
n_change += 1
t_interval = int(math.floor(t_interval / 2))
if t_interval == 0:
t_interval = 1
if t_interval < t_overlap:
t_overlap = t_interval - 1
break
if not valid:
continue
for cc in community.comm_assets.values():
valid = cc.calc_rh_step_schedule(time_steps_rh)
if not valid:
n_change += 1
t_interval = int(math.floor(t_interval / 2))
if t_interval == 0:
t_interval = 1
if t_interval < t_overlap:
t_overlap = t_interval - 1
break
if not valid:
continue
end_schedule = time.time()
# STEP 2 --> DF IDENTIFICATION
start_flex_calc = time.time()
for ps in final_prosumer_dict.values():
ps.calc_flex(time_steps_rh)
for cc in community.comm_assets.values():
cc.calc_flex(time_steps_rh)
# aggregate prosumer DFs in community
community.aggregate_temp(time_steps_rh, final_prosumer_dict, community.comm_assets)
end_flex_calc = time.time()
# COMMUNITY OPTIMIZATION
start_opt = time.time()
valid = community.run_optimization(time_steps_rh, comm_strategy_list, timelimit=200)
if not valid:
n_change += 1
t_interval = int(math.floor(t_interval / 2))
if t_interval == 0:
t_interval = 1
if t_interval < t_overlap:
t_overlap = t_interval - 1
continue
end_opt = time.time()
# VALIDATION
start_validation = time.time()
valid = community.validate_result(time_steps_fix)
if valid:
# fix results...
for ps in final_prosumer_dict.values():
ps.fix_results(time_steps_fix)
for cc in community.comm_assets.values():
cc.fix_results(time_steps_fix)
community.aggregate_fix(time_steps_fix)
# ...and go to next interval
t_last = int(t_last + max(t_interval - t_overlap, 1))
t_interval = t_interval_default * 1 / t_step
t_overlap = t_overlap_default * 1 / t_step
elif not valid:
# nr of interval changes
n_change += 1
t_interval = int(math.floor(t_interval / 2))
if t_interval == 0:
t_interval = 1
if t_interval < t_overlap:
t_overlap = t_interval - 1
if not valid and t_interval < 1:
print('Unteres Limit erreicht')
end_validation = time.time()
end_rh = time.time()
# ------------POST-PROCESS----------------- # ------------POST-PROCESS-----------------
start_analysis = time.time()
community.analyze_results_new()
# community.plot_grid_exchange()
end_analysis = time.time()
start_write = time.time()
folder_name = 'EC_' + str(len(final_prosumer_dict)) + '_' + str(t_horizon / 24) + '_' + str(t_interval_default) + '_' + str(t_overlap_default)
flag = True
iter = 0
while flag:
#ToDo JBR: apply Felix changes
try:
os.mkdir('output_files/' + folder_name)
flag = False
except FileExistsError:
iter += 1
folder_name = folder_name + '_' + str(iter)
#ToDO jbr: fix this later when the new main is working
"""
prosumers.show_results_agents(final_prosumer_dict.keys(), folder_name)
if comm_assets:
comm_assets.show_results_agents(comm_assets.comm_assets.keys(), folder_name)
#Todo JBR: change to community main
prosumers.export_comm(community, time_steps, folder_name)
"""
end_write = time.time()
end_total = time.time()
......
# runme.py is the central script to execute the optimization of the community.
# Importing the necessary files
import numpy as np
import Model_Library.District.scripts as scripts
from datetime import timedelta
import math
import time
import pandas as pd
import Model_Library.Prosumer.scripts as scripts
import Model_Library.Prosumer.main as main
import Model_Library.Prosumer.scripts.extract_inputs as extract_inputs
from functools import partial
from multiprocessing import Pool
from tqdm import tqdm
import os
import argparse
from Model_Library.Prosumer.scripts.results_evaluation.results_evaluation import Plot_savings
import Model_Library.Prosumer.main_ca as main_ca
import Model_Library.District.main_district as main_district
def process_each_prosumer(prosumer_name, prosumer_dict, data_source, commentary, no_process_bar_rh):
    """Optimize a single prosumer and return its optimized prosumer object.

    Depending on the prosumer's 'rolling_horizon' flag the simulation is either
    split into rolling-horizon (RH) intervals that are optimized sequentially
    (storage states and interim results carried from one interval to the next),
    or run as one single interval over the whole horizon with perfect-foresight
    predictions.

    Parameters:
        prosumer_name: key into prosumer_dict selecting the prosumer to run.
        prosumer_dict: mapping of prosumer names to their configuration
            (demands, topology/config/data paths, 'rolling_horizon' flag).
        data_source: data origin selector passed through to main.Main
            (1: database, 2: local folder, per the caller's comments).
        commentary: verbosity flag passed through to main.Main.
        no_process_bar_rh: if True, disables the tqdm progress bar over the
            rolling-horizon intervals.

    Returns:
        The optimized prosumer object taken from main.Main's internal registry,
        or None (implicitly) when a ValueError occurred during optimization.
    """
    try:
        # PLEASE CHANGE HERE
        # Set the simulation time frame and optional rolling horizon configurations:
        # 't_start': start date of simulations, Unit: JJJJ-MM-DD hh:mm:ss
        # 't_end': end date of simulations, Unit: JJJJ-MM-DD hh:mm:ss
        # 't_step': granularity of optimization model, Unit: hours
        # Rolling horizon (RH) can be set by:
        # 't_rh_horizon': width of rolling horizon intervals, Unit: hours, MINIMUM VALUE: 2 !!!
        # 't_rh_shift': rolling horizon shift between intervals, Unit: hours
        # 't_current_value_length': number of values at beginning of rolling horizon interval that are replaced by real values, Unit: hours
        # 't_history': number of days before actual simulation interval for the demand generator to be able to make required predictions
        if prosumer_dict[prosumer_name]['rolling_horizon']:
            # Rolling-horizon mode: short intervals, history-based predictions.
            t_start = pd.Timestamp("2019-09-01 00:00:00")
            t_end = pd.Timestamp("2019-09-01 5:00:00")
            t_step = 1
            t_rh_horizon = 3
            t_rh_shift = 1
            t_current_value_length = 2
            t_history = 14  # days
            # PLEASE CHANGE HERE
            # Prediction settings
            predictions = {'demand_electric': 'SameHourYesterday',
                           'demand_heat': 'SameHourYesterday',
                           'day_ahead_price': 'SameHourYesterday',
                           'intraday_price': 'SameHourYesterday',
                           'solar_radiation': 'Perfect',
                           # currently the method generate_g_t_series takes the same t_start as the prediction -> no historical
                           # data for the prediction available: easy fix would be to set a minus time delta in the t_start
                           # argument of generate_g_t_series
                           'temperature': 'SameHourYesterday'}
        else:
            # One-shot mode: a single interval spanning the full horizon with
            # perfect foresight; t_rh_shift is chosen so the date_range below
            # yields exactly one iteration.
            t_start = pd.Timestamp("2019-07-01 00:00:00")
            t_end = pd.Timestamp("2019-7-10 23:00:00") + pd.Timedelta(hours=1)
            t_step = 1
            t_rh_horizon = (t_end - t_start) / pd.Timedelta(hours=1)
            t_rh_shift = t_rh_horizon - 1
            t_current_value_length = t_rh_horizon
            t_history = 0  # days
            # PLEASE CHANGE HERE
            # Prediction settings
            predictions = {'demand_electric': 'Perfect',
                           'demand_heat': 'Perfect',
                           'day_ahead_price': 'Perfect',
                           'intraday_price': 'Perfect',
                           'solar_radiation': 'Perfect',
                           # currently the method generate_g_t_series takes the same t_start as the prediction -> no historical
                           # data for the prediction available: easy fix would be to set a minus time delta in the t_start
                           # argument of generate_g_t_series
                           'temperature': 'Perfect'}
        # Fixed variables - DO NOT CHANGE
        storage_states = {}
        interim_results = {}
        final_iteration = False
        # Set aggregation options
        # NOTE(review): argparse reads sys.argv inside this worker function, so
        # every (possibly parallel) invocation re-parses the command line and
        # unknown CLI arguments would abort the worker - confirm this is intended.
        parser = argparse.ArgumentParser(description='Start optimization from DB or local data')
        parser.add_argument('-a', '--aggregate', action="store_true", dest="aggregate",
                            help="activating aggregation of input time series", default=False)
        options = parser.parse_args()
        # Calculate number of rolling horizon intervals and loop through them
        for t in tqdm(pd.date_range(t_start, t_end - pd.Timedelta(hours=t_rh_shift + 1), freq=str(t_rh_shift) + 'H'), disable=no_process_bar_rh):
            # ToDo: replace first value with perfect value (can be done in runme)
            # set end date for current loop
            t_end_loop = t + pd.Timedelta(hours=t_rh_horizon)
            # exceptions that occur at global end of simulation horizon
            if t_end_loop > t_end:
                t_end_loop = t_end
            # shrink the "current value" window when the last interval is shorter
            if t_current_value_length > (t_end_loop - t) / pd.Timedelta(hours=1):
                t_current_value_length = (t_end_loop - t) / pd.Timedelta(hours=1)
            # Set flag for final iteration
            if t == t_end - pd.Timedelta(hours=t_rh_shift + 1):
                final_iteration = True
            # Start main programme
            prosumer = main.Main(data_source, {prosumer_name: prosumer_dict[prosumer_name]}, t, t_end_loop, t_step,
                                 predictions, t_current_value_length, t_end,
                                 t_history, commentary, storage_states, t_rh_shift, aggregation=options.aggregate)
            # Run optimization
            prosumer.run_optimization(prosumer.prosumer_name_list)
            # Show results - Results are only plotted after last iteration of rolling horizon
            prosumer.show_results(prosumer.prosumer_name_list, interim_results, final_iteration)
            # Get storage states from this iteration
            storage_states = prosumer.charge_status
            # Get interim results of current rolling horizon interval
            interim_results = prosumer.interim_results
        # returns the prosumer object from the last RH interval
        return prosumer.prosumer[prosumer_name]
    except ValueError:
        # NOTE(review): swallowing ValueError makes this function fall through
        # and return None; callers store that None in final_prosumer_dict -
        # verify downstream code tolerates missing prosumers.
        print(prosumer_name+" could not be optimized!")
# # MAIN PROGRAM -------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # ================= PROSUMER PART =================
    # Initialization scenario path and global variables for the prosumer optimization
    # Start timer
    start = time.time()
    # PLEASE CHANGE HERE
    # Path to local data - this is only used when selecting local mode
    # 'topology_path': path to matrices that define the prosumer topology
    # 'config_path': path to global configurations like prices, injection prices, emission costs, etc.
    #topology_path = 'input_files/models/Study_Base'
    #config_path = topology_path + '/config.csv'
    #data_path = topology_path + '/data_path.csv'
    #prosumer_name = 'office'
    #prosumer_dict = {prosumer_name: {'topology_path': topology_path, 'config_path': config_path, 'data_path': data_path}}
    topology_path = ['input_files/models/prosumer_models/SCN0_CAT1']
    prosumer_name = ['SCN0_CAT1', 'SCN0_CAT1']
    rolling_horizon = [False]
    elec_demand = [1500, 10000]
    therm_demand = [5000, 20000]
    hot_water_demand = 1500#[1500, 1500]
    step_elec_demand = 500
    step_therm_demand = 500
    #step_hot_water_demand = 0
    prosumer_dict = {}
    # NOTE(review): the triple-quoted block below is disabled demand-sweep code
    # kept for reference; the hard-coded prosumer_dict after it takes effect.
    """ for i in range(len(prosumer_name)):
for j in range(elec_demand[0], elec_demand[1], step_elec_demand):
for k in range(therm_demand[0], therm_demand[1], step_therm_demand):
#for l in range(hot_water_demand[0], hot_water_demand[1], step_hot_water_demand):
prosumer_dict[prosumer_name[i]+'_'+str(j)+'_'+str(k)] = {'elec_demand': j,
'therm_demand': k,
'hot_water_demand': hot_water_demand,
'topology_path': topology_path[i],
'config_path': topology_path[i] + '/config.csv',
'data_path': topology_path[i] + '/data_path.csv',
'rolling_horizon': rolling_horizon[i]}"""
    # Hard-coded set of prosumers to optimize: two PV scenarios and two
    # reference (SCN0) households with varying electricity demand.
    prosumer_dict={'SCN2_CAT1_PV11_3000_6000': {'elec_demand': 3000, 'therm_demand': 6000, 'hot_water_demand': 1500,
                                                'topology_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV11',
                                                'config_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV11/config.csv',
                                                'data_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV11/data_path.csv',
                                                'rolling_horizon': False},
                   'SCN2_CAT1_PV12_BA_6000_6000': {'elec_demand': 6000, 'therm_demand': 6000, 'hot_water_demand': 1500,
                                                   'topology_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV12_BA',
                                                   'config_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV12_BA/config.csv',
                                                   'data_path': 'input_files/models/prosumer_models/SCN2_CAT1_PV12_BA/data_path.csv',
                                                   'rolling_horizon': False},
                   'SCN0_CAT1_6000_6000': {'elec_demand': 6000, 'therm_demand': 6000, 'hot_water_demand': 1500,
                                           'topology_path': 'input_files/models/prosumer_models/SCN0_CAT1',
                                           'config_path': 'input_files/models/prosumer_models/SCN0_CAT1/config.csv',
                                           'data_path': 'input_files/models/prosumer_models/SCN0_CAT1/data_path.csv',
                                           'rolling_horizon': False},
                   'SCN0_CAT1_3000_6000': {'elec_demand': 3000, 'therm_demand': 6000, 'hot_water_demand': 1500,
                                           'topology_path': 'input_files/models/prosumer_models/SCN0_CAT1',
                                           'config_path': 'input_files/models/prosumer_models/SCN0_CAT1/config.csv',
                                           'data_path': 'input_files/models/prosumer_models/SCN0_CAT1/data_path.csv',
                                           'rolling_horizon': False}}
    # PLEASE CHANGE HERE
    # Select data source
    # Options: '1': import from database, '2': import from local folder
    data_source = 2
    reference_results = {}
    commentary = False
    no_process_bar_rh = True
    parallel_processing = False
    # Timer output
    tic = time.time()
    # Start program
    # Run multiple independent prosumers in parallel on multiple cores
    final_prosumer_dict = dict.fromkeys(prosumer_dict.keys())
    if parallel_processing:
        # NOTE(review): the parallel branch collects results in mapped_values
        # but never writes them into final_prosumer_dict, which the community
        # part below consumes - confirm before enabling parallel_processing.
        count_processes = len(prosumer_dict.keys())
        pool = Pool(os.cpu_count())
        parallel_func = partial(process_each_prosumer, prosumer_dict=prosumer_dict, data_source=data_source, commentary=commentary, no_process_bar_rh=no_process_bar_rh)
        mapped_values = list(tqdm(pool.map(parallel_func, list(prosumer_dict.keys())), total=count_processes))
    # Normal processing, one core only
    else:
        # NOTE(review): this loop rebinds the module-level list prosumer_name
        # to the last dict key; the reference_results loop below then iterates
        # that string - verify this shadowing is intended.
        for prosumer_name in list(prosumer_dict.keys()):
            final_prosumer_dict[prosumer_name] = process_each_prosumer(prosumer_name= prosumer_name, prosumer_dict=prosumer_dict, data_source=data_source, commentary=commentary, no_process_bar_rh=no_process_bar_rh)
    # Timer output
    toc = time.time()
    if reference_results:
        for topology in prosumer_name:
            Plot_savings(reference_results, topology)
    # Timer output
    end = time.time()
    # Additional console output
    if commentary:
        print("============ Execution Times =============")
        print("Pre-processing [s]: \t" + str(tic - start))
        #print("(Interaction with database [s]:\t" + str(prosumer.connect_with_db) + ")")
        print("Optimization [s]: \t" + str(toc - tic))
        print("Post-processing [s]: \t" + str(end - toc))
        print("----------------------------------------")
        print("Total [s]: \t" + str((end - toc) + (toc - tic) + (tic - start)))
        print("==========================================")
    # ================= COMMUNITY PART =================
    # Re-initializes the time settings (mirroring the one-shot branch of
    # process_each_prosumer) and builds the community-level optimization.
    '-----------------COMMUNITY-PART----------------------------------'
    start_total = time.time()
    data_source = 2  # [1]: datasource from data bank; [2]: datasource from local
    commentary = False
    t_start = pd.Timestamp("2019-07-01 00:00:00")
    t_end = pd.Timestamp("2019-07-10 23:00:00") + pd.Timedelta(hours=1)
    t_step = 1
    t_rh_horizon = (t_end - t_start) / pd.Timedelta(hours=1)
    t_horizon = (pd.Timestamp(t_end) - pd.Timestamp(t_start)) / np.timedelta64(t_step, 'h')
    "------------------------Communnity Asset---------------------------"
    "-------------------------------------------------------------------"
    # Community asset (CA): a community-owned component (here a battery)
    # initialized like a prosumer but not directly optimized.
    topology_path = 'input_files/models/district_models/example_CA'
    config_path = topology_path + '/config.csv'
    data_path = topology_path + '/data_path.csv'
    ca_dict = {'ca_bat': {'elec_demand': 0,
                          'therm_demand': 0,
                          'hot_water_demand': 0,
                          'topology_path': topology_path,
                          'config_path': config_path,
                          'data_path': data_path}}
    #ca_dict = {}
    # PLEASE CHANGE HERE
    # Prediction settings
    predictions = {'demand_electric': 'SameHourYesterday',
                   'demand_heat': 'SameHourYesterday',
                   'day_ahead_price': 'SameHourYesterday',
                   'intraday_price': 'SameHourYesterday',
                   'solar_radiation': 'Perfect',
                   # currently the method generate_g_t_series takes the same t_start as the prediction -> no historical
                   # data for the prediction available: easy fix would be to set a minus time delta in the t_start
                   # argument of generate_g_t_series
                   'temperature': 'SameHourYesterday'}
    # Fixed variables - DO NOT CHANGE
    storage_states = {}
    interim_results = {}
    final_iteration = False
    # Sizing strategy for the community assets
    ca_strategy = 'sizing_max_operational_profit'
    # create time steps list
    time_steps = []
    for t in pd.date_range(pd.Timestamp(t_start),
                           pd.Timestamp(t_start) + timedelta(hours=t_horizon) - timedelta(hours=t_step),
                           freq=str(t_step)+'H'):
        time_steps.append(t)
    t_start = pd.Timestamp(t_start)
    t_end = pd.Timestamp(t_start) + timedelta(hours=t_horizon) - timedelta(hours=t_step)
    t_rh_shift = 0
    t_current_value_length = (t_end-t_start)/np.timedelta64(t_step, 'h')
    # NOTE(review): t_history here is in days as a float (t_horizon/24) -
    # confirm Main_CA accepts a non-integer history length.
    t_history = t_horizon/24
    # initialize community component in the same way prosumers are.
    # The difference is that they are not directly optimized
    comm_assets = main.Main_CA(data_source, ca_dict, t_start, t_end, t_step, predictions, t_current_value_length, t_end,
                               t_history, commentary, storage_states, t_rh_shift, aggregation=False)
    """---------------------------COMMUNITY-LEVEL----------------------"""
    """----------------------------------------------------------------"""
    topology_path = 'input_files/models/district_models/example_community'
    config_path = topology_path + '/config.csv'
    data_path_comm = topology_path + '/data_path.csv'
    comm_dict = {'community': {'topology_path': topology_path, 'config_path': config_path, 'data_path': data_path_comm}}
    # Operational strategy for the community optimization
    comm_strategy = ['max_operational_profit']
    # Build and run the district/community optimization.
    # NOTE(review): final_prosumer_dict may contain None entries for prosumers
    # whose optimization raised a ValueError - verify MainDistrict handles that.
    community_main = main_district.MainDistrict(final_prosumer_dict,
                                                comm_assets,
                                                time_steps,
                                                t_step,
                                                comm_dict,
                                                ca_strategy,
                                                comm_strategy,
                                                t_horizon)
    # ------------POST-PROCESS-----------------
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment