diff --git a/Model_Library b/Model_Library
index ebf6f6bbbd721cd1a91c1f51bd460896aee297a9..c3fb8e02ab0938692ad3695e4deedb3d59aa6ad4 160000
--- a/Model_Library
+++ b/Model_Library
@@ -1 +1 @@
-Subproject commit ebf6f6bbbd721cd1a91c1f51bd460896aee297a9
+Subproject commit c3fb8e02ab0938692ad3695e4deedb3d59aa6ad4
diff --git a/runme_community.py b/runme_community.py
index df096e93d91c935d308c0c26c5e8be0a6a3a8b3c..82316ef5ffed9a860d9c8c3c8dccfeb79f22bb40 100644
--- a/runme_community.py
+++ b/runme_community.py
@@ -12,11 +12,11 @@ from Model_Library.Prosumer.scripts.results_evaluation.results_evaluation import
 import Model_Library.District.main_district as main_district
 
 
-def process_each_prosumer(prosumer_name, prosumer_specification, commentary, t_start, t_horizon, t_step, t_history, prosumer_strategy):
+def process_each_prosumer(prosumer_name, prosumer_specification, commentary, t_start, t_horizon, t_step, prosumer_strategy):
     try:
         before_setup = time.time()
         # Start main programme
-        prosumer = main.Main(prosumer_name, prosumer_specification, t_start, t_horizon, t_step, t_history, commentary)
+        prosumer = main.Main(prosumer_name, prosumer_specification, t_start, t_horizon, t_step, commentary)
         after_setup = time.time()
         print("process_each_prosumer:\tProsumer Construction [s]: \t" + str(after_setup - before_setup))
 
@@ -40,7 +40,6 @@ if __name__ == "__main__":
     t_start = pd.Timestamp("2019-05-10 00:00:00") # start time of simulation
     t_horizon = 240 # number of time steps to be simulated
     t_step = 1 # length of a time step in hours
-    t_history = 0 # number of hours before actual simulation interval for the demand generator to be able to make required predictions
 
     # Path to local data - this is only used when selecting local mode
     # 'data_path': path to file specifying where input profiles are located
@@ -73,12 +72,12 @@ if __name__ == "__main__":
     if parallel_processing:
         count_processes = len(prosumer_dict.keys())
         pool = Pool(os.cpu_count())
-        parallel_func = partial(process_each_prosumer, commentary = commentary, t_start = t_start, t_horizon = t_horizon, t_step = t_step, t_history = t_history, prosumer_strategy = ps_strategy)
+        parallel_func = partial(process_each_prosumer, commentary = commentary, t_start = t_start, t_horizon = t_horizon, t_step = t_step, prosumer_strategy = ps_strategy)
         mapped_values = list(tqdm(pool.map(parallel_func, list(prosumer_dict.keys()), list(prosumer_dict.values())), total = count_processes))
     # Normal processing, one core only
     else:
         for prosumer_name in list(prosumer_dict.keys()):
-            final_prosumer_dict[prosumer_name] = process_each_prosumer(prosumer_name, prosumer_dict[prosumer_name], commentary, t_start, t_horizon, t_step, t_history, ps_strategy)
+            final_prosumer_dict[prosumer_name] = process_each_prosumer(prosumer_name, prosumer_dict[prosumer_name], commentary, t_start, t_horizon, t_step, ps_strategy)
 
     after_optimization = time.time()
     print("runme:\t\t\tProsumer Optimization [s]: \t" + str(after_optimization - before_optimization))
@@ -110,7 +109,7 @@ print("runme:\t\t\tCommunity Assets Setup [s]: \t" + str(before_community_assets
 
 # initialize community component in the same way prosumers are.
 # The difference is that they are not directly optimized
-comm_assets = main.Main_CA(ca_dict, t_start, t_horizon, t_step, t_history, commentary)
+comm_assets = main.Main_CA(ca_dict, t_start, t_horizon, t_step, commentary)
 after_community_assets = time.time()
 print("runme:\t\t\tCommunity Assets Constr. [s]: \t" + str(after_community_assets - before_community_assets))