diff --git a/.gitignore b/.gitignore index c6c3e3b..8660641 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,30 @@ -/Utilities/raw_output/price_history_ACN_state9_week1_ACN.csv -/Utilities/raw_output/requests_ACN_state9_week1_ACN.csv -/Utilities/raw_output/storage_ACN_state9_week1_ACN.csv -/Utilities/raw_output/training_results_pricing_double_capa_500_2_average_power_m_200_m_post_tuning.csv -/Utilities/raw_output/training_results_pricing_double_tra_500_2_average_power_m_200_m_post_tuning.csv -/Utilities/raw_output/load.csv -/Utilities/raw_output/parking_data.csv -/Utilities/raw_output/CSs_ACN_state9_week1_ACN_pricing_double_tra_500_2_average_power_m_200_m_post_tuning.csv -/Utilities/raw_output/empty.py +# IDE and environment files +/.idea/ +/.venv/ + +# Python cache +__pycache__/ +*.pyc + +# Log files +*.log + +# Data files +*.csv +*.pkl +*.pstat + +# macOS files +.DS_Store + +# Output directories +/Utilities/raw_output/ +/Results/raw_output/ +/Cache/ + +# Root level data files +chargingdata.csv +experience.csv +file.csv +investment_results_Facility_1_2019-06-03.csv +investment_results_Facility_KoeBogen_2019-06-03.csv \ No newline at end of file diff --git a/.idea/.gitignore b/.idea/.gitignore deleted file mode 100644 index 13566b8..0000000 --- a/.idea/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -# Default ignored files -/shelf/ -/workspace.xml -# Editor-based HTTP Client requests -/httpRequests/ -# Datasource local storage ignored files -/dataSources/ -/dataSources.local.xml diff --git a/.idea/EVCH_OM.iml b/.idea/EVCH_OM.iml deleted file mode 100644 index c6c7ca6..0000000 --- a/.idea/EVCH_OM.iml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml deleted file mode 100644 index ce18f44..0000000 --- a/.idea/inspectionProfiles/Project_Default.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml deleted file mode 100644 index 105ce2d..0000000 --- a/.idea/inspectionProfiles/profiles_settings.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml deleted file mode 100644 index 7081b01..0000000 --- a/.idea/misc.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index 61f4670..0000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 94a25f7..0000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/Environment/helper/configuration/configuration.py b/Environment/helper/configuration/configuration.py index 05808ec..d4cc84e 100644 --- a/Environment/helper/configuration/configuration.py +++ b/Environment/helper/configuration/configuration.py @@ -59,7 +59,7 @@ def __init__(self): self.price_sensitivity = price_sensitivity peak_penalty = "m" # l, m, h self.adjust_peak_penalty(peak_penalty) - charging_algorithm = "average_power" # average_power, least_laxity_first + charging_algorithm = "average_power" # average_power, least_laxity_first, perfect_info self.charging_algorithm = charging_algorithm self.peak_penalty = peak_penalty PV = 500 diff --git a/Operations/ChargingAlgorithms.py b/Operations/ChargingAlgorithms.py 
index 2b769d7..63e5cbf 100644 --- a/Operations/ChargingAlgorithms.py +++ b/Operations/ChargingAlgorithms.py @@ -18,7 +18,12 @@ # UNCONTROLLED CHARGING# -def uncontrolled(env, connected_vehicles, charging_capacity, planning_period_length): +def uncontrolled(env, + connected_vehicles, + charging_stations, + charging_capacity, + free_grid_capacity, + planning_period_length,): """ :param env: :param requests: @@ -348,7 +353,12 @@ def custom_priority(charging_stations, charging_capacity, free_grid_capacity): def equal_sharing( - charging_stations, charging_capacity, free_grid_capacity, free_battery_capacity + env, + connected_vehicles, + charging_stations, + charging_capacity, + free_grid_capacity, + planning_period_length, ): """ Computes same-period charging power per each connected vehicle by cascading available grid power equally through the network @@ -400,9 +410,10 @@ def equal_sharing( def average_power( env, connected_vehicles, + charging_stations, charging_capacity, - planning_period_length, free_grid_capacity, + planning_period_length, ): """ :param env: diff --git a/Operations/IntegratedAlgorithms.py b/Operations/IntegratedAlgorithms.py index 7c8de05..df48ab6 100644 --- a/Operations/IntegratedAlgorithms.py +++ b/Operations/IntegratedAlgorithms.py @@ -34,7 +34,7 @@ def perfect_info_charging_routing( mdl = Model("perfect_info_scaling") vehicle_range = [] delta_time = 60 - sim_duration = 5 + sim_duration = 1 for i in vehicles: vehicle_range.append(i.id) diff --git a/Operations/operator.py b/Operations/operator.py index 6261c86..3675157 100644 --- a/Operations/operator.py +++ b/Operations/operator.py @@ -89,12 +89,6 @@ def __init__( self.pricing_agent = None self.minimum_served_demand = minimum_served_demand self.agent_name = "PGMM" # PGM or n_step - self.action = np.zeros(121) - self.storage_action = [0] - # self.pricing_action = None - # self.state = None - # self.pricing_state = None - self.storage_state = None self.generation_min = 0 self.peak_threshold = Configuration.instance().peak_threshold self.peak_cost = Configuration.instance().peak_cost @@ -166,28 +160,6 @@ def get_available_battery_load(self): self.free_battery_load_capa = battery_max - # pricing for different power usages - - def get_power_prices(self, mode): - """ - Routes new arrivals to charging stations. This is on a discrete event basis, i.e. per each arrival (as opposed to the ca) - :param routing_strategy: - :return: - """ - while True: - self.get_exp_free_grid_capacity() - self.update_vehicles_status() - self.take_pricing_action() - self.price_history = pd.concat( - [self.price_history, pd.DataFrame(self.price_pairs[:, 1]).transpose()] - ) - self.update_peak_load_history() - if mode == "discrete_time": - yield self.env.timeout(self.planning_interval) - if mode == "discrete_event": - yield self.arrival_event - self.update_pricing_agent() - # routing def get_routing_instructions(self, request): """ @@ -220,266 +192,222 @@ def get_routing_instructions(self, request): return charger # charging - def get_charging_schedules_and_prices(self, charging_strategy, mode): - """ - Periodically updates charging schedule based on selected strategy. Decides which vehicle charges and how much! - This is on a discrete time basis - :param scheduling_mode: simulation mode (discrete-time or discrete-event) - :param charging_strategy: - :param planning_period_length: length of period (in unit sim time). 
Schedule is re-computed every n(=period_length) time steps - :return: n/a - """ - first_scheduling = False - while True: - if charging_strategy in ["perfect_info", "perfect_info_with_storage"]: - def schedule_charging(strategy): - self.get_exp_free_grid_capacity() - connected_vehicles = [x for x in self.requests if x.mode is None and x.ev == 1] - - if strategy == "perfect_info": - integrate_algos.perfect_info_charging_routing( - vehicles=connected_vehicles, - charging_stations=self.chargers, - env=self.env, - grid_capacity=self.free_grid_capa_actual, - electricity_cost=self.electricity_tariff, - baseload=self.base_load_list, - sim_time=self.sim_time, - generation=self.generation_list, - ) - elif strategy == "perfect_info_with_storage" and connected_vehicles: - integrate_algos.perfect_info_charging_routing_storage( - vehicles=connected_vehicles, - charging_stations=self.chargers, - env=self.env, - grid_capacity=self.free_grid_capa_actual, - electricity_cost=self.electricity_tariff, - sim_time=self.sim_time, - storage=self.electric_storage, - baseload=self.baseload_list, - ) + def schedule_charging_and_routing_perfect_info(self, strategy): + self.get_exp_free_grid_capacity() + connected_vehicles = [x for x in self.requests if x.mode is None and x.ev == 1] - if not first_scheduling: - schedule_charging(charging_strategy) - first_scheduling = True - - hour = int((self.env.now % 1440) / 60) if charging_strategy == "perfect_info_with_storage" else int( - self.env.now / 60) - - for request in self.requests: - if request.ev == 1: - request.charging_power = request.charge_schedule[hour] - - if charging_strategy == "perfect_info_with_storage" and self.storage_object.max_capacity_kWh > 0: - storage_power = self.electric_storage.charge_schedule[hour] - if storage_power >= 0: - self.electric_storage.charge_yn = 1 - self.electric_storage.discharge_yn = 0 - self.electric_storage.discharging_power = 0 - self.electric_storage.charging_power = storage_power - else: - self.electric_storage.charge_yn = 0 - self.electric_storage.discharge_yn = 1 - self.electric_storage.discharging_power = storage_power - self.electric_storage.charging_power = 0 + if strategy == "perfect_info": + integrate_algos.perfect_info_charging_routing( + vehicles=connected_vehicles, + charging_stations=self.chargers, + env=self.env, + grid_capacity=self.free_grid_capa_actual, + electricity_cost=self.electricity_tariff, + baseload=self.base_load_list, + sim_time=self.sim_time, + generation=self.generation_list, + ) + elif strategy == "perfect_info_with_storage" and connected_vehicles: + integrate_algos.perfect_info_charging_routing_storage( + vehicles=connected_vehicles, + charging_stations=self.chargers, + env=self.env, + grid_capacity=self.free_grid_capa_actual, + electricity_cost=self.electricity_tariff, + sim_time=self.sim_time, + storage=self.electric_storage, + baseload=self.baseload_list, + ) - if self.charging_hub.dynamic_pricing: - self.get_exp_free_grid_capacity() - self.update_vehicles_status() - self.take_pricing_action() - if self.pricing_mode == "Discrete": - self.price_history = pd.concat( - [ - self.price_history, - pd.DataFrame(self.price_pairs[:, 1]).transpose(), - ] - ) - if self.pricing_mode == "Continuous": - # print([self.pricing_parameters[1], self.parking_fee]) - self.price_history = pd.concat( - [ - self.price_history, - pd.DataFrame( - [self.pricing_parameters[0], self.pricing_parameters[1]] - ).transpose(), - ] - ) + def apply_charging_routing_storage_perfect_info(self, charging_strategy): + hour = 
int((self.env.now % 1440) / 60) if charging_strategy == "perfect_info_with_storage" else int( + self.env.now / 60) + for request in self.requests: + if request.ev == 1: + request.charging_power = request.charge_schedule[hour] + + if charging_strategy == "perfect_info_with_storage" and self.storage_object.max_capacity_kWh > 0: + storage_power = self.electric_storage.charge_schedule[hour] + if storage_power >= 0: + self.electric_storage.charge_yn = 1 + self.electric_storage.discharge_yn = 0 + self.electric_storage.discharging_power = 0 + self.electric_storage.charging_power = storage_power else: - self.get_exp_free_grid_capacity() - self.update_vehicles_status() - if self.pricing_mode == "ToU": - hour = int((self.env.now % 1440) / 60) - self.pricing_parameters[0] = ( - self.electricity_tariff[hour] - / max(self.electricity_tariff) - * Configuration.instance().max_price_ToU - ) - if self.pricing_mode == "perfect_info": - if Configuration.instance().dynamic_fix_term_pricing: - self.pricing_parameters[1] = self.price_schedules[1][hour] - self.pricing_parameters[0] = self.price_schedules[0][hour] - else: - self.pricing_parameters[1] = self.price_schedules[hour] - if self.pricing_mode == "Discrete": - self.price_history = pd.concat( - [ - self.price_history, - pd.DataFrame(self.price_pairs[:, 1]).transpose(), - ] - ) - if self.pricing_mode == "Continuous" or self.pricing_mode == "ToU": - self.price_history = pd.concat( - [ - self.price_history, - pd.DataFrame( - [self.pricing_parameters[0], self.pricing_parameters[1]] - ).transpose(), - ] - ) + self.electric_storage.charge_yn = 0 + self.electric_storage.discharge_yn = 1 + self.electric_storage.discharging_power = storage_power + self.electric_storage.charging_power = 0 - if charging_strategy == "uncontrolled": - connected_vehicles = [x for x in self.requests if x.mode == "Connected"] - charge_algos.uncontrolled( - env=self.env, - connected_vehicles=connected_vehicles, - charging_capacity=self.charging_capa, - planning_period_length=self.planning_interval, - ) + def take_dynamic_pricing_actions(self): + self.get_exp_free_grid_capacity() + self.update_vehicles_status() + self.take_pricing_action() + if self.pricing_mode == "Discrete": + self.price_history = pd.concat( + [ + self.price_history, + pd.DataFrame(self.price_pairs[:, 1]).transpose(), + ] + ) + if self.pricing_mode == "Continuous": + # print([self.pricing_parameters[1], self.parking_fee]) + self.price_history = pd.concat( + [ + self.price_history, + pd.DataFrame( + [self.pricing_parameters[0], self.pricing_parameters[1]] + ).transpose(), + ] + ) - if charging_strategy == "average_power": - connected_vehicles = [x for x in self.requests if x.mode == "Connected"] - charge_algos.average_power( - env=self.env, - connected_vehicles=connected_vehicles, - charging_capacity=self.charging_capa, - planning_period_length=self.planning_interval, - free_grid_capacity=self.free_grid_capa_actual, - ) + def take_static_pricing_action(self): + self.get_exp_free_grid_capacity() + self.update_vehicles_status() + if self.pricing_mode == "ToU": + hour = int((self.env.now % 1440) / 60) + self.pricing_parameters[0] = ( + self.electricity_tariff[hour] + / max(self.electricity_tariff) + * Configuration.instance().max_price_ToU + ) + if self.pricing_mode == "perfect_info": + if Configuration.instance().dynamic_fix_term_pricing: + self.pricing_parameters[1] = self.price_schedules[1][hour] + self.pricing_parameters[0] = self.price_schedules[0][hour] + else: + self.pricing_parameters[1] = self.price_schedules[hour] + 
if self.pricing_mode == "Discrete": + self.price_history = pd.concat( + [ + self.price_history, + pd.DataFrame(self.price_pairs[:, 1]).transpose(), + ] + ) + if self.pricing_mode == "Continuous" or self.pricing_mode == "ToU": + self.price_history = pd.concat( + [ + self.price_history, + pd.DataFrame( + [self.pricing_parameters[0], self.pricing_parameters[1]] + ).transpose(), + ] + ) - if charging_strategy == "first_come_first_served": - self.get_exp_free_grid_capacity() - self.get_available_battery_load() - connected_vehicles = [x for x in self.requests if x.mode == "Connected"] - charge_algos.first_come_first_served( - env=self.env, + def take_non_learning_charging_actions(self, charging_strategy, connected_vehicles): + strategy_functions = { + "uncontrolled": charge_algos.uncontrolled, + "average_power": charge_algos.average_power, + "first_come_first_served": charge_algos.first_come_first_served, + "earliest_deadline_first": charge_algos.earliest_deadline_first, + "least_laxity_first": charge_algos.least_laxity_first, + "equal_sharing": charge_algos.equal_sharing + } + + if charging_strategy in strategy_functions: + strategy_functions[charging_strategy](env=self.env, connected_vehicles=connected_vehicles, charging_stations=self.chargers, charging_capacity=self.charging_capa, free_grid_capacity=self.free_grid_capa_actual, - # free_battery_capacity = self.free_battery_load_capa, - planning_period_length=self.planning_interval, - ) + planning_period_length=self.planning_interval,) - if charging_strategy == "earliest_deadline_first": - self.get_exp_free_grid_capacity() - self.get_available_battery_load() - connected_vehicles = [x for x in self.requests if x.mode == "Connected"] - charge_algos.earliest_deadline_first( - env=self.env, - connected_vehicles=connected_vehicles, - charging_stations=self.chargers, - charging_capacity=self.charging_capa, - free_grid_capacity=self.free_grid_capa_actual, - # free_battery_capacity=self.free_battery_load_capa, - planning_period_length=self.planning_interval, - ) + if charging_strategy == "online_myopic": + charge_algos.online_myopic( + vehicles=connected_vehicles, + charging_stations=self.chargers, + env=self.env, + grid_capacity=self.free_grid_capa_actual, + optimization_period_length=self.optimization_period_length, + alpha=0, + ) - if charging_strategy == "least_laxity_first": - self.get_exp_free_grid_capacity() - self.get_available_battery_load() - connected_vehicles = [x for x in self.requests if x.mode == "Connected"] - # print([x.id for x in connected_vehicles]) - charge_algos.least_laxity_first( - env=self.env, - connected_vehicles=connected_vehicles, - charging_stations=self.chargers, - charging_capacity=self.charging_capa, - free_grid_capacity=self.free_grid_capa_actual, - # free_battery_capacity = self.free_battery_load_capa, - planning_period_length=self.planning_interval, - ) + # Charging algos that DO require foresight - if charging_strategy == "equal_sharing": - self.get_exp_free_grid_capacity() - self.get_available_battery_load() - charge_algos.equal_sharing( + if charging_strategy == "online_multi_period": + if max(self.charging_hub.grid.grid_usage) > self.peak_threshold: + self.peak_threshold = max(self.charging_hub.grid.grid_usage) + if len(connected_vehicles) > 0: + charge_algos.online_multi_period( + vehicles=connected_vehicles, charging_stations=self.chargers, - charging_capacity=self.charging_capa, - free_grid_capacity=self.free_grid_capa_actual, - free_battery_capacity=self.free_battery_load_capa, + env=self.env, + 
free_grid_capa_actual=self.free_grid_capa_actual, + free_grid_capa_predicted=self.free_grid_capa_predicted, + peak_load_history=self.peak_load_history, + electricity_cost=self.electricity_tariff, + sim_time=self.sim_time, + service_level=self.service_level, + optimization_period_length=self.optimization_period_length, + num_lookahead_planning_periods=4, + flex_margin=0.5, + peak_threshold=self.peak_threshold, ) - if charging_strategy == "online_myopic": - self.get_exp_free_grid_capacity() - self.get_available_battery_load() - connected_vehicles = [x for x in self.requests if x.mode == "Connected"] - charge_algos.online_myopic( + if charging_strategy == "integrated_storage": + if len(connected_vehicles) > 0: + charge_algos.integrated_charging_storage( + storage=self.electric_storage, vehicles=connected_vehicles, charging_stations=self.chargers, env=self.env, - grid_capacity=self.free_grid_capa_actual, + free_grid_capa_actual=self.free_grid_capa_actual, + free_grid_capa_predicted=self.free_grid_capa_predicted, + peak_load_history=self.peak_load_history, + electricity_cost=self.electricity_tariff, + sim_time=self.sim_time, + service_level=self.service_level, optimization_period_length=self.optimization_period_length, - alpha=0, + num_lookahead_planning_periods=12, + flex_margin=0.5, ) + def take_learning_charging_actions(self, charging_strategy): + if charging_strategy == "dynamic": + self.update_vehicles_status() + self.take_charging_action() + self.conduct_charging_action() - # Charging algos that DO require foresight - - if charging_strategy == "online_multi_period": - self.get_available_battery_load() + if self.storage_agent: self.get_exp_free_grid_capacity() - if max(self.charging_hub.grid.grid_usage) > self.peak_threshold: - self.peak_threshold = max(self.charging_hub.grid.grid_usage) - connected_vehicles = [x for x in self.requests if x.mode == "Connected"] - if len(connected_vehicles) > 0: - charge_algos.online_multi_period( - vehicles=connected_vehicles, - charging_stations=self.chargers, - env=self.env, - free_grid_capa_actual=self.free_grid_capa_actual, - free_grid_capa_predicted=self.free_grid_capa_predicted, - peak_load_history=self.peak_load_history, - electricity_cost=self.electricity_tariff, - sim_time=self.sim_time, - service_level=self.service_level, - optimization_period_length=self.optimization_period_length, - num_lookahead_planning_periods=4, - flex_margin=0.5, - peak_threshold=self.peak_threshold, - ) + self.take_storage_action() + self.conduct_storage_action() + + def update_learning_charging_and_pricing_agents(self, charging_strategy): + if charging_strategy == "dynamic": + self.update_charging_agent() + if self.storage_agent: + self.update_storage_agent() + if self.charging_hub.dynamic_pricing: + self.update_pricing_agent() - if charging_strategy == "integrated_storage": - self.get_exp_free_grid_capacity() - self.get_available_battery_load() - connected_vehicles = [x for x in self.requests if x.mode == "Connected"] - if len(connected_vehicles) > 0: - charge_algos.integrated_charging_storage( - storage=self.electric_storage, - vehicles=connected_vehicles, - charging_stations=self.chargers, - env=self.env, - free_grid_capa_actual=self.free_grid_capa_actual, - free_grid_capa_predicted=self.free_grid_capa_predicted, - peak_load_history=self.peak_load_history, - electricity_cost=self.electricity_tariff, - sim_time=self.sim_time, - service_level=self.service_level, - optimization_period_length=self.optimization_period_length, - num_lookahead_planning_periods=12, - 
flex_margin=0.5, - ) + def get_charging_schedules_and_prices(self, charging_strategy, mode): + """ + Periodically updates the charging schedule based on the selected strategy and decides which vehicle charges and how much. + This runs on a discrete-time basis; the schedule is re-computed every planning interval. + :param charging_strategy: charging strategy to apply + :param mode: simulation mode (discrete_time or discrete_event) + :return: n/a + """ + if charging_strategy in ["perfect_info", "perfect_info_with_storage"]: + self.schedule_charging_and_routing_perfect_info(charging_strategy) + while True: + self.get_exp_free_grid_capacity() + self.get_available_battery_load() + connected_vehicles = [x for x in self.requests if x.mode == "Connected"] + if charging_strategy in ["perfect_info", "perfect_info_with_storage"]: + self.apply_charging_routing_storage_perfect_info(charging_strategy) - if charging_strategy == "dynamic": + if self.charging_hub.dynamic_pricing: + self.take_dynamic_pricing_actions() - self.get_exp_free_grid_capacity() - self.update_vehicles_status() - self.take_charging_action() - self.conduct_charging_action() + else: + self.take_static_pricing_action() - ### active these lines if we have separate battery agent - # self.get_exp_free_grid_capacity() - # self.take_storage_action() - # self.conduct_storage_action() + self.take_non_learning_charging_actions(charging_strategy, connected_vehicles) + self.take_learning_charging_actions(charging_strategy) # update peak load history self.update_peak_load_history() @@ -489,13 +417,8 @@ def schedule_charging(strategy): yield self.env.timeout(self.planning_interval) if mode == "discrete_event": yield self.arrival_event - if charging_strategy == "dynamic": - # if len(connected_vehicles) > 0: - self.update_charging_agent() - ### active these lines if we have separate battery agent - # self.update_storage_agent() - if self.charging_hub.dynamic_pricing: - self.update_pricing_agent() + + self.update_learning_charging_and_pricing_agents(charging_strategy) def take_charging_action(self): state = self.charging_hub.charging_agent.environment.get_state( @@ -508,94 +431,75 @@ def take_charging_action(self): # while not self.done: action = self.charging_agent.pick_action(eval_ep, self.charging_hub) self.charging_agent.action = self.charging_agent.rescale_action(action) - self.action = self.charging_agent.action def take_pricing_action(self): - pricing_state = self.pricing_agent.environment.get_state( - self.charging_hub, self.env - ) + # Get current state from environment + pricing_state = self.pricing_agent.environment.get_state(self.charging_hub, self.env) self.pricing_agent.state = pricing_state eval_ep = self.pricing_agent.do_evaluation_iterations - if Configuration.instance().pricing_mode == "Discrete": - # self.pricing_agent.episode_step_number_val = 0 - # while not self.done: - if self.pricing_agent.agent_name == "DQN": - self.pricing_agent.action = self.pricing_agent.pick_action() + pricing_mode = Configuration.instance().pricing_mode + agent_name = self.pricing_agent.agent_name - if self.pricing_agent.agent_name == "SAC": - self.pricing_agent.action = self.pricing_agent.pick_action( - eval_ep, self.charging_hub - ) + if pricing_mode == "Discrete": + if agent_name == "DQN": + self.pricing_agent.action = self.pricing_agent.pick_action() + if len(self.price_pairs[:, 1]) > 1: + vector_prices = convert_to_vector(self.pricing_agent.action) + else: + vector_prices =
[self.pricing_agent.action] + final_pricing = self.pricing_agent.environment.get_final_prices_DQN(vector_prices) + for i, price in enumerate(final_pricing): + self.price_pairs[i, 1] = price - if self.pricing_agent.agent_name == "SAC": - rescaled_actions = self.pricing_agent.environment.rescale_action( - self.pricing_agent.action - ) + elif agent_name == "SAC": + self.pricing_agent.action = self.pricing_agent.pick_action(eval_ep, self.charging_hub) + rescaled_actions = self.pricing_agent.environment.rescale_action(self.pricing_agent.action) number_of_power_options = len(self.price_pairs[:, 1]) - final_pricing = rescaled_actions[0:number_of_power_options] - # for i in range(len(final_pricing)): + final_pricing = rescaled_actions[:number_of_power_options] self.price_pairs[0, 1] = final_pricing[0] self.price_pairs[1, 1] = min(final_pricing[1], 1.5) + + # Optional: handle grid capacity and storage # if Configuration.instance().limiting_grid_capa: # self.grid_capa = rescaled_actions[number_of_power_options] # if len(rescaled_actions) >= number_of_power_options + 2: - # self.storage_action = [rescaled_actions[number_of_power_options+1]] - # self.conduct_storage_action() - if self.pricing_agent.agent_name == "DQN": - if len(self.price_pairs[:, 1]) > 1: - vector_prices = convert_to_vector(self.pricing_agent.action) - else: - vector_prices = [self.pricing_agent.action] - final_pricing = self.pricing_agent.environment.get_final_prices_DQN( - vector_prices - ) - for i in range(len(final_pricing)): - self.price_pairs[i, 1] = final_pricing[i] + # self.storage_agent.action = [rescaled_actions[number_of_power_options + 1]] + # self.conduct_storage_action() - if Configuration.instance().pricing_mode == "Continuous": - self.pricing_agent.action = self.pricing_agent.pick_action( - eval_ep, self.charging_hub - ) - rescaled_actions = self.pricing_agent.environment.rescale_action( - self.pricing_agent.action - ) - # self.pricing_parameters[0] = rescaled_actions[0] - if ( - not Configuration.instance().dynamic_fix_term_pricing - and Configuration.instance().capacity_pricing - ): + elif pricing_mode == "Continuous": + self.pricing_agent.action = self.pricing_agent.pick_action(eval_ep, self.charging_hub) + rescaled_actions = self.pricing_agent.environment.rescale_action(self.pricing_agent.action) + + config = Configuration.instance() + if not config.dynamic_fix_term_pricing and config.capacity_pricing: self.pricing_parameters[1] = rescaled_actions[0] - if ( - Configuration.instance().dynamic_fix_term_pricing - and not Configuration.instance().capacity_pricing - ): + + elif config.dynamic_fix_term_pricing and not config.capacity_pricing: self.pricing_parameters[0] = rescaled_actions[0] - if Configuration.instance().dynamic_parking_fee: + if config.dynamic_parking_fee: self.parking_fee = rescaled_actions[1] - if ( - Configuration.instance().dynamic_fix_term_pricing - and Configuration.instance().capacity_pricing - ): + + elif config.dynamic_fix_term_pricing and config.capacity_pricing: self.pricing_parameters[0] = rescaled_actions[0] self.pricing_parameters[1] = rescaled_actions[1] - if Configuration.instance().limiting_grid_capa: + if config.limiting_grid_capa: self.grid_capa = rescaled_actions[1] - if Configuration.instance().dynamic_storage_scheduling: - self.storage_action = [rescaled_actions[1]] - self.conduct_storage_action() - self.charging_hub.grid.reset_reward() + if config.dynamic_storage_scheduling: + self.storage_agent.action = [rescaled_actions[1]] + + 
self.conduct_storage_action(given_storage_action=[rescaled_actions[1]]) + + # Reset reward at the end + self.charging_hub.grid.reset_reward() #TODO: it does not belong to the grid object def take_storage_action(self): - self.storage_state = self.charging_hub.storage_agent.environment.get_state( + storage_state = self.charging_hub.storage_agent.environment.get_state( self.charging_hub, self.env ) - self.storage_agent.state = self.storage_state - - # self.get_battery_max_min() + self.storage_agent.state = storage_state eval_ep = self.storage_agent.do_evaluation_iterations self.storage_agent.episode_step_number_val = 0 @@ -603,7 +507,6 @@ def take_storage_action(self): self.storage_agent.action = self.storage_agent.pick_action( eval_ep, self.charging_hub ) - self.storage_action = self.storage_agent.action def get_battery_max_min(self): bound_1 = ( @@ -629,10 +532,12 @@ def get_battery_max_min(self): discharging_bound = max(bound_1, bound_2, bound_3) self.charging_hub.max_battery_discharging_rate = discharging_bound self.storage_agent.action_range = [discharging_bound, charging_bound] - # print(self.charging_hub.max_battery_charging_rate, self.electric_storage.SoC ,self.charging_hub.max_battery_discharging_rate) - def check_storage(self): - storage_power = self.storage_action[0] + def check_storage(self, given_storage_action=None): + if given_storage_action: + storage_power = given_storage_action[0] + else: + storage_power = self.storage_agent.action[0] if storage_power >= 0: if ( self.storage_object.SoC @@ -682,87 +587,67 @@ def check_storage(self): self.charging_hub.electric_storage.charging_power = 0 self.charging_hub.electric_storage.discharge_yn = 1 self.charging_hub.electric_storage.discharging_power = -storage_power + if given_storage_action: + raw_storage_power = given_storage_action[0] + else: + raw_storage_power = self.storage_agent.action[0] + self.charging_hub.reward["feasibility_storage"] += abs( - self.storage_action[0] - storage_power + raw_storage_power - storage_power ) - # print(self.charging_hub.electric_storage.charging_power, self.charging_hub.electric_storage.discharging_power) - # print(self.storage_action[0], storage_power, self.electric_storage.SoC) - # self.storage_action[0] = storage_power - - def check_charging_power(self): - evaluation = False # self.charging_agent.do_evaluation_iterations - # Checking storage action - # First we ensure that the charging load is less that storage capacity and free grid power - storage_power = 0 # self.action[0] - all_charging_vehicles = np.asarray([]) - i = 0 - for charger in self.charging_hub.chargers: - charging_vehicles = charger.charging_vehicles - charger_usage = 0 - charging_power_list = np.array( - [v.charging_power for v in charging_vehicles] - ) - for j in range(charger.number_of_connectors): - try: - charging_vehicles[j].action_id = i + 1 - if charging_vehicles[j].remaining_energy_deficit <= 0: - self.charging_hub.reward["feasibility"] += self.action[i + 1] - if evaluation: - self.action[i + 1] = 0 - except: - self.charging_hub.reward["feasibility"] += self.action[i + 1] - if evaluation: - self.action[i + 1] = 0 - i += 1 - - for vehicle in charging_vehicles: - if vehicle.remaining_energy_deficit <= 0: - vehicle.charging_power = 0 - charger_usage += vehicle.charging_power - all_charging_vehicles = np.append(all_charging_vehicles, vehicle) - self.charging_hub.reward["feasibility"] += max( - charger_usage - charger.power, 0 - ) - while charging_power_list.sum() > charger.power: - number_active_chargers = 
len(charging_power_list) - surplus_per_charger = ( - max(charging_power_list.sum() - charger.power + 0.1, 0) - / number_active_chargers - ) - for vehicle in charging_vehicles: - vehicle.charging_power -= surplus_per_charger - vehicle.charging_power = max(vehicle.charging_power, 0) - charging_power_list = np.array( - [v.charging_power for v in charging_vehicles] - ) - total_charging_power_list = np.array( - [v.charging_power for v in all_charging_vehicles] - ) - grid_capa = min(self.action[0], self.free_grid_capa_actual[0]) - self.charging_hub.reward["feasibility"] += max( - total_charging_power_list.sum() - grid_capa, 0 + def check_storage(self, given_storage_action=None): + # Determine the raw storage power action + raw_storage_power = ( + given_storage_action[0] if given_storage_action else self.storage_agent.action[0] ) + storage_power = raw_storage_power - while total_charging_power_list.sum() + storage_power > grid_capa + 1: - number_active_chargers = len(total_charging_power_list) - surplus_per_charger = ( - max(total_charging_power_list.sum() + storage_power - grid_capa, 0) - / number_active_chargers - ) - for vehicle in all_charging_vehicles: - vehicle.charging_power -= surplus_per_charger - vehicle.charging_power = max(vehicle.charging_power, 0) - if evaluation: - self.action[vehicle.action_id] = vehicle.charging_power - total_charging_power_list = np.array( - [v.charging_power for v in all_charging_vehicles] - ) - if evaluation: - self.charging_hub.reward["feasibility"] = 0 + interval_factor = self.charging_hub.planning_interval / 60 + soc = self.storage_object.SoC + max_energy = self.storage_object.max_energy_stored_kWh + grid_capacity = self.charging_hub.operator.free_grid_capa_without_storage - def conduct_storage_action(self): - storage_power = self.storage_action[0] + # Handle charging + if storage_power >= 0: + projected_soc = soc + storage_power * interval_factor + if projected_soc > max_energy: + storage_power = (max_energy - soc) / interval_factor + storage_power = min(storage_power, grid_capacity) + + self.charging_hub.electric_storage.charge_yn = 1 + self.charging_hub.electric_storage.charging_power = storage_power + self.charging_hub.electric_storage.discharge_yn = 0 + self.charging_hub.electric_storage.discharging_power = 0 + + # Handle discharging + else: + hub_generation_kW = self.get_hub_generation_kW() + hub_demand_kW = self.get_hub_load_kW() + + if not self.B2G and (storage_power + hub_demand_kW - hub_generation_kW < 0): + storage_power = -(hub_demand_kW - hub_generation_kW) + + projected_soc = soc + storage_power * interval_factor + if projected_soc < 0: + storage_power = -max(soc / interval_factor, 0) + + if soc <= 0: + storage_power = 0 + + self.charging_hub.electric_storage.charge_yn = 0 + self.charging_hub.electric_storage.charging_power = 0 + self.charging_hub.electric_storage.discharge_yn = 1 + self.charging_hub.electric_storage.discharging_power = -storage_power + + # Track feasibility deviation + self.charging_hub.reward["feasibility_storage"] += abs(raw_storage_power - storage_power) + + def conduct_storage_action(self, given_storage_action=None): + if given_storage_action: + storage_power = given_storage_action[0] + else: + storage_power = self.storage_agent.action[0] if storage_power >= 0: self.charging_hub.electric_storage.charge_yn = 1 self.charging_hub.electric_storage.charging_power = storage_power @@ -773,121 +658,85 @@ def conduct_storage_action(self): self.charging_hub.electric_storage.charging_power = 0 
self.charging_hub.electric_storage.discharge_yn = 1 self.charging_hub.electric_storage.discharging_power = -storage_power - self.check_storage() + self.check_storage(given_storage_action=given_storage_action) def conduct_charging_action(self): - action = self.action - i = 0 + action = self.charging_agent.action + action_index = 1 # Start from 1 because action[0] is reserved (possibly for pricing or metadata) + for charger in self.charging_hub.chargers: - charging_vehicles = charger.charging_vehicles - for connector in range(charger.number_of_connectors): - if action[i + 1] > 0: - try: - charging_vehicles[connector].charging_power = action[i + 1] - except: - pass - i += 1 + for connector_idx in range(charger.number_of_connectors): + if action_index >= len(action): + break # Prevent index error if action list is shorter than expected + + charging_power = action[action_index] + if charging_power > 0: + charging_vehicles = charger.charging_vehicles + if connector_idx < len(charging_vehicles): + vehicle = charging_vehicles[connector_idx] + vehicle.charging_power = charging_power + action_index += 1 + self.check_charging_power() self.charging_hub.grid.reset_reward() def update_pricing_agent(self): self.update_vehicles_status() + if not self.charging_agent: - self.charging_hub.reward["missed"] = ( - self.reward_computing() - ) # TODO: do we need to recalculate it? - if self.pricing_agent.agent_name == "new_SAC": - if len(self.pricing_agent.memory) > self.pricing_agent.config.batch_size: - # Number of updates per step in environment - for i in range(self.pricing_agent.config.updates_per_step): - # Update parameters of all the networks - critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = ( - self.pricing_agent.update_parameters( - self.pricing_agent.memory, - self.pricing_agent.batch_size, - self.pricing_agent.updates, - ) - ) - self.pricing_agent.updates += 1 + # TODO: do we need to recalculate it? 
+ self.charging_hub.reward["missed"] = self.reward_computing() - next_state, reward, done, _ = self.pricing_agent.environment.step( - self.pricing_agent.action, self.charging_hub, self.env - ) - # print(self.pricing_agent.action) - mask = ( - False - if self.pricing_agent.global_step_number - >= self.pricing_agent.environment._max_episode_steps - else self.pricing_agent.done - ) - self.pricing_agent.memory.push( - self.pricing_agent.state, self.pricing_agent.action, reward, next_state, mask - ) - self.pricing_agent.state = next_state + agent = self.pricing_agent + agent_name = agent.agent_name + config = agent.config - # SAC - if self.pricing_agent.agent_name == "SAC": - self.pricing_agent.conduct_action( - self.pricing_agent.action, self.charging_hub, self.env - ) - eval_ep = self.pricing_agent.do_evaluation_iterations - if self.pricing_agent.time_for_critic_and_actor_to_learn(): - if not eval_ep: - for _ in range( - self.pricing_agent.hyperparameters[ - "learning_updates_per_learning_session" - ] - ): - self.pricing_agent.learn() - mask = ( - False - if self.pricing_agent.global_step_number - >= self.pricing_agent.environment._max_episode_steps - else self.pricing_agent.done - ) - # if not eval_ep: - action = self.pricing_agent.action - # action = self.pricing_agent.descale_action(self.pricing_agent.action, self.charging_hub) - # print(self.pricing_state, action, self.pricing_agent.reward, self.pricing_agent.next_state) - self.pricing_agent.save_experience( + if agent_name == "SAC": + agent.conduct_action(agent.action, self.charging_hub, self.env) + eval_ep = agent.do_evaluation_iterations + + if agent.time_for_critic_and_actor_to_learn() and not eval_ep: + for _ in range(agent.hyperparameters["learning_updates_per_learning_session"]): + agent.learn() + + mask = False if agent.global_step_number >= agent.environment._max_episode_steps else agent.done + + agent.save_experience( experience=( - self.pricing_agent.state, - action, - self.pricing_agent.reward, - self.pricing_agent.next_state, + agent.state, + agent.action, + agent.reward, + agent.next_state, mask, ) ) - if self.pricing_agent.agent_name == "DQN": - self.pricing_agent.conduct_action( - self.pricing_agent.action, self.charging_hub, self.env - ) - if self.pricing_agent.time_for_q_network_to_learn(): - for _ in range( - self.pricing_agent.hyperparameters["learning_iterations"] - ): - self.pricing_agent.learn() - action = self.pricing_agent.action - self.pricing_agent.save_experience( + elif agent_name == "DQN": + agent.conduct_action(agent.action, self.charging_hub, self.env) + + if agent.time_for_q_network_to_learn(): + for _ in range(agent.hyperparameters["learning_iterations"]): + agent.learn() + + agent.save_experience( experience=( - self.pricing_agent.state, - action, - self.pricing_agent.reward, - self.pricing_agent.next_state, + agent.state, + agent.action, + agent.reward, + agent.next_state, False, ) ) - self.pricing_agent.global_step_number += 1 + agent.global_step_number += 1 def update_storage_agent(self): eval_ep = self.storage_agent.do_evaluation_iterations action = self.storage_agent.descale_action( - self.storage_action, self.charging_hub + self.storage_agent.action, self.charging_hub ) - self.storage_agent.conduct_action(action, self.charging_hub, self.env) + self.storage_agent.conduct_action(action, self.charging_hub, self.env, eval_ep=eval_ep) if self.storage_agent.time_for_critic_and_actor_to_learn(): for _ in range( self.storage_agent.hyperparameters[ @@ -901,13 +750,11 @@ def update_storage_agent(self): >= 
self.storage_agent.environment._max_episode_steps else self.storage_agent.done ) - if mask: - print("there is problem with mask") # if not eval_ep: self.storage_agent.save_experience( experience=( - self.storage_state, + self.storage_agent.state, action, self.storage_agent.reward, self.storage_agent.next_state, @@ -922,7 +769,7 @@ def update_charging_agent(self): self.charging_hub.reward["missed"] = self.reward_computing() eval_ep = self.charging_agent.do_evaluation_iterations - self.charging_agent.conduct_action(self.action, self.charging_hub, self.env) + self.charging_agent.conduct_action(self.charging_agent.action, self.charging_hub, self.env) if self.charging_agent.time_for_critic_and_actor_to_learn(): if not eval_ep: for _ in range( @@ -937,10 +784,8 @@ def update_charging_agent(self): >= self.charging_agent.environment._max_episode_steps else self.charging_agent.done ) - if mask: - print("there is problem with mask") # if not eval_ep: - action = self.charging_agent.descale_action(self.action, self.charging_hub) + action = self.charging_agent.descale_action(self.charging_agent.action, self.charging_hub) self.charging_agent.save_experience( experience=( self.charging_agent.state, @@ -1034,11 +879,8 @@ def update_peak_load_history(self): t : t + self.planning_interval - 1 ]["pv_generation"] ) - # battery_charge = self.electric_storage.charge_yn * self.electric_storage.charging_power - # battery_discharge = self.electric_storage.discharge_yn * self.electric_storage.discharging_power planning_window_peak_load = charging_load + baseload_max - generation_min - # planning_window_peak_load = charging_load + baseload_max + battery_charge - generation_min - battery_discharge self.peak_load_history.append(planning_window_peak_load) def update_peak_load_history_inc_storage(self): @@ -1116,18 +958,10 @@ def get_hub_generation_kW(self): ########################################## # BELOW FUNCTIONS EXECUTE DECISIONS - def request_queueing(self): # WHAT EXACTLY DOES THIS FUNCTION DO? + def request_queueing(self): while True: not_arrived_requests = [x for x in self.requests if x.mode is None] if len(not_arrived_requests) > 0: - # request = not_arrived_requests[0] - # interarrival_time = (request.arrival_period - self.env.now) - # yield self.env.timeout(interarrival_time) - # request.mode = 'Arrived' - # if self.multiple_power: - # request.adjust_request_demand_based_on_pricing(self.price_pairs) - # self.env.process(self.assign_parking_charging_resources(request)) - # self.env.process(self.request_process(request)) request = not_arrived_requests[0] interarrival_time = request.arrival_period - self.env.now yield self.env.timeout(interarrival_time)
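The refactored take_non_learning_charging_actions() above replaces six near-identical call sites with a dispatch table of strategy functions that all share one keyword signature (the same signature change applied to uncontrolled, average_power and equal_sharing in ChargingAlgorithms.py). Below is a minimal, self-contained sketch of that pattern; the stub strategies are hypothetical stand-ins, not the repository's charge_algos implementations.

def uncontrolled(env, connected_vehicles, charging_stations, charging_capacity,
                 free_grid_capacity, planning_period_length):
    # Every vehicle charges at the charger limit, capped by free grid capacity.
    for vehicle in connected_vehicles:
        vehicle["charging_power"] = min(charging_capacity, free_grid_capacity)

def average_power(env, connected_vehicles, charging_stations, charging_capacity,
                  free_grid_capacity, planning_period_length):
    # Free grid capacity is split equally across connected vehicles.
    share = free_grid_capacity / max(len(connected_vehicles), 1)
    for vehicle in connected_vehicles:
        vehicle["charging_power"] = min(charging_capacity, share)

STRATEGIES = {"uncontrolled": uncontrolled, "average_power": average_power}

def take_non_learning_charging_actions(strategy, **kwargs):
    # Single call site: look the strategy up by name and forward the shared kwargs.
    if strategy in STRATEGIES:
        STRATEGIES[strategy](**kwargs)

# Usage: both strategies are invoked through the same call site.
vehicles = [{"id": 1}, {"id": 2}]
take_non_learning_charging_actions(
    "average_power",
    env=None,
    connected_vehicles=vehicles,
    charging_stations=[],
    charging_capacity=22,
    free_grid_capacity=30,
    planning_period_length=15,
)
print(vehicles)  # each vehicle receives min(22, 30/2) = 15 kW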
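The rewritten check_storage() clamps the agent's raw storage action to what the battery and the grid connection can actually accept, and books the difference as a feasibility penalty. A minimal sketch of that clamping logic under assumed units (power in kW, energy in kWh, planning interval in minutes); the standalone function and its names are illustrative, not the repository's API.

def clamp_storage_power(raw_power_kw, soc_kwh, max_energy_kwh, free_grid_kw,
                        net_site_load_kw, interval_min=15, b2g_allowed=False):
    """Return (feasible_power_kw, feasibility_penalty).
    Positive power charges the battery, negative power discharges it."""
    interval_h = interval_min / 60.0
    power = raw_power_kw

    if power >= 0:
        # Charging: cannot overfill the battery or exceed free grid capacity.
        headroom_kw = (max_energy_kwh - soc_kwh) / interval_h
        power = min(power, headroom_kw, free_grid_kw)
    else:
        # Discharging: without battery-to-grid, never export beyond on-site demand.
        if not b2g_allowed:
            power = max(power, -max(net_site_load_kw, 0.0))
        # Cannot draw more energy than is currently stored.
        power = max(power, -soc_kwh / interval_h)
        if soc_kwh <= 0:
            power = 0.0

    penalty = abs(raw_power_kw - power)  # tracked as the feasibility reward term
    return power, penalty

# Example: a 200 kW discharge request against a battery holding only 20 kWh.
power, penalty = clamp_storage_power(raw_power_kw=-200, soc_kwh=20, max_energy_kwh=500,
                                     free_grid_kw=150, net_site_load_kw=120,
                                     interval_min=15)
print(power, penalty)  # discharge is limited to 80 kW by the stored energy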