From 678c06c44b8e5fbfe01e34d1425098fae2acc420 Mon Sep 17 00:00:00 2001 From: SamuelGoodday Date: Tue, 27 Apr 2021 12:02:53 +0200 Subject: [PATCH] Add code initially --- asam classes/Asset.py | 193 +++ asam classes/ExogeniousDatabase.py | 316 ++++ asam classes/GridSystemOperator.py | 352 +++++ asam classes/Main.py | 177 +++ asam classes/MarketModel.py | 259 +++ asam classes/MarketOperator.py | 1221 +++++++++++++++ asam classes/MarketParty.py | 2334 ++++++++++++++++++++++++++++ asam classes/OrderMessage.py | 125 ++ asam classes/Orderbook.py | 282 ++++ asam classes/Reports.py | 777 +++++++++ asam classes/Time.py | 196 +++ asam classes/Visualization.py | 766 +++++++++ 12 files changed, 6998 insertions(+) create mode 100644 asam classes/Asset.py create mode 100644 asam classes/ExogeniousDatabase.py create mode 100644 asam classes/GridSystemOperator.py create mode 100644 asam classes/Main.py create mode 100644 asam classes/MarketModel.py create mode 100644 asam classes/MarketOperator.py create mode 100644 asam classes/MarketParty.py create mode 100644 asam classes/OrderMessage.py create mode 100644 asam classes/Orderbook.py create mode 100644 asam classes/Reports.py create mode 100644 asam classes/Time.py create mode 100644 asam classes/Visualization.py diff --git a/asam classes/Asset.py b/asam classes/Asset.py new file mode 100644 index 0000000..2fba4d9 --- /dev/null +++ b/asam classes/Asset.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Sep 1 10:26:57 2017 +@author: Samuel Glismann +Asset class of ASAM + +Assets are owned by market party agents. +Asset attributes largely correspond to PyPSA attributes (see PyPSA for more explanation). + +Assets have furthermore an asset (dispatch) schedule as well as a constraint dataframe. +The constraint dataframe is used by PyPSA for dispatch optimization and by the agent to determine available capacity. + +Methods: + - init() + - calc_dispatch_constraints(self, this_round_redispatch) + - get_as_df() + +""" +import pandas as pd +from pandas import Series, DataFrame +import numpy as np + + +class Asset(): + def __init__(self, model, assetname = None, pmax = 1, pmin = 0, location = None, + srmc = 0, assetowner=None, ramp_limit_up =None, ramp_limit_down = None, + min_up_time =0, min_down_time = 0, start_up_cost =0, shut_down_cost =0, + ramp_limit_start_up =1, ramp_limit_shut_down=1): + + self.model = model + self.assetID = assetname + self.pmax = pmax #MW + self.pmin = pmin #MW + self.location = location #area name + self.srmc = int(round(srmc,0)) #EUR/MW + self.assetowner = assetowner + self.ramp_limit_up = ramp_limit_up #p.u. of pmax: max active power increase from one MTU to next + self.ramp_limit_down = ramp_limit_down #p.u. of pmax: max active power increase from one MTU to next + self.ramp_limit_start_up = ramp_limit_start_up #p.u. of pmax: max active power increase from one MTU to next at start up + self.ramp_limit_shut_down = ramp_limit_shut_down #p.u. of pmax: max active power increase from one MTU to next at shut down + self.min_up_time = min_up_time #minimum number of subsequent MTU commited + self.min_down_time = min_down_time #minimum number of subsequent MTU nont commited + self.start_up_cost = int(round(start_up_cost,0)) #EUR + self.shut_down_cost = int(round(shut_down_cost,0)) #EUR + + # DataFrame for initial schedules before day-ahead results or redispatch results are available + init_sched = model.clock.asset_schedules_horizon() + init_sched['commit'] = 0 + + #asset dispatch schedule (including the past, i.e. 
commitment of past simulation steps)
+        self.schedule = DataFrame(columns =['commit','p_max_t','p_min_t'])
+        self.schedule['commit'] = init_sched['commit']
+        self.schedule['available_up'] = self.pmax
+        #initially (before the first agent.step) the asset is set to 0.
+        self.schedule['available_down'] = 0
+
+        #this variable logs the dispatch schedule during the redispatch bidding.
+        #it is the baseline for dispatch limits during the schedule horizon (i.e. past dispatch excluded)
+        self.schedule_at_redispatch_bidding = init_sched
+        #cumulative constraints per round: previous constraints are taken into account.
+        #values of past MTU are excluded
+        self.constraint_df = init_sched
+        self.constraint_df['upward_commit'] = 0
+        self.constraint_df['downward_commit'] = 0
+        self.constraint_df['dispatch_limit'] = 0
+        self.constraint_df['previous_dispatch'] = 0
+        self.constraint_df['p_max_t'] = self.pmax
+
+        #planned unavailability from scenario, in case the asset name is listed with unavailabilities in p.u. of pmax
+        self.planned_unavailability = DataFrame()
+        if isinstance(self.model.exodata.DA_residual_load, DataFrame):
+            if (self.model.exodata.DA_residual_load.columns.isin([self.assetID]).any()):
+                self.planned_unavailability = self.model.exodata.DA_residual_load[[
+                        'delivery_day','delivery_time','delivery_hour',self.assetID]]
+                #from p.u. to MW
+                self.planned_unavailability[self.assetID] = self.planned_unavailability[self.assetID]*self.pmax
+                self.planned_unavailability.set_index(['delivery_day','delivery_time'], inplace=True)
+                #rename unavailability
+                self.planned_unavailability.rename(columns={self.assetID : 'p_max_t'}, inplace=True)
+                #adjust p_max_t and p_min_t, because otherwise it can happen that p_max_t < pmin
+                self.planned_unavailability['p_max_t'] = self.planned_unavailability[
+                        'p_max_t'].where(self.planned_unavailability['p_max_t'] >= self.pmin, 0)
+                self.planned_unavailability['p_min_t'] = Series([self.pmin]*len(self.planned_unavailability),
+                        index=self.planned_unavailability.index).where(self.planned_unavailability[
+                                'p_max_t'] >= self.pmin, 0)
+
+                #adjust constraint_df based on planned unavailability
+                self.constraint_df['p_max_t'] = self.planned_unavailability.loc[
+                        self.constraint_df.index,'p_max_t'].values
+                self.constraint_df['p_min_t'] = self.planned_unavailability.loc[
+                        self.constraint_df.index,'p_min_t'].values
+
+    def calc_dispatch_constraints(self, this_round_redispatch):
+        """
+        Method: administers redispatch transactions in the asset constraint dataframe.
+        Input: agent.set_asset_commit_constraints() executes this method and
+        provides the aggregated redispatch values as a dataframe.
+
+        In case an asset is associated with a redispatch transaction,
+        additional constraints apply to the dispatch optimization.
+
+        In case of an upward redispatch transaction, the asset is bound to a
+        dispatch above the last dispatch schedule + upward redispatch quantity.
+        In case of a downward redispatch transaction, the asset is bound to a
+        dispatch below the last dispatch schedule - downward redispatch quantity.
+        """
+        print('calculate dispatch constraints of ', self.assetID)
+        #forget the past MTU and add the new horizon MTU to constraint_df
+        self.constraint_df = self.constraint_df.loc[this_round_redispatch.index]
+        #add p_max_t for the new schedule horizon
+        if self.planned_unavailability.empty:
+            self.constraint_df['p_max_t'] = self.pmax
+            self.constraint_df['p_min_t'] = self.pmin
+        else:
+            self.constraint_df['p_max_t'] = self.planned_unavailability.loc[
+                    this_round_redispatch.index, 'p_max_t'].values
+            self.constraint_df['p_min_t'] = self.planned_unavailability.loc[
+                    this_round_redispatch.index, 'p_min_t'].values
+
+        self.constraint_df[['commit','upward_commit','downward_commit']] = self.constraint_df[
+                ['commit','upward_commit','downward_commit']].fillna(value=0)
+
+        if (this_round_redispatch['commit'] != 0).any():
+            #get the new redispatch commit and overwrite the previous redispatch commit
+            #(previous redispatch is now stored in the dispatch limit and asset limits)
+            self.constraint_df['commit'] = this_round_redispatch['commit']
+
+            #store all redispatch commitments per direction
+            self.constraint_df['upward_commit'] = self.constraint_df[
+                    'upward_commit'].fillna(value=0) + self.constraint_df['commit'].where(
+                    self.constraint_df['commit']>0, 0)
+            self.constraint_df['downward_commit'] = self.constraint_df[
+                    'downward_commit'].fillna(value=0) + self.constraint_df['commit'].where(
+                    self.constraint_df['commit']<0, 0)
+
+            #get the dispatch schedule from the bidding moment (as reference point)
+            self.constraint_df['previous_dispatch'] = self.schedule_at_redispatch_bidding[
+                    'commit'].loc[self.model.schedules_horizon.index]
+            #because the previous dispatch respected the previous asset constraints,
+            #there is no need for superposition of the dispatch limit.
+            #example: asset dispatch schedule at 90 MW. New redispatch commit +10 MW,
+            #         meaning that constraint_df allowed for 10 MW sell bids.
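+            #Worked numeric sketch of the computation below (illustrative, not
+            #part of the model; runnable if uncommented):
+            #
+            #   import pandas as pd
+            #   previous_dispatch = pd.Series([90, 90])      # MW at bidding time
+            #   commit = pd.Series([10, -20])                # +10 MW up, -20 MW down
+            #   dispatch_limit = previous_dispatch + commit  # -> [100, 70]
+            #
+            #an upward commit turns the limit into a floor (dispatch >= 100 MW),
+            #a downward commit turns it into a cap (dispatch <= 70 MW).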
+            #no need to check if there was downward redispatch in previous rounds
+            self.constraint_df['dispatch_limit'] = (self.constraint_df['previous_dispatch'
+                    ] + self.constraint_df['commit']).round(0).astype(int)
+
+            if (self.model.RD_marketoperator.rules['order_types'] == 'limit_block')|(
+                    self.model.RD_marketoperator.rules['order_types'] == 'limit_ISP')|(
+                    self.model.RD_marketoperator.rules['order_types'] == 'IDCONS_orders'):
+                #check dispatch_limit feasibility
+                unfeasible = self.constraint_df.loc[(self.constraint_df['dispatch_limit']>self.pmax)|
+                                                    (self.constraint_df['dispatch_limit']<0)]
+            elif (self.model.RD_marketoperator.rules['order_types'] == 'all_or_none_block')|(
+                    self.model.RD_marketoperator.rules['order_types'] == 'all_or_none_ISP'):
+                """for all-or-none order types the dispatch limit may not lie below pmin"""
+                #check dispatch_limit feasibility
+                unfeasible = self.constraint_df.loc[(self.constraint_df['dispatch_limit']>self.pmax)|(
+                        (self.constraint_df['dispatch_limit']<self.constraint_df['p_min_t'])&(
+                        self.constraint_df['dispatch_limit']!=0))]
+            if not unfeasible.empty:
+                raise Exception('redispatch transactions lead to an unfeasible dispatch limit of asset '
+                                + str(self.assetID))
+            #translate dispatch limits into asset constraints for the optimization:
+            #a downward commitment caps p_max_t at the dispatch limit (0 if the
+            #limit lies below p_min_t), an upward commitment lifts p_min_t to the dispatch limit
+            self.constraint_df['p_max_t'] = self.constraint_df['dispatch_limit'].where(
+                    ((self.constraint_df['downward_commit'] != 0)&(
+                    self.constraint_df['dispatch_limit'] >= self.constraint_df['p_min_t'])),
+                    self.constraint_df['p_max_t']).where(
+                    ~((self.constraint_df['downward_commit'] != 0)&(
+                    self.constraint_df['dispatch_limit'] < self.constraint_df['p_min_t'])), 0)
+            self.constraint_df['p_min_t'] = self.constraint_df['dispatch_limit'].where(
+                    ((self.constraint_df['upward_commit'] != 0)&(
+                    self.constraint_df['dispatch_limit']>self.constraint_df['p_min_t'])),
+                    self.constraint_df['p_min_t'])
+
+    def get_as_df(self):
+        names = ["agent_id", "asset_id", "pmax", "pmin", "location", "srmc"]
+        df = DataFrame([[self.assetowner, self.assetID, self.pmax, self.pmin, self.location, self.srmc]], columns = names)
+        return (df)
+
+
diff --git a/asam classes/ExogeniousDatabase.py b/asam classes/ExogeniousDatabase.py
new file mode 100644
index 0000000..30ba7c9
--- /dev/null
+++ b/asam classes/ExogeniousDatabase.py
@@ -0,0 +1,316 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Oct 30 16:45:23 2017
+@author: Samuel Glismann
+"""
+
+from mesa import Agent, Model
+import pandas as pd
+from pandas import Series, DataFrame
+import random
+import numpy as np
+
+
+class ExoData():
+    def __init__(self, model, simulation_parameters):
+        self.model = model
+        self.sim_task = None
+        self.market_rules = None
+        self.asset_portfolios = None
+        self.congestions = None #todo: change to redispatch_demand
+        self.agent_strategies = None
+        self.forecast_errors = None
+        self.DA_residual_load = None
+        self.opportunity_costs_db = None
+        self.sim_name = None
+        self.output_path = None
+        self.IBP_kde_pdfs = DataFrame()
+        self.IBP_exo_prices = None
+        self.solver_name = None
+        self.control_state_probabilities = DataFrame()
+        self.read_check_parameters(simulation_parameters)
+
+    def read_check_parameters(self, simulation_parameters):
+        """TODO: place more checks on the input parameters"""
+        print('check input data')
+        if not isinstance(simulation_parameters, dict):
+            raise Exception('simulation parameters are not provided as a dictionary')
+
+        try:
+            self.sim_task = simulation_parameters['simulation_task']
+        except:
+            raise Exception('simulation task not part of input parameters')
+
+        self.sim_name = self.sim_task['simulation_name']
+        self.output_path = simulation_parameters['output_path']
+
+        try:
+            self.market_rules = simulation_parameters['market_rules']
+        except:
+            raise Exception('market_rules not part of input parameters')
+        try:
+            self.agent_strategies = simulation_parameters['agent_strategies']
+        except:
+            raise Exception('agent_strategies not part of input parameters')
+        try:
+            self.asset_portfolios = simulation_parameters['portfolios']
+            if 'Type' in self.asset_portfolios:
+                #exclude 'artificial generators' from system_pmax for residual 
load, congestion scaling + system_pmax = self.asset_portfolios['pmax'].loc[ + self.asset_portfolios['Type']!='small flex aggregated'].sum() + else: + system_pmax = self.asset_portfolios['pmax'].sum() + except: + raise Exception('asset portfolios not part of input parameters') + + #optional input parameters + try: + if (self.sim_task['congestions'] =='from_scenario')|(( + self.sim_task['residual_load_scenario'] !='flat_resload_profile')&( + self.sim_task['residual_load_scenario'] !='24h_residual_load_profile'))|( + self.sim_task['forecast_errors'] =='from_scenario'): + idx = pd.IndexSlice + #comes as multicolumn index. here it gets converted. + if pd.__version__ =='0.24.2': + self.DA_residual_load =simulation_parameters['da_residual_load'].loc[ + :,idx[self.sim_task['residual_load_scenario'],:]] + + elif pd.__version__ =='0.19.1': + self.DA_residual_load =simulation_parameters['da_residual_load'].sort_index(axis=1).loc[ + :,idx[self.sim_task['residual_load_scenario'],:]] + + + self.DA_residual_load.columns = self.DA_residual_load.columns.droplevel(0) + + #delete all rows containing NA in 'residual_load_DA' column. + #This can happen if various RES load scenarios are in input data. + self.DA_residual_load.dropna(axis=0,subset=['residual_load_DA'], inplace=True) + + + #convert p.u. to MW and round to full MW + mask = self.DA_residual_load.columns.isin(['residual_load_DA','FE_residual_load','congestion_MW', + 'load_DA_cor','wind_DA', 'sun_DA']) + self.DA_residual_load.loc[:, mask] = self.DA_residual_load.loc[:,mask] * system_pmax + self.DA_residual_load.loc[:, mask] =self.DA_residual_load.loc[:, mask].round(0).astype(int) + + except: + if self.market_rules.loc['acquisition_method','DAM']=='single_hourly_auction': + raise Exception('DA_residual_load not part of input parameters. required for DAM single_hourly_auction') + + try: + if self.sim_task['congestions'] =='from_scenario': + #forecast error is obtained fom residual load data. forecast error in tab are ignored + self.congestions = self.DA_residual_load[['delivery_day','delivery_time','congestion_MW' + ,'redispatch_areas_down', + 'redispatch_areas_up', + 'identification_day', + 'identification_mtu']] + self.congestions.set_index(['delivery_day','delivery_time'], inplace=True) + elif self.sim_task['congestions'] =='exogenious': + self.congestions = simulation_parameters['congestions'] + + except: + if (self.sim_task['run_RDM[y/n]']=='y')&(self.sim_task['congestions']=='None'): + import pdb + pdb.set_trace() + raise Exception('congestion required input parameters, when running redispatch simulation') + try: + if self.sim_task['forecast_errors'] =='from_scenario': + #forecast error is obtained fom residual load data. forecast error in tab are ignored + self.forecast_errors = self.DA_residual_load[['delivery_day','delivery_time','FE_residual_load']] + + elif self.sim_task['forecast_errors'] =='exogenious': + self.forecast_errors=simulation_parameters['forecast_errors'] + else: + pass + except: + raise Exception('forecast error input required, when running exogenious forecast error allocation') + + try: + self.opportunity_costs_db=simulation_parameters['opportunity_costs'] + except: + if (self.agent_strategies =='opportunity_markup').any().any(): + raise Exception('opportunity costs estimates required for agent strategies') + try: + self.IBP_kde_pdfs=simulation_parameters['IBP_kde_pdfs'] + except: + #this is not yet fully consistent. 
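+            #Pattern used throughout this method (illustrative sketch with a
+            #hypothetical rule flag): optional inputs only raise an error when
+            #a market rule actually requires them, e.g.:
+            #
+            #   try:
+            #       pdfs = simulation_parameters['IBP_kde_pdfs']
+            #   except KeyError:
+            #       if rules_require_pdfs:   # otherwise the input may be absent
+            #           raise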
+ if self.market_rules.loc['acquisition_method','BEM']=='control_states_only': + raise Exception('Balancing energy method based on probability samples requires IBP pdfs to estimate BEP') + try: + self.control_state_probabilities= simulation_parameters['control_state_probabilities'] + except: + #this is not yet fully consistent. + if self.market_rules.loc['acquisition_method','BEM']=='control_states_only': + raise Exception('Balancing energy method based on probability samples requires control_state_probs to estimate control state') + try: + self.IBP_exo_prices= simulation_parameters['IBP_exo_prices'] + except: + #this is not yet fully consistent. + if self.market_rules.loc['pricing_method','IBM']=='exogenious': + raise Exception('Imbalance pricing method -exogenious- expects a dataframe with short and long prices as well as timestamps') + + + if self.sim_task['start_day'] =='from_scenario': + self.sim_task['start_day'] = self.DA_residual_load['delivery_day'].iloc[0] + if self.sim_task['start_MTU'] =='from_scenario': + self.sim_task['start_MTU'] = self.DA_residual_load['delivery_time'].iloc[0] + if self.sim_task['number_steps'] == 'from_scenario': + self.sim_task['number_steps'] = len(self.DA_residual_load) + + try: + #used for PyPSA + self.solver_name = self.sim_task['solver_name'] + except: + raise Exception ('Solver name needed, that is installed and recognisable by Pyomo, e.g. "glpk", "gurobi", "cbc"') + + + + def get_DA_resload(self, timestamps_df, mode = None): + """timestamps_df must contain 'delivery_day' and must contain + either 'delivery_hour' or 'delivery_time' (for later use)""" + + if mode == 'flat_resload_profile': + system_pmax = self.asset_portfolios['pmax'].sum() + resload = [0.8 * system_pmax] * len(timestamps_df) + elif mode == '24h_residual_load_profile': + #here a list of 24h values can be edited manually. + #These values will be used as residual load for every DA market simulation (independent from day en time) + data_lst = [ + 0.6, + 0.6, + 0.6, + 0.6, + 0.6, + 0.6, + 0.7, + 0.7, + 0.7, + 0.7, + 0.7, + 0.7, + 0.9, + 0.9, + 0.9, + 0.9, + 0.9, + 0.9, + 0.7, + 0.7, + 0.7, + 0.7, + 0.7, + 0.7] + system_pmax = self.asset_portfolios['pmax'].sum() + resload = [i*system_pmax for i in data_lst] + #ensure that the residual load curve starts start of the DA auction (especially in the first step) + #done by doubeling the list and starting at the hour + resload = resload * 2 + resload=resload[len(resload) - len(timestamps_df):] + else: + #residual load from scenarios for specific timestamps + if 'delivery_hour' in timestamps_df.columns: + #DA runs on hours.So residual load must be grouped to hours + resload = self.DA_residual_load.groupby(by=['delivery_day', 'delivery_hour']).mean()['residual_load_DA'] + #filter on day and mtu list + resload = resload.loc[resload.index.isin(list( + timestamps_df.set_index(['delivery_day','delivery_hour']).index.values))].copy() + resload = list(resload.astype('int')) + else: + raise Exception(' get_DA_load has delivery_mtu not yet implemented. only delivery_hour') + return (resload) + + def allocate_exo_errors(self, mode='exogenious'): + """a positive error (in p.u. of pmax) is considered as a long trade position. + a negative error means a as a short trade position. 
Short may thus lead to increasing dispatch of that agent.""" + if mode == 'exogenious': + new_error = self.forecast_errors.loc[(self.forecast_errors['identification_day']==self.model.clock.get_day())&( + self.forecast_errors['identification_MTU']==self.model.clock.get_MTU())].reset_index() + if new_error.empty: + print('no new exog. forecast errors identified') + else: + print('new forcast error identified:') + print(new_error) + for i in range(len(new_error)): + error_DF = self.model.schedules_horizon.copy() + error_DF.rename(columns = {'commit':'new_error'}, inplace = True) + error_DF['new_error'].loc[(slice(new_error.loc[i,'error_start_day'].astype('float64'),new_error.loc[i,'error_end_day'].astype('float64')), + slice(new_error.loc[i,'error_start_time'].astype('float64'),new_error.loc[i,'error_end_time'].astype('float64')) + )] = new_error.loc[i,'error_magnitude_pu'] + if new_error.loc[i,'who'] == 'all': + for agent in self.model.schedule.agents: + agent_pmax = self.asset_portfolios.loc[self.asset_portfolios['asset_owner'] == agent.unique_id,'pmax'].sum() + #error in MW + error_DF['forecast_error']=error_DF['new_error'] * agent_pmax + agent.trade_schedule['forecast_error'] = agent.trade_schedule['forecast_error'].add(error_DF['forecast_error'], fill_value = 0) + agent.unchanged_position = 'forecast_error' + elif new_error.loc[i,'who'] in self.model.MP_dict: + agent = self.model.MP_dict[new_error.loc[i,'who']] + agent_pmax = self.asset_portfolios.loc[self.asset_portfolios['asset_owner'] == agent.unique_id,'pmax'].sum() + #error in MW + error_DF['forecast_error'] = error_DF['new_error'] * agent_pmax + agent.trade_schedule['forecast_error'] = agent.trade_schedule['forecast_error'].add(error_DF['forecast_error'], fill_value = 0) + agent.unchanged_position = 'forecast_error' + + elif mode == 'from_scenario': + #ATTENTION THIS DOES NOT WORK YET + error_DF = self.model.schedules_horizon.copy() + error_DF.rename(columns = {'commit':'new_error'}, inplace = True) + + elif new_error.loc[i,'who'] == 'system_e_randomly_distributed': + """the given value is the sum of all forecast errors in the system + proportional to the installed capacity. This value is uniformly + distributed over the agents. 
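+
+                Illustrative sketch of the divider trick used below
+                (hypothetical numbers: a 100 MW system error split over three
+                agents; the shares always sum back to the total)::
+
+                    >>> import random
+                    >>> dividers = sorted(random.sample(range(-25, 100), 2))
+                    >>> shares = [a - b for a, b in zip(dividers + [100], [0] + dividers)]
+                    >>> sum(shares)
+                    100
+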
can also be upto 25% negative for an agent, + while the total system error is positive (and vice versa).""" + system_pmax = self.asset_portfolios['pmax'].sum() + dividers = sorted(random.sample(range(int(-0.25* abs(system_pmax)), + int(abs(system_pmax))), len(self.model.schedule.agents) - 1)) + random_err = [a - b for a, b in zip(dividers + [int(abs(system_pmax))], [0] + dividers)] + #shuffle the list to make sure that error is distributed randomly + random.shuffle(random_err) + i = 0 + for agent in self.model.schedule.agents: + error_DF['forecast_error']=(error_DF['new_error'] * random_err[i]).round() + agent.trade_schedule['forecast_error'] = agent.trade_schedule['forecast_error'].add(error_DF['forecast_error'], fill_value = 0) + agent.unchanged_position = 'forecast_error' + i += 1 + else: + raise Exception('forecast error recepient not known') + + def get_all_assets(self): + """return the total asset portfolio of initialized agents""" + assetsdf = DataFrame() + for agent in self.model.schedule.agents: + all_ids = agent.assets.index.values + # this is a place holder for a Day-ahead market result dispatch method + for i in range(len(all_ids)): + a = agent.assets.loc[all_ids[i],:].item() + df = a.get_as_df() +# df.insert(loc = 0, column ='agent_id',value = agent.unique_id) + assetsdf = pd.concat([assetsdf, df], ignore_index=True) + return(assetsdf) + + + + def generate_forecast_errors(self): + for agent in self.model.schedule.agents: + agent.trade_schedule['forecast_error'] = 0 + + + def IB_default_price (self, day, time): + self.imbalance_price = 35 + return (self.imbalance_price) + + + + + + + + + + + diff --git a/asam classes/GridSystemOperator.py b/asam classes/GridSystemOperator.py new file mode 100644 index 0000000..d96edca --- /dev/null +++ b/asam classes/GridSystemOperator.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri May 5 11:40:02 2017 +@author: Samuel Glismann + +Grid and System Operator (e.g. TSO or DSO) class for ASAM. +The Grid and Sytstem Operator class could get inherent classes for specific operators, such as DSO' or TSO's. + +Currently, the grid and System Operator class contains methods which are typically +executed by a Transmission System Operator (according to EU definitions): + - check_market_consistency() + - update_imbalances_and_returns() +The balancing market operation and imbalance settlement is implemented as a Market Operator method in ASAM. + +Typical Grid Operator methods are: + - determine_congestions() + - redispatch_demand() +The redispatch market/mechansm is implemented in ASAM as a Market Operator method. + +Yet, the ancillary service demand (redispatch) is not simulated but provided exogeneously. 
+"""
+
+from mesa import Agent, Model
+from mesa.time import RandomActivation
+from random import randrange, choice
+import pandas as pd
+from pandas import Series, DataFrame
+import numpy as np
+from OrderMessage import *
+from Orderbook import *
+
+
+class GridSystemOperator():
+    """The Grid and System Operator class can be subclassed for specific
+    operators, such as DSOs or TSOs."""
+
+    def __init__(self, unique_id, model):
+#        super().__init__(unique_id, model)
+        self.unique_id = unique_id
+        self.money = 0
+        self.model = model
+        # used for unique order IDs of the Grid and System Operator
+        self.ordercount = 0
+        #redispatch demand does not contain past MTU
+        #Notation: positive value means upward demand in an area, negative means downward
+        self.red_demand = DataFrame(columns = ['delivery_day', 'delivery_time'] + self.model.gridareas)
+        self.red_demand.set_index(['delivery_day', 'delivery_time'], inplace = True)
+        #procured redispatch is cleaned every round
+        self.red_procured = DataFrame(columns=['agent_id','associated_asset','delivery_location','quantity','price', 'delivery_day','delivery_time','order_type','init_time', 'order_id', 'direction','matched_order','cleared_quantity','cleared_price','rem_vol', 'due_amount'])
+        #imbalance return of the last round; cleared every round
+        self.imb_return = 0
+        self.financial_return = model.clock.asset_schedules_horizon() #manipulated every round. All returns where delivery period > current time remain unchanged
+        self.financial_return = self.financial_return.reindex(columns = ['RD_return', 'BE_return','IB_return', 'total_return'])
+        self.imbalances = model.clock.asset_schedules_horizon() #manipulated every round. All trade positions where delivery period > current time remain unchanged
+        self.imbalances = self.imbalances.reindex(columns= ['imbalance_redispatch', 'imbalance_market(realized)','imbalance_market(scheduled)','imbalance_balancing', 'sum_imbalances' ], fill_value= 0)
+        self.imbalances.iloc[:] = np.nan
+        self.system_transactions = model.clock.asset_schedules_horizon()
+        self.system_transactions = self.system_transactions.reindex(columns=['DAM_sell','DAM_buy','IDM_sell', 'IDM_buy','RDM_sell', 'RDM_buy','BEM_sell','BEM_buy'])
+
+    def check_market_consistency(self):
+        #this check assumes that the System Operator directly receives all trades from market platforms
+        #via so-called 'nomination on behalf'.
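+        #In essence (illustrative sketch, hypothetical numbers; not part of
+        #the model): every cleared buy on the commodity markets has a cleared
+        #sell counterpart, so per delivery MTU the sums must cancel out:
+        #
+        #   import pandas as pd
+        #   trades = pd.DataFrame({'DAM_buy': [50], 'DAM_sell': [50],
+        #                          'IDM_buy': [10], 'IDM_sell': [10]})
+        #   assert ((trades[['DAM_buy','IDM_buy']].sum(axis=1)
+        #            - trades[['DAM_sell','IDM_sell']].sum(axis=1)) == 0).all()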
+ all_trades = self.model.schedules_horizon.copy() + all_trades= all_trades.add(self.system_transactions,fill_value = 0) + buy_keys =[] + sell_keys=[] + if self.model.exodata.sim_task['save_intermediate_results[y/n]']=='y': + #enlarge system transactions timeframe to schedule time index + for obook in self.model.rpt.all_books.keys(): + csell = self.model.rpt.all_books[obook].cleared_sell_sum_quantity.sum() + cbuy =self.model.rpt.all_books[obook].cleared_buy_sum_quantity.sum() + all_trades[str(obook)+ '_sell' ] = csell + all_trades[str(obook)+ '_buy' ] = cbuy + if (obook =='DAM')|(str(obook) =='IDM'): + #store keys of commodity markets to check consistency + buy_keys =buy_keys +[str(obook)+ '_buy'] + sell_keys =sell_keys +[str(obook)+ '_sell'] + + elif self.model.exodata.sim_task['save_intermediate_results[y/n]']=='n': + for obook in self.model.rpt.all_books.keys(): + sells= self.model.rpt.all_books[obook].cleared_sellorders_all_df.groupby(['delivery_day','delivery_time'])[ + 'cleared_quantity'].sum().reset_index() + if sells.empty: + all_trades[str(obook)+ '_sell' ]= np.nan + else: + sells[['delivery_day','delivery_time']]=sells[['delivery_day','delivery_time']] + sells.set_index(['delivery_day','delivery_time'], inplace=True) + all_trades[str(obook)+ '_sell' ]= sells.sort_index().round(0).astype(int).copy() + + buys = self.model.rpt.all_books[obook].cleared_buyorders_all_df.groupby(['delivery_day','delivery_time'])['cleared_quantity'].sum().reset_index() + if buys.empty: + all_trades[str(obook)+ '_buy' ] =np.nan + else: + buys[['delivery_day','delivery_time']]=buys[['delivery_day','delivery_time']] + buys.set_index(['delivery_day','delivery_time'], inplace=True) + all_trades[str(obook)+ '_buy' ] = buys.sort_index().round(0).astype(int).copy() + + if (obook =='DAM')|(str(obook) =='IDM'): + #store keys of commodity markets to check consistency + buy_keys =buy_keys +[str(obook)+ '_buy'] + sell_keys =sell_keys +[str(obook)+ '_sell'] + + + #this is a consistency check: if the sum of all buy orders minus the sum of all sell orders != 0, there is an issue + if ((all_trades[buy_keys].sum(axis=1).fillna(value=0)-all_trades[sell_keys].sum(axis=1).fillna(value=0)).round() != 0).any(): + all_trades['sum_trades'] = all_trades[buy_keys].sum(axis=1).fillna(value=0)-all_trades[sell_keys].sum(axis=1).fillna(value=0) + self.system_transactions[all_trades.columns] = all_trades.copy() + import pdb + pdb.set_trace() + raise Exception ('inconsistent trades detected: globally, cleared trades do not sum up to zero', all_trades) + else: + self.system_transactions = all_trades.copy() + + + def update_imbalances_and_returns(self, positions =[]): + new_imbalances = self.model.schedules_horizon.copy() + new_imbalances = new_imbalances.add(self.imbalances,fill_value = 0) + new_returns = self.model.schedules_horizon.copy() + new_returns = new_returns.add(self.financial_return,fill_value = 0) + for i in positions: + new_transactions = DataFrame() + if i == 'imbalance_redispatch': + new_transactions = self.red_procured[['delivery_day','delivery_time' + ,'cleared_quantity','direction', 'due_amount']] + #reverse pay direction to System Operator convention + new_transactions['due_amount'] = - new_transactions['due_amount'] + k = 'RD_return' + + #attention: buy and sell orders are the market notation. + #sell means market party sold, System Operator bought electricity. Buy means market party bought. + #System Operator can neither produce nor consume electricity. 
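+                #Illustrative example (hypothetical numbers): if market parties
+                #sold 30 MW of upward redispatch and bought 20 MW of downward
+                #redispatch in an MTU, the operator's redispatch imbalance is
+                #
+                #   imbalance_redispatch = RDM_sell - RDM_buy = 30 - 20 = +10 MW (long)
+                #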
+ #positive redispatch imbalance means System Operator has a long position (because System Operator bought more than it sold electricity) + new_imbalances[i] = self.system_transactions['RDM_sell'].sub(self.system_transactions['RDM_buy'], fill_value=0) + + elif i == 'imbalance_market(realized)': + #realized imbalance is only admistered for previous timestamp + #no new transaction is admistered, as no orders involved + k = 'IB_return' + try: + timestamp = self.model.IB_marketoperator.cur_imbalance_settlement_period + except: + timestamp = None + if timestamp == None: + #no imbalance settlement has taken place, or no IBM is simulated + pass + else: + new_imbalances.loc[timestamp,i] = self.model.IB_marketoperator.imbalances_short + self.model.IB_marketoperator.imbalances_long + new_returns.loc[timestamp, k] = self.imb_return + elif i == 'imbalance_market(scheduled)': + #scheduled imbalance from market is only admistered for monitoring + #this is actually not yet firm or settled + + #set former scheduled market imbalance on 0, because it is not added to former scheduled imbalances + new_imbalances.loc[self.model.schedules_horizon.index,i]=0 + + for agent in self.model.schedule.agents: + #negative means short position, positive means long position. + if (agent.trade_schedule.loc[self.model.schedules_horizon.index ,'imbalance_position']!=0).any(): + new_imbalances.loc[self.model.schedules_horizon.index,i + ] = new_imbalances.loc[self.model.schedules_horizon.index,i + ].add(agent.trade_schedule.loc[ + self.model.schedules_horizon.index,'imbalance_position'], fill_value=0) +# #remove scheduled imbalance value from current (-1 because global counter has advanced after agents step) + new_imbalances.loc[self.model.schedules_horizon.index[0],i]=np.nan + else: + raise Exception('Grid and system operator position to be updated is unknown') + if new_transactions.empty: + pass #do nothing. next position. + else: + + new_transactions[k] = new_transactions['due_amount'] + new_transactions.set_index(['delivery_day','delivery_time'], inplace=True) + #sum (saldo) of trades from the agent per timestamp + new_transactions = new_transactions.groupby(level =[0,1]).sum() + #add to 'return' column in self.financial_return + new_returns[k] = new_returns[k].add(new_transactions[k], fill_value = 0) + + #overwrite imbalances + self.imbalances = new_imbalances.copy() + #overwrite self.financial returns + self.financial_return = new_returns.copy() + #calculate net imbalance. + self.imbalances['sum_imbalances'] = self.imbalances[['imbalance_redispatch', 'imbalance_market(realized)','imbalance_market(scheduled)','imbalance_balancing']].sum(axis=1) + #calculate total return. + self.financial_return['total_return'] = self.financial_return[['RD_return','IB_return', 'BE_return']].sum(axis=1) + + + + def determine_congestions (self): + """ + This method determines congestions. However, at the moment this means only + reading exogeneosly provided redispatch demand. + + In future, a load-flow calculation could be added here. 
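+
+        Illustrative sketch (hypothetical numbers)::
+
+            >>> import pandas as pd
+            >>> demand = pd.DataFrame(0, index=['d1_mtu1'], columns=['A', 'B'])
+            >>> demand.loc['d1_mtu1', 'A'] += 100    # upward redispatch area
+            >>> demand.loc['d1_mtu1', 'B'] -= 100    # downward redispatch area
+            >>> int(demand.sum(axis=1).iloc[0])      # balanced per delivery MTU
+            0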
+ """ + + if self.model.exodata.sim_task['congestions'] == 'exogenious': + #check if exodatabase provides new congestions for this step + #current time; + cur_day = self.model.schedules_horizon.index.get_level_values(0)[0] + cur_mtu = self.model.schedules_horizon.index.get_level_values(1)[0] + new_congestion = self.model.exodata.congestions.loc[ + (self.model.exodata.congestions['identification_day']==cur_day)&( + self.model.exodata.congestions['identification_MTU']==cur_mtu)] + if new_congestion.empty: + return(None) + else: + #make dataframe with areas as columns and horizon as index + new_red_demand = pd.concat([self.model.schedules_horizon.copy(), DataFrame(columns=(self.model.gridareas))]) + new_red_demand[self.model.gridareas] = 0 + new_red_demand.drop('commit', axis =1, inplace = True) + + #make a addable data frame from identified new congestion. + #can be multiple in a round + for i in range(len(new_congestion)): + con_DF = new_red_demand.loc[(slice(new_congestion.loc[i,'congestion_start_day'],new_congestion.loc[i,'congestion_end_day']), + slice(new_congestion.loc[i,'congestion_start_time'],new_congestion.loc[i,'congestion_end_time'])),:].copy() + + con_DF.loc[:,new_congestion.loc[i,'down_area']]= -new_congestion.loc[i,'redispatch_quantity'] + con_DF.loc[:,new_congestion.loc[i,'up_area']]= new_congestion.loc[i,'redispatch_quantity'] + new_red_demand = new_red_demand.add(con_DF, fill_value = 0) + + print('new congestions identified') + return(new_red_demand) + + elif self.model.exodata.sim_task['congestions'] == 'from_scenario': + #select all congestions within the schedules horizon with idetification time == current + new_congestion = self.model.exodata.congestions.loc[ + self.model.exodata.congestions.index.isin(self.model.schedules_horizon.index.values)] + + new_congestion = new_congestion.loc[(new_congestion['identification_day'] == + self.model.schedules_horizon.index.values[0][0])&( + new_congestion['identification_mtu'] == + self.model.schedules_horizon.index.values[0][1])].copy() + + if new_congestion.empty: + return(None) + #make dataframe with areas as columns and horizon as index + new_red_demand = pd.concat([self.model.schedules_horizon.copy(), DataFrame(columns=(self.model.gridareas))]) + new_red_demand[self.model.gridareas] = 0 + new_red_demand.drop('commit', axis =1, inplace = True) + for congestion in Series(list(zip(list(new_congestion['redispatch_areas_down']),list( + new_congestion['redispatch_areas_up'])))).unique(): + #Series of a specific congestion + C_MW = new_congestion.loc[(new_congestion['redispatch_areas_down']==congestion[0])&( + new_congestion['redispatch_areas_up']==congestion[1]), 'congestion_MW'] + #down area + new_red_demand[congestion[0]] = new_red_demand[congestion[0]].add(-C_MW, fill_value = 0) + #up area + new_red_demand[congestion[1]] = new_red_demand[congestion[1]].add(C_MW, fill_value = 0) + print('new congestions identified:') + print(new_congestion) + return(new_red_demand) + + + def redispatch_demand(self, new_congestion): + """ + Method combines new redispatch demand with previous redispatch demand (if not yet solved). + It formulates demand orders. 
+ + input: congestions (list with congestion parameters) + """ + #check if redisaptch is part of simmulation task + if self.model.exodata.sim_task['run_RDM[y/n]']=='n': + print('Grid Operator: no redispatch in simlulation task') + else: + #first delete all Grid Operator redispatch orders from previous round from orderbook + self.model.red_obook.delete_orders(agent_id_orders = self.unique_id) + + #get DF with schedule horizon for new redispatch demand DF + new_red_demand = pd.concat([self.model.schedules_horizon.copy(), DataFrame(columns=(self.model.gridareas))]) + new_red_demand[self.model.gridareas] = 0 + new_red_demand.drop('commit', axis =1, inplace = True) + + #get redispatch demand from previous round + new_red_demand = new_red_demand.add(self.red_demand, fill_value = 0) + + + # include procured redispatch from previous round + if self.red_procured.empty: + #no redispatch procured in previous round + pass + else: + self.red_procured.set_index(['delivery_location','delivery_day','delivery_time'], inplace =True) + + #make buyorders negative + self.red_procured['cleared_quantity']=self.red_procured['cleared_quantity'].where( + self.red_procured['direction']=='sell',-self.red_procured['cleared_quantity']) + #make again unique values per location, day and time + shifted = self.red_procured['cleared_quantity'].groupby(level=[0,1,2]).sum() + #pivot to get the areas as column names + shifted = shifted.unstack(level = 0) + #DF with booleans to check if more is procured than needed + over_proc = new_red_demand.abs().sub(shifted.abs(), fill_value = 0) < 0 + remaining_demand = new_red_demand.sub(shifted, fill_value = 0) + #substract procured redispatch from red_demand and fill 0 if proc>demand + new_red_demand = remaining_demand.where(~over_proc, 0) + self.red_procured = self.red_procured.iloc[0:0] + self.red_procured.reset_index(inplace=True) + #remove demand of delivery MTU that lie in the past + new_red_demand = new_red_demand.loc[self.model.schedules_horizon.index] + + #add new congestions (if any) + if new_congestion is None: + print("no Grid Operator additional congestions identified in this round") + pass + else: + new_red_demand = new_red_demand.add(new_congestion, fill_value = 0) + self.red_demand = new_red_demand.copy() + + #delete all Grid Operator orders from previous rounds + self.model.red_obook.delete_orders(agent_id_orders = -1) + #make the redispatch_demand orders + order_lst=[] + make_orders = self.red_demand.loc[(self.red_demand != 0).any(axis=1)] + make_orders.columns.name = 'delivery_location' + make_orders = make_orders.stack() + make_orders.name = 'quantity' + make_orders = make_orders.reset_index() + #exclude all areas with 0 demand + make_orders = make_orders.loc[make_orders['quantity'] != 0].copy() + make_orders = make_orders.reset_index() + init = self.model.schedule.steps -1 + 0.0 + for k in range(len(make_orders)): + order_id = str("GridOperator") + str("_") + str(self.ordercount) + location = make_orders.loc[k,'delivery_location'] + #note that quantity will be positive in the Grid Operator order (23-08-2018) + vol = int(make_orders.loc[k,'quantity']) + if vol > 0: + direction = 'sell' + elif vol < 0: + direction = 'buy' + else: + continue + price = np.nan + delivery_duration =1 + delday = int(make_orders.loc[k,'delivery_day']) + deltime = int(make_orders.loc[k,'delivery_time']) + GridOperatorBid = [self.unique_id,"Network", location,abs(vol),price, delday, deltime, + 'redispatch_demand', init, order_id, direction, delivery_duration] + 
order_lst.append(GridOperatorBid) + self.ordercount += 1 + if order_lst: + orders = OrderMessage(order_lst) + self.model.red_obook.add_order_message(orders) + + + + diff --git a/asam classes/Main.py b/asam classes/Main.py new file mode 100644 index 0000000..4b45855 --- /dev/null +++ b/asam classes/Main.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +""" +Created on Sun Sep 3 15:24:23 2017 +@author: Samuel Glismann + +Main class to run ASAM simulations. +This script reads input files, initiates an ASAM model from it, starts the simulation, +collects results and stores them in an excel file. + + + +""" + +import pandas as pd +from pandas import Series, DataFrame +import numpy as np +import matplotlib.pyplot as plt +from datetime import datetime +from MarketModel import * +from Visualization import * +import mesa +from IPython import get_ipython +import os + + +pd.options.mode.chained_assignment = None # default='warn' +def read_input_data(path,filename): + """ + - Method: read exogenous data for the simulation from excel + - path is the file directory + - filename is the name of the input file (including .xlsx) + """ + + #prepare parameters for model + print('read exogenious data') + simulation_parameters =pd.read_excel(path+filename, sheetname=None) + simulation_parameters['da_residual_load'] = pd.read_excel(path+filename, sheetname='da_residual_load', header =[0,1]) + simulation_parameters['simulation_task'] = pd.read_excel(path+filename, sheetname='simulation_task', index_col=0).squeeze() + + #get IBP kde pdfs from hdf5 file + IBP_kde_pdfs = pd.read_hdf(path+'IBP_pdf_kernels_allISPcs_20201213.h5', 'IBP_pdf_kernels') + #add dataframe from pickl to dictionary + simulation_parameters['IBP_kde_pdfs'] = IBP_kde_pdfs + return (simulation_parameters) + +"""directories to be entered""" +#input directory +idir=r'input_data/' +#output directory +rdir=r'results/' +#filename +iname = "example_scenario.xlsx" + +#read simulation input file +simulation_parameters = read_input_data(idir, iname) +# +simulation_parameters['output_path'] = rdir + +simulation_start_time = datetime.now().replace(microsecond=0) +sim_task = simulation_parameters['simulation_task'] + +#initiate model +model = MarketModel(simulation_parameters, seed = sim_task['seed']) +#run simulation steps +for i in range(sim_task['number_steps']): + model.step() + +simulation_end_time = datetime.now().replace(microsecond=0) +simulation_run_time = simulation_end_time - simulation_start_time + +print(">>>>>>>>>>>>>>>>>>END Simulation>>>>>>>>>>>>>>>>>>>>>>>>>>>") +print("process results for output") + + +#prepare storing simulation inputs +simulation_task= model.exodata.sim_task +simulation_task['sim_start_time'] = simulation_start_time +simulation_task['sim_end_time'] = simulation_end_time +simulation_task['sim_run_time'] = str(simulation_run_time) +simname=simulation_task['simulation_name']+'_' +assetsdf = model.exodata.get_all_assets() +congestionsdf = model.exodata.congestions +agent_strategiesdf = model.exodata.agent_strategies +market_rulesdf = model.exodata.market_rules +forecast_errorsdf = model.exodata.forecast_errors + + +###excel writing main input data +stamp= str(datetime.now().replace(microsecond=0)) +stamp=stamp.replace('.','') +stamp=stamp.replace(':','_') +writer = pd.ExcelWriter(rdir+'ModelResults'+simname+stamp+'.xlsx', engine='xlsxwriter') +simulation_task.to_frame().to_excel(writer, sheet_name = 'simulation_input') +startrow = len(simulation_task)+2 +market_rulesdf.to_excel(writer, sheet_name = 'simulation_input', 
startrow=startrow) +startrow += len(market_rulesdf)+2 +agent_strategiesdf.to_excel(writer, sheet_name = 'simulation_input', startrow=startrow) +startrow += len(agent_strategiesdf)+2 +assetsdf.to_excel(writer, sheet_name = 'simulation_input', startrow=startrow) +startrow += len(assetsdf)+2 +congestionsdf.to_excel(writer, sheet_name = 'simulation_input', startrow=startrow) +startrow += len(congestionsdf)+2 +forecast_errorsdf.to_excel(writer, sheet_name = 'simulation_input', startrow=startrow) +startrow += len(forecast_errorsdf)+2 + +#interdependence indicators +indicators, allprofitloss =model.rpt.interdependence_indicators() +indicators.to_excel(writer,sheet_name = 'interdependence_indicators') + +keyfigures=model.rpt.final_keyfigures() +###change index value for plotting purposes label +keyfigures.loc['imbalance_market',:]=keyfigures.loc['imbalance_market(scheduled)',:] +keyfigures.drop('imbalance_market(scheduled)', axis=0, inplace=True) +keyfigures.to_excel(writer, sheet_name ='key_figures') + +model.rpt.redispatch_PI().to_excel(writer, sheet_name = 'performance_indicators') + +#get results of reporters as DataFrame (mesa) +agentdf = model.dc.get_agent_vars_dataframe() +agentdf= agentdf.unstack(1) +agentdf.sort_index(axis=1, inplace=True) +agentdf.to_excel(writer, sheet_name = 'AgentResults') + +#add mark-up analyses (mark-up is added to order dataframes) +model.rpt.mark_ups_analysis() + +#all order collection per round +all_collections=[] +collection_names=[] +if sim_task['run_IDM[y/n]'] =='y': + all_collections += [model.IDM_obook.sellorders_all_df, + model.IDM_obook.buyorders_all_df, + model.IDM_obook.cleared_sellorders_all_df, + model.IDM_obook.cleared_buyorders_all_df] + collection_names +=['IDsellorders','IDbuyorders','IDc_sellorders','IDc_buyorders'] + +if sim_task['run_RDM[y/n]'] =='y': + all_collections += [model.red_obook.sellorders_all_df, + model.red_obook.buyorders_all_df, + model.red_obook.cleared_sellorders_all_df, + model.red_obook.cleared_buyorders_all_df, + model.red_obook.redispatch_demand_upward_all_df, + model.red_obook.redispatch_demand_downward_all_df] + collection_names += ['RDMsellorders','RDMbuyorders','RDMc_sellorders','RDMc_buyorders', 'RDM_demand_upward','RDM_demand_downward'] + +if sim_task['run_DAM[y/n]']=='y': + all_collections += [model.DAM_obook.sellorders_all_df, + model.DAM_obook.buyorders_all_df, + model.DAM_obook.cleared_sellorders_all_df, + model.DAM_obook.cleared_buyorders_all_df] + collection_names +=['DAMsellorders','DAMbuyorders','DAc_sellorders','DAc_buyorders'] + +if sim_task['run_BEM[y/n]'] =='y': + all_collections += [model.BEM_obook.sellorders_all_df, + model.BEM_obook.buyorders_all_df] + collection_names +=['BEsellorders','BEbuyorders'] + +bb=0 +for a in all_collections: + if not a.empty: + a.to_excel(writer,sheet_name=collection_names[bb]) + bb+=1 + +model.rpt.get_cleared_prices().to_excel(writer,sheet_name='cleared_prices') +model.rpt.get_system_dispatch().to_excel(writer,sheet_name='system_dispatch') +model.rpt.get_all_trade_schedules().to_excel(writer,sheet_name='trade_schedules') +model.rpt.get_all_returns().to_excel(writer,sheet_name='all_returns') +writer.save() + + +print(" done------------------------------------") + + + + + + diff --git a/asam classes/MarketModel.py b/asam classes/MarketModel.py new file mode 100644 index 0000000..c000d5f --- /dev/null +++ b/asam classes/MarketModel.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Aug 17 09:30:00 2017 +@author: Samuel Glismann + +Market Model class of ASAM. 
+ +This class: + 1. initiates the model + 2. has a simulation step function, which triggers activities of all agents + +""" + +from mesa import Agent, Model +from mesa.datacollection import DataCollector +from mesa.time import RandomActivation +from random import randrange, choice +import pandas as pd +from pandas import Series, DataFrame +import numpy as np +from Time import * +from Orderbook import * +from Asset import * +from MarketParty import * +from MarketOperator import * +from GridSystemOperator import * +from Reports import * +from ExogeniousDatabase import * +from Visualization import * + + + +class MarketModel(Model): + def __init__(self, simulation_parameters, seed = None): + #seed != none allows for the same random numbers in various runs. + + #exogenious database + self.exodata = ExoData(self, simulation_parameters) + + + #Scheduler class of Mesa RandomActivation of market party agents + self.schedule = RandomActivation(self) + + #areas + self.gridareas = list(self.exodata.asset_portfolios['location'].unique()) + + # Time class for a clock + self.clock = Time(self, startday = self.exodata.sim_task['start_day'], + step_size = "15_minutes", startMTU = self.exodata.sim_task['start_MTU'], + step_numbers = self.exodata.sim_task['number_steps'], + DA_GCT = self.exodata.market_rules.loc['gate_closure_time','DAM']) + + #DataFrame that provides the time horizon for setting schedules and forward looking of agents + self.schedules_horizon = DataFrame() + + if self.exodata.sim_task['run_DAM[y/n]']=='y': + #Orderbook day-ahead market + self.DAM_obook =Orderbook (self, ob_type='DAM') + #Initiate market operator day-ahead + self.DA_marketoperator = MO_dayahead(self, self.DAM_obook, self.exodata.market_rules['DAM']) + + if self.exodata.sim_task['run_IDM[y/n]']=='y': + #Orderbook for intraday market + self.IDM_obook = Orderbook (self,ob_type='IDM') + #Initiate market operator for redispatch + self.ID_marketoperator = MO_intraday(self, self.IDM_obook, self.exodata.market_rules['IDM']) + + if self.exodata.sim_task['run_RDM[y/n]']=='y': + #Orderbook for redispatch + self.red_obook = Orderbook (self, ob_type='redispatch') + #Initiate market operator for redispatch + self.RD_marketoperator = MO_redispatch(self, self.red_obook, self.exodata.market_rules['RDM']) + + if self.exodata.sim_task['run_BEM[y/n]']=='y': + #Orderbook balancing market/mechanism + self.BEM_obook =Orderbook (self, ob_type='BEM') + #Initiate market operator balancing + self.BE_marketoperator = MO_balancing_energy(self, self.BEM_obook, self.exodata.market_rules['BEM']) + + if self.exodata.sim_task['run_IBM[y/n]']=='y': + #Orderbook imbalance market/mechanism + self.IBM_obook = Orderbook (self, ob_type=None) + #Initiate market operator imbalance + self.IB_marketoperator = MO_imbalance(self, self.IBM_obook, self.exodata.market_rules['IBM']) + + self.plots = self.exodata.sim_task['plots_during_simulation'] + + #create Grid and System Operator agent (e.g. 
TSO and/or DSO) + self.aGridAndSystemOperator = GridSystemOperator("Grid_and_System_Operator", self) + + #dictionary referening to all market party agents + self.MP_dict = {} + # Create MP agents from exodata + for i in self.exodata.asset_portfolios['asset_owner'].unique(): + #temp DF to make code better to read + df = self.exodata.asset_portfolios.loc[self.exodata.asset_portfolios['asset_owner'] == i].reset_index() + lst_assets =[] + for k in range(len(df)): + newasset = Asset(self, assetowner = i, assetname= str(df.loc[k,'asset_name']), + pmax = df.loc[k,'pmax'].astype(int), pmin = df.loc[k,'pmin'].astype(int), + location = df.loc[k,'location'], srmc = df.loc[k,'srmc'].astype(int), + ramp_limit_up = df.loc[k,'ramp_limit_up'], + ramp_limit_down = df.loc[k,'ramp_limit_down'], + min_up_time = df.loc[k,'min_up_time'], + min_down_time = df.loc[k,'min_down_time'], + start_up_cost = df.loc[k,'start_up_cost'].astype(int), + shut_down_cost = df.loc[k,'shut_down_cost'].astype(int), + ramp_limit_start_up = df.loc[k,'ramp_limit_start_up'], + ramp_limit_shut_down = df.loc[k,'ramp_limit_shut_down']) + lst_assets.append([str(df.loc[k,'asset_name']), newasset]) + + #asset portfolio provided to MarketParty class as DF with key and Asset() instances + asset_portfolio = DataFrame(lst_assets, columns = ['ID', 'object']) + asset_portfolio.set_index(['ID'], inplace = True) + #get agent strategy from exodata + if str(i) in self.exodata.agent_strategies['agent'].values: + strategy = Series(self.exodata.agent_strategies.loc[self.exodata.agent_strategies['agent']==str(i)].squeeze()) + + elif 'All' in self.exodata.agent_strategies['agent'].values: + #if not specifically defined + strategy = Series(self.exodata.agent_strategies.loc[self.exodata.agent_strategies['agent']=='All'].squeeze()) + else: + raise Exception ('no usable strategy found for agent ',i) + a = MarketParty(str(i), self, assets = asset_portfolio, agent_strategy = strategy) + self.schedule.add(a) + self.MP_dict[str(i)] = a + + + #initiate Reports() class + self.rpt = Reports(self) + + print('___Simulation task:') + print(self.exodata.sim_task) + print('___Simulated Portfolio:' ) + print(self.exodata.get_all_assets()) + + #visualisation class for plotting + self.visu = Visualizations(self) + + #get Dicts to use in MESA build-in datacollection and reporting methods + self.dc = DataCollector(model_reporters = self.rpt.model_reporters, + agent_reporters = self.rpt.agent_reporters, + tables = self.rpt.table_reporters) + + def step(self): + '''Advance the model by one step.''' + print(">>>>>>>>>>>>>>>>>>>>>>>>>>STEP>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") + print("testmodel step", self.schedule.steps +1) + print("testmodel MTU: ", self.clock.get_MTU()) + print("testModel day:", self.clock.get_day()) + + #give a number to the agent steps, to later track the (random) order. + self.agent_random_step_index = 0 + self.schedules_horizon = self.clock.asset_schedules_horizon().copy() + + if self.exodata.sim_task['run_DAM[y/n]']=='y': + self.DA_marketoperator.clear_dayahead() + + #forecast error (assigned to agents_trade position) + if self.exodata.sim_task['forecast_errors']=='exogenious': + self.exodata.allocate_exo_errors() + + #determine expected imbalances prices based on day-ahead price in schedule_horizon format + self.rpt.update_expected_IBP() + + #the MESA step counter is +1 after(!) 
schedule.step() + self.schedule.step() + + if self.exodata.sim_task['run_RDM[y/n]']=='y': + print("TSO agent determines congestions and redispatch demand") + new_congestions = self.aGridAndSystemOperator.determine_congestions() + self.aGridAndSystemOperator.redispatch_demand(new_congestions) + self.RD_marketoperator.clear_redispatch() + else: + print('no TSO redispatch in this simulation') + + if self.exodata.sim_task['run_BEM[y/n]']=='y': + self.BE_marketoperator.determine_control_state() + + if self.exodata.sim_task['run_IBM[y/n]']=='y': + self.IB_marketoperator.imbalance_clearing() + self.IB_marketoperator.imbalance_settlement() + + #note that the intraday market is cleared instantanously during every agent step + #MESA data collector function + self.dc.collect(self) + + if self.exodata.sim_task['save_intermediate_results[y/n]']=='y': + self.rpt.save_market_stats(mode='every_step') + elif self.exodata.sim_task['save_intermediate_results[y/n]']=='n': + self.rpt.save_market_stats(mode='at_end') + + self.aGridAndSystemOperator.check_market_consistency() + self.aGridAndSystemOperator.update_imbalances_and_returns(positions =[ + 'imbalance_redispatch','imbalance_market(realized)', + 'imbalance_market(scheduled)' ]) + + #plots + if self.plots =='every_step': + #Ensure that agent schedules are updated. + #Otherwise dispatch and trade schedules are not plotted in synch. with simulation step. + #The unchanged_position variable ensures that in a next step the updates can be skipped. + for agent in self.schedule.agents: + agent.update_trade_schedule(positions=['DA_position','ID_position','RD_position','BE_position']) + agent.set_asset_commit_constraints() + agent.portfolio_dispatch() + self.visu.show_trade_per_agent() + self.visu.show_dispatch_per_agent() + self.visu.show_return_per_agent() + self.visu.show_system_balance() + + elif self.plots =='every_change': + #only plot when something changed + something_changed=[] + for agent in self.schedule.agents: + if agent.unchanged_position == False: + something_changed +=[True] + if something_changed: + #Ensure that agent schedules are updated. + #Otherwise dispatch and trade schedules are not plotted in synch. with simulation step. + #The unchanged_position variable ensures that in a next step the updates can be skipped. 
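+                #This is a simple dirty-flag check (a descriptive note, not new
+                #behaviour): agents set unchanged_position to False when their
+                #position changed this step, so the costly schedule updates,
+                #portfolio re-dispatch and plotting below only run when at
+                #least one agent actually changed.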
+                for agent in self.schedule.agents:
+                    agent.update_trade_schedule(positions=['DA_position','ID_position','RD_position','BE_position'])
+                    agent.set_asset_commit_constraints()
+                    agent.portfolio_dispatch()
+                self.visu.show_trade_per_agent()
+                self.visu.show_dispatch_per_agent()
+                self.visu.show_return_per_agent()
+                self.visu.show_system_balance()
+
+        elif self.plots =='at_end':
+            #no plots during the simulation
+            pass
+
+        #calculate the timestamp of the last round
+        day, MTU = self.clock.calc_timestamp_by_steps(self.schedule.steps -1, 0)
+        if (day==self.clock.end_date[0])&(MTU==self.clock.end_date[1]):
+
+            if (self.plots =='at_end')|(self.plots =='every_change'):
+                if (self.plots =='every_change'):
+                    if (len(something_changed)>0):
+                        #plots and updates already executed
+                        pass
+                else:
+                    print(">>>>>>> Final update of trade schedules and dispatch optimisation")
+                    #Ensure that agent schedules are updated one last time
+                    for agent in self.schedule.agents:
+                        agent.update_trade_schedule(positions=['DA_position','ID_position','RD_position','BE_position'])
+                        agent.set_asset_commit_constraints()
+                        agent.portfolio_dispatch()
+                self.visu.show_trade_per_agent()
+                self.visu.show_dispatch_per_agent()
+                self.visu.show_return_per_agent()
+                self.visu.show_system_balance()
+                self.visu.show_cost_distribution()
+                self.visu.show_redispatch_PI()
+                self.visu.show_redispatch_summary()
+                self.visu.show_cleared_prices()
diff --git a/asam classes/MarketOperator.py b/asam classes/MarketOperator.py
new file mode 100644
index 0000000..a8a7166
--- /dev/null
+++ b/asam classes/MarketOperator.py
@@ -0,0 +1,1221 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Aug 12 16:18:25 2017
+@author: Samuel Glismann
+
+MarketOperator
+
+Market operators have an orderbook class.
+The market rules per market comprise the following variables:
+    - gate_opening_time
+    - gate_closure_time
+    - acquisition_method
+    - pricing_method
+    - order_types
+    - provider_accreditation
+
+Market operators generally have matching and clearing methods.
+EPEX definition of 'clearing': financial and physical settlement of transactions
+(https://www.epexspot.com/en/glossary#l3).
+
+The imbalance settlement operator has no matching method, as no orders are used to determine the outcome.
+
+Please consult the ASAM documentation for more background on the clearing algorithms.
+"""
+from mesa import Agent, Model
+from random import randrange, choice
+import pandas as pd
+from pandas import Series, DataFrame
+import numpy as np
+import pypsa
+from pypsa.opt import l_constraint, l_objective, LExpression, LConstraint
+
+class MarketOperator():
+    def __init__(self, model, orderbook, market_rules):
+        self.obook = orderbook
+        self.model = model
+        #series with the rules for the markets
+        self.rules = market_rules
+
+class MO_intraday(MarketOperator):
+    def __init__(self, model, orderbook, market_rules):
+        MarketOperator.__init__(self, model, orderbook, market_rules)
+        if self.rules.loc['gate_opening_time']=='D-1, MTU 56':
+            #the gate opening time is a fixed MTU of the day (not rolling)
+            self.gate_opening_time = 56
+        else:
+            raise Exception('intraday gate opening time value not known')
+        if self.rules.loc['gate_closure_time']=='deliveryMTU-1':
+            #attention: this gate-closure time is a RELATIVE number of MTUs before
+            #the first delivery MTU (in contrast to single auctions)
+            #+1 because the gate closes at the beginning of this relative MTU.
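+            #Worked example (illustrative): with gate_closure_time = 2, an
+            #order for delivery MTU 40 can still be matched while the current
+            #MTU is 38, but no longer from MTU 39 onwards.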
+ self.gate_closure_time = 1 + 1
+ else:
+ raise Exception('intraday gate closure time value not known')
+
+ def match_intraday_orders(self, for_agent_id=None):
+ """clear every round all ID orders of the same type (limit orders/all
+ or nothing orders, same duration)[continuous double auction]"""
+ if ((self.rules['acquisition_method']== 'continous')&(
+ (self.rules['order_types']== 'limit_and_market')|(
+ self.rules['order_types']== 'limit_market_IDCONS'))&(
+ self.rules['pricing_method']== 'best_price')):
+ #note: rank of labels is important in this method.
+ labels_in_rank = self.obook.offerlabels + ['rem_vol', 'cleared_quantity','cleared_price', 'matched_order']
+ print("...: matching intraday orders")
+ # get sorted sell and buy orders
+ sellorders = self.obook.get_obook_as_multiindex(selection = 'sellorders', incl_location = False).sort_index(axis=1)
+ buyorders = self.obook.get_obook_as_multiindex(selection = 'buyorders',incl_location = False).sort_index(axis=1)
+ #block orders are not matched here (only as IDCONS)
+ buyorders = buyorders.loc[buyorders['delivery_duration'] == 1].copy()
+ sellorders = sellorders.loc[sellorders['delivery_duration'] == 1].copy()
+
+ #extract new orders of the agent that triggered the clearing process
+ agent_sells = sellorders.loc[sellorders['agent_id'] == for_agent_id].copy()
+ agent_sells.sort_index(inplace = True)
+ agent_buys = buyorders.loc[buyorders['agent_id'] == for_agent_id].copy()
+ agent_buys.sort_index(inplace = True)
+
+ #remove the agent orders from the book for clearing (increases speed)
+ sellorders = sellorders.loc[sellorders['agent_id'] != for_agent_id].copy()
+ buyorders = buyorders.loc[buyorders['agent_id'] != for_agent_id].copy()
+ #filter on delivery date-time start
+ list_of_matches=[]
+
+ for delivery_days, m_orders in sellorders.groupby(level=[0,1]):
+ if (m_orders.empty)|(agent_buys.empty):
+ agent_orders=DataFrame()
+ else:
+ try:
+ agent_orders = agent_buys.loc[agent_buys.index.isin(m_orders.index)]
+ except:
+ import pdb
+ pdb.set_trace()
+ for i in range(len(agent_orders)):
+ #remove orders with remaining quantity 0
+ orders = m_orders.loc[(m_orders['rem_vol'] > 0)&(m_orders['price'] < agent_orders['price'].iloc[i])]
+ #end loop if no orders left for matching
+ if len(orders)==0:
+ break
+ #agent order as list
+ a_order = agent_orders.reset_index()[labels_in_rank].iloc[i].values
+ for k in range(len(orders)):
+ #order as list
+ order = orders.reset_index()[labels_in_rank].iloc[k].values
+ #Order columns are ranked as follows with labels_in_rank:
+ #agent_id,associated_asset,delivery_location,quantity,price,delivery_day,delivery_time,
+ #order_type,init_time,order_id,direction, delivery_duration
+ #rem_vol,cleared_quantity,cleared_price,matched_order
+ rem_qty_orders = order[12]
+ rem_qty_agent_orders = a_order[12]
+ match_vol = min(rem_qty_orders,rem_qty_agent_orders)
+ new_rem_qty_orders= rem_qty_orders - match_vol
+ new_rem_qty_agent_orders= rem_qty_agent_orders - match_vol
+ cleared_price = order[4]
+ matched_id_orders = a_order[9]
+ matched_id_agent_orders = order[9]
+ order[12]= new_rem_qty_orders
+ order[13] = match_vol
+ order[15] = matched_id_orders
+ order[14] = cleared_price
+ a_order[12]= new_rem_qty_agent_orders
+ a_order[13] = match_vol
+ a_order[15] = matched_id_agent_orders
+ a_order[14] = cleared_price
+ list_of_matches.append(a_order.copy())
+ list_of_matches.append(order.copy())
+
+ #ensure that in the next loop the new remaining value is used
+ m_orders.loc[m_orders['order_id'] == order[9], 'rem_vol'] = 
new_rem_qty_orders
+ if new_rem_qty_agent_orders == 0:
+ break
+
+ for delivery_days, m_orders in buyorders.groupby(level=[0,1]):
+ if (m_orders.empty)|(agent_sells.empty):
+ agent_orders=DataFrame()
+ else:
+ try:
+ agent_orders = agent_sells.loc[agent_sells.index.isin(m_orders.index)]
+ except:
+ import pdb
+ pdb.set_trace()
+ for i in range(len(agent_orders)):
+ #remove orders with remaining quantity 0
+ orders = m_orders.loc[(m_orders['rem_vol'] > 0)&(m_orders['price'] > agent_orders['price'].iloc[i])]
+ #end loop if no orders left for matching
+ if len(orders)==0:
+ break
+ #agent order as list
+ a_order = agent_orders.reset_index()[labels_in_rank].iloc[i].values
+ for k in range(len(orders)):
+ #order as list
+ order = orders.reset_index()[labels_in_rank].iloc[k].values
+ #Order columns are ranked as follows with labels_in_rank:
+ #agent_id0,associated_asset1,delivery_location2,quantity3,price4,delivery_day5,delivery_time6,
+ #order_type7,init_time8,order_id9,direction10, delivery_duration11
+ #rem_vol12,cleared_quantity13,cleared_price14,matched_order15
+ rem_qty_orders = order[12]
+ rem_qty_agent_orders = a_order[12]
+ match_vol = min(rem_qty_orders,rem_qty_agent_orders)
+ new_rem_qty_orders= rem_qty_orders - match_vol
+ new_rem_qty_agent_orders= rem_qty_agent_orders - match_vol
+ cleared_price = order[4]
+ matched_id_orders = a_order[9]
+ matched_id_agent_orders = order[9]
+ order[12]= new_rem_qty_orders
+ order[13] = match_vol
+ order[15] = matched_id_orders
+ order[14] = cleared_price
+ a_order[12]= new_rem_qty_agent_orders
+ a_order[13] = match_vol
+ a_order[15] = matched_id_agent_orders
+ a_order[14] = cleared_price
+ list_of_matches.append(a_order.copy())
+ list_of_matches.append(order.copy())
+
+ #ensure that in the next loop the new remaining value is used
+ m_orders.loc[m_orders['order_id'] == order[9], 'rem_vol'] = new_rem_qty_orders
+ if new_rem_qty_agent_orders == 0:
+ break
+
+ if list_of_matches: #list is not empty
+ #make DF again
+ matched = DataFrame(list_of_matches, columns = labels_in_rank)
+ #remove fully matched orders from actual orderbook
+ full_match = matched.loc[matched['rem_vol']==0].copy()
+ self.obook.remove_matched_orders(full_match)
+ #adjust order quantities in orderbook which are partially matched
+ part_match = matched.loc[matched['rem_vol']!=0].copy()
+ part_match = part_match.loc[~part_match['order_id'].isin(full_match['order_id'])]
+ #drop all orders which are several times partially cleared.
+ #only keep the last, because this is by definition the smallest order
+ part_match.drop_duplicates(subset = 'order_id', keep = 'last', inplace = True)
+ self.obook.adjust_partial_match_orders(part_match)
+ else:
+ matched = DataFrame()
+ #remove all market orders
+ self.obook.remove_market_orders()
+ return (matched)
+ else:
+ raise Exception('IDM clearing_type, ordertypes, pricing method combination not known')
+
+ def clear_intraday(self, for_agent_id=None):
+ self.obook.update_orders()
+ invoice = self.match_intraday_orders(for_agent_id=for_agent_id)
+
+ if invoice.empty:
+ print("no intraday orders cleared")
+ else:
+ #duplicate orders with a delivery duration > 1 to ensure correct administration in reports and settlement
+ if invoice.loc[invoice['delivery_duration']>1].empty:
+ pass
+ else:
+ blocks = invoice.loc[invoice['delivery_duration']>1]
+ for i in range(len(blocks)):
+ df = DataFrame(
+ [blocks.iloc[i]] *(blocks['delivery_duration'].iloc[i] - 1))
+ day_lst, mtu_lst = self.model.clock.calc_delivery_period_range(
+ blocks['delivery_day'].iloc[i],
+ blocks['delivery_time'].iloc[i],
+ blocks['delivery_duration'].iloc[i])
+ df['delivery_day'] = day_lst
+ df['delivery_time'] = mtu_lst
+ invoice = invoice.append(df, ignore_index = True)
+ print('settling matched intraday trades')
+ #report to cleared orderbook
+ self.obook.cleared_sellorders = self.obook.cleared_sellorders.append(invoice.loc[invoice['direction']== 'sell'])
+ self.obook.cleared_buyorders = self.obook.cleared_buyorders.append(invoice.loc[invoice['direction']== 'buy'])
+ #calculate money sum due (divide by 4 as every product is 15 minutes = 1 MTU and prices are EUR/MWh)
+ invoice['due_amount']= invoice['cleared_quantity'] * invoice['cleared_price']/4
+ #make buy due amounts negative
+ invoice['due_amount']= invoice['due_amount'].where(invoice['direction']=='sell',-1*invoice['due_amount'])
+ for agent in self.model.schedule.agents:
+ agent.accepted_ID_orders = agent.accepted_ID_orders.append(invoice.loc[invoice['agent_id'] == agent.unique_id])
+ transactions = invoice['due_amount'].loc[invoice['agent_id'] == agent.unique_id].sum()
+ agent.money += transactions
+
+class MO_redispatch(MarketOperator):
+ def __init__(self, model, orderbook, market_rules):
+ MarketOperator.__init__(self, model, orderbook, market_rules)
+ if self.rules.loc['gate_opening_time']=='D-1, MTU 56':
+ #attention: this gate-opening time is an absolute MTU of a day
+ self.gate_opening_time = 56
+ else:
+ raise Exception('redispatch gate opening time value not known')
+ if self.rules.loc['gate_closure_time']=='deliveryMTU-1':
+ #attention: this gate-closure time is a RELATIVE MTU until the first delivery MTU (in contrast to single auctions)
+ #+1 because the gate closes at the beginning of this relative MTU.
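+ #Added illustration (assumption, following the same convention as the intraday gate):
+ # 'deliveryMTU-1' -> 2, 'deliveryMTU-2' -> 3, 'deliveryMTU-3' -> 4, 'deliveryMTU-4' -> 5,
+ # so under 'deliveryMTU-4' an order for delivery MTU 40 can be placed up to and including MTU 35.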
+ self.gate_closure_time = 1 + 1
+ elif self.rules.loc['gate_closure_time']=='deliveryMTU-2':
+ self.gate_closure_time = 2 + 1
+ elif self.rules.loc['gate_closure_time']=='deliveryMTU-3':
+ self.gate_closure_time = 3 + 1
+ elif self.rules.loc['gate_closure_time']=='deliveryMTU-4':
+ self.gate_closure_time = 4 + 1
+ else:
+ raise Exception('redispatch gate closure time value not known')
+
+ if (self.rules.loc['acquisition_method']=='cont_RDM_thinf')|(
+ self.rules.loc['acquisition_method']=='cont_RDM_th0')|(
+ self.rules.loc['acquisition_method']=='cont_RDM_th50')|(
+ self.rules.loc['acquisition_method']=='cont_RDM_th5'):
+ #'cont_RDM_th0' is an abbreviation for continuous redispatch market/mechanism
+ # with a threshold for 'equilibrium constraint' for upward and downward quantity of 0 MW.
+ # the other abbreviations work accordingly.
+ #PyPSA model is used for matching.
+
+ #Value of lost load used for slack generators capturing under-procurement
+ Voll= 10000
+ self.commit_model = pypsa.Network()
+ for a in self.model.gridareas:
+ self.commit_model.add("Bus", a +'_up', carrier = 'DC')
+ self.commit_model.add("Bus", a +'_down', carrier = 'DC')
+ self.commit_model.add("Load", 'up_demand_' + a, bus = a + '_up')
+ self.commit_model.add("Load",'down_demand_' + a, bus = a + '_down')
+ #add a generator that captures under-procurement of downward redispatch
+ self.commit_model.add("Generator",a+'short_position_down',bus=a +'_down',
+ committable=True,
+ p_min_pu= 0,
+ marginal_cost = Voll,
+ p_nom=10000)
+ #add a negative generator to capture over-procurement in downward direction
+ self.commit_model.add("Generator",a+'long_position_down',bus=a +'_down',
+ committable = True,
+ p_min_pu= 1,
+ p_max_pu= 0,
+ p_nom=-10000,
+ marginal_cost= -1001)
+ #add a negative generator to capture over-procurement in upward direction
+ self.commit_model.add("Generator",a+'long_position_up',bus=a +'_up',
+ committable = True,
+ p_min_pu= 1,
+ p_max_pu= 0,
+ p_nom=-10000,
+ marginal_cost= -1001)
+ self.commit_model.add("Generator",a+'short_position_up',bus=a +'_up',
+ committable=True,
+ p_min_pu= 0,
+ marginal_cost = Voll,
+ p_nom=10000)
+
+ #these placeholders are used for a lopf constraint in case there are no orders.
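+ #Added note: the 0 MW placeholder generators below keep the balance_threshold
+ # constraint well-defined for the solver when one direction has no supply orders
+ # (see the 'solver needs at least one variable' comment in balance_threshold further down).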
+ self.commit_model.add("Generator",'placeholder_gen_down' ,bus=a +'_down', + committable=True, + p_min_pu= 0, + marginal_cost = Voll, + p_nom=0) + self.commit_model.add("Generator",'placeholder_gen_up' ,bus=a +'_up', + committable=True, + p_min_pu= 0, + marginal_cost = Voll, + p_nom=0) + if self.rules.loc['acquisition_method']=='cont_RDM_thinf': + # there is no constraint regarding the quantity equilibrium of upward and downward redispatch actions per MTU + self.imb_threshold = np.inf + elif self.rules.loc['acquisition_method']=='cont_RDM_th0': + # there is constraint regarding the quantity equilibrium of upward and downward redispatch actions per MTU + self.imb_threshold = 0 + elif self.rules.loc['acquisition_method']=='cont_RDM_th50': + # there is a constraint regarding the quantity equilibrium of upward and downward redispatch actions per MTU + self.imb_threshold = 50 + elif self.rules.loc['acquisition_method']=='cont_RDM_th5': + # there is a constraintregarding the quantity equilibrium of upward and downward redispatch actions per MTU + self.imb_threshold = 5 + else: + self.imb_threshold = np.inf #default + + def match_redispatch_orders (self): + if (self.rules['acquisition_method']== 'cont_RDM_thinf')|( + self.rules['acquisition_method']== 'cont_RDM_th0')|( + self.rules.loc['acquisition_method']=='cont_RDM_th5')|( + self.rules.loc['acquisition_method']=='cont_RDM_th50'): + #matched orders df + matched = DataFrame(columns = self.obook.offerlabels +['due_amount']) + print("matching redispatch orders with pypsa") + #taking a copy of the pypsa model for redispatch ensures that no previous orders are included + commit_model = self.commit_model.copy() + + def redispatch_solution_constraints(network, snapshots): + #this local method has two local constraint methods for pypsa opf + def block_constraint(network, snapshots): + """Block orders have p_max_pu = 0 for all periods outside their block. + the snapshots within the block have p_max_pu = order quantity. + This block however should be dispatched with constant quantity within this block. + Therefore, a set of constraints is given to the solver where + gen_p_t1 ==gen_p_t-1, gen_p_t2 ==gen_p_t1 ...see PyPSA github + for more information on structure of additional constraints. + + Unfortunately, this way of adding contraints works only for PyPSA 0.13.1 or older. + To-do: translate this method to new PyPSA versions. Help needed. 
+ """ + #get all snapshots of the block (p_max!=0) + block_sn={} + for gen in network.generators.index: + try: + block_sn[gen] = network.generators_t.p_max_pu.loc[network.generators_t.p_max_pu[gen] !=0].index + except: + block_sn[gen] = None + constant_block ={} + for gens_i, gen in enumerate(network.generators.index): + + if block_sn[gen] is not None: + affected_sns = block_sn[gen] + affected_sns_shifted =[affected_sns[-1]] + list(affected_sns[:-1]) + for i in range(len(affected_sns)): + lhs = LExpression([(1,network.model.generator_p[gen, affected_sns[i]])]) + rhs = LExpression([(1,network.model.generator_p[gen, affected_sns_shifted[i]])]) + constant_block[gen, affected_sns[i]]= LConstraint(lhs,"==",rhs) + + affected_generators = [k for k, v in block_sn.items() if v is not None] + gen_sns_index =[] + for gen in affected_generators: + for sn in block_sn[gen]: + gen_sns_index +=[(gen, sn)] + #dictionary of LContraints is given to pypsa.opt.l_constraint (set) + l_constraint(network.model, "block_constraint", constant_block, + gen_sns_index ) + def balance_threshold (network, snapshots): + #all orders up and down need to be selected + gen_up = list(network.generators.loc[network.generators.index.isin( + list(upsupply['order_id']))].index) + gen_down = list(network.generators.loc[network.generators.index.isin( + list(downsupply['order_id']))].index) + #solver needs at least one variable. Therefore these 0MW placeholders are used, + #in case there are no orders in a direction + if not gen_up: + gen_up =['placeholder_gen_up'] + if not gen_down: + gen_down =['placeholder_gen_down'] + + imb_upper={} + imb_lower={} + for sn in snapshots: + rhs = LExpression([(1,sum(network.model.generator_p[gen,sn] for gen in gen_up))]) + self.imb_threshold + lhs = LExpression([(1,sum(network.model.generator_p[gen,sn] for gen in gen_down))]) + imb_upper[sn]= LConstraint(lhs,"<=",rhs) + + lhs = LExpression([(1,sum(network.model.generator_p[gen,sn] for gen in gen_up))]) + rhs = LExpression([(1,sum(network.model.generator_p[gen,sn] for gen in gen_down))]) + self.imb_threshold + imb_lower[sn]= LConstraint(lhs,"<=",rhs) + l_constraint(network.model, "imbalance_constraint_upper", imb_upper, list(snapshots)) + l_constraint(network.model, "imbalance_constraint_lower", imb_lower, list(snapshots)) + #execute both constraint methods + if (self.rules['order_types']== 'limit_ISP')|( + self.rules['order_types']== 'limit_block')|( + self.rules['order_types']== 'IDCONS_orders'): + block_constraint(network, snapshots) + balance_threshold (network, snapshots) + + #set the minimum order clearing + if (self.rules['order_types']== 'all_or_none_ISP')|( + self.rules['order_types']== 'all_or_none_block'): + pmin = 1 #because all-or-none + elif (self.rules['order_types']== 'limit_ISP')|( + self.rules['order_types']== 'limit_block')|( + self.rules['order_types']== 'IDCONS_orders') : + pmin = 0 #because orders are limit orders (partial call possible) + + if not self.rules['order_types']== 'IDCONS_orders': + #get all available orders for redispatch + buyorders = self.obook.get_obook_as_multiindex(selection='buyorders', incl_location = True) + sellorders = self.obook.get_obook_as_multiindex(selection='sellorders', incl_location = True) + + #exclude all block orders in case only orders per ISP are allowed + if (self.rules['order_types']== 'all_or_none_ISP')|( + self.rules['order_types']== 'limit_ISP'): + #exclude ordertypes block-orderes (delivery duration > 1) + buyorders = buyorders.loc[buyorders['delivery_duration'] == 1].copy() + 
sellorders = sellorders.loc[sellorders['delivery_duration'] == 1].copy() + + downdemand = buyorders.loc[buyorders['order_type'] == 'redispatch_demand'] + updemand = sellorders.loc[sellorders['order_type'] == 'redispatch_demand'] + downsupply = buyorders.loc[buyorders['order_type'] == 'redispatch_supply'] + upsupply = sellorders.loc[sellorders['order_type'] == 'redispatch_supply'] + else: + #in case of IDCONS, the orders are retrieved from the intraday orderbook (not from the redispatch orderbook) + downsupply = self.model.IDM_obook.get_obook_as_multiindex(selection='buyorders', incl_location = True) + upsupply = self.model.IDM_obook.get_obook_as_multiindex(selection='sellorders', incl_location = True) + #filter on IDCONS_orders + downsupply= downsupply.loc[downsupply['order_type']=='IDCONS_order'].copy() + upsupply= upsupply.loc[upsupply['order_type']=='IDCONS_order'].copy() + #get the redispatch demand orders from the redispatch orderbook + downdemand = self.obook.get_obook_as_multiindex(selection='buyorders', incl_location = True) + updemand = self.obook.get_obook_as_multiindex(selection='sellorders', incl_location = True) + #ensure that only redispatch demand orders are involved + downdemand =downdemand.loc[downdemand['order_type'] == 'redispatch_demand'].copy() + updemand =updemand.loc[updemand['order_type'] == 'redispatch_demand'].copy() + + if (updemand.empty & downdemand.empty) |(upsupply.empty & downsupply.empty): + #no need to calculate anything + return (DataFrame()) + + upsupply.reset_index(inplace=True) + downsupply.reset_index(inplace=True) + #calculate end delivery mtu of orders + upsupply['end_delivery_mtu'] = upsupply.apply(lambda x: (x['delivery_location'],) + + self.model.clock.calc_delivery_period_end((x['delivery_day'],x['delivery_time'] + ), x['delivery_duration']),axis=1) + downsupply['end_delivery_mtu'] = downsupply.apply(lambda x:(x['delivery_location'],) + + self.model.clock.calc_delivery_period_end((x['delivery_day'],x['delivery_time'] + ), x['delivery_duration']),axis=1) + + upsupply = upsupply.set_index(['delivery_location', 'delivery_day', 'delivery_time']) + downsupply = downsupply.set_index(['delivery_location', 'delivery_day', 'delivery_time']) + updemand = updemand.reset_index().set_index(['delivery_location', 'delivery_day', 'delivery_time']) + downdemand = downdemand.reset_index().set_index(['delivery_location', 'delivery_day', 'delivery_time']) + + ##filter supply orders to keep only supply with demand-overlapping delivery periods + upsupply = upsupply[(upsupply.index.isin(updemand.index))|( + upsupply['end_delivery_mtu'].isin(updemand.index))] + downsupply = downsupply[(downsupply.index.isin(downdemand.index))|( + downsupply['end_delivery_mtu'].isin(downdemand.index))] + + upsupply=upsupply.reset_index() + downsupply=downsupply.reset_index() + + if (updemand.empty & downdemand.empty) |(upsupply.empty & downsupply.empty): + #no need to calculate anything + return (DataFrame()) + #add snapshots for the calculation + indx = list(self.model.schedules_horizon.index.values) + snap = DataFrame(index=self.model.schedules_horizon.index) + snap = snap.reset_index() + snap['strIndex']=snap['delivery_day'].map(str)+str('_')+snap['delivery_time'].map(str) + commit_model.set_snapshots(snap['strIndex']) + + #all remaining supply orders are added as generators to the pypsa model + for i in range(len(upsupply)): + order = upsupply.iloc[i] + commit_model.add('Generator',order['order_id'] ,bus = order['delivery_location'] + '_up', + committable = True, + #pmin is either 0 
(limit orders) or 1 (all-or-none orders) + p_min_pu = pmin, + min_up_time = order['delivery_duration'], + marginal_cost = order['price'], + p_nom = order['quantity']) + #use schedules horizon to make delivery period + delivery_period= DataFrame(columns= ['pmax_pu'], index=self.model.schedules_horizon.index) + delivery_period['pmax_pu'] = 0 + #get index value from the list of schedules_horizon index of delivery period start + start = indx.index((order['delivery_day'], order['delivery_time'])) + delivery_duration= order['delivery_duration'] + end = start + delivery_duration + delivery_period.loc[indx[int(start):int(end)], 'pmax_pu'] = 1 + commit_model.generators_t.p_max_pu[order['order_id']]=list(delivery_period['pmax_pu']).copy() + + for i in range(len(downsupply)): + order = downsupply.iloc[i] + commit_model.add('Generator',order['order_id'] ,bus=order['delivery_location'] + '_down', + committable = True, + p_min_pu = pmin, + min_up_time = order['delivery_duration'], + #make price negative to consider that downward are buy orders + #(provider pays when price is positive) + marginal_cost = -order['price'], + p_nom = order['quantity']) + #use schedules horizon to make delivery period + delivery_period= DataFrame(columns= ['pmax_pu'], index=self.model.schedules_horizon.index) + delivery_period['pmax_pu'] = 0 + #get index value from the list of schedules_horizon index of delivery period start + start = indx.index((order['delivery_day'], order['delivery_time'])) + delivery_duration= order['delivery_duration'] + end = start + delivery_duration + delivery_period.loc[indx[int(start):int(end)], 'pmax_pu'] = 1 + commit_model.generators_t.p_max_pu[order['order_id']]=list(delivery_period['pmax_pu']).copy() + + #prepare redispatch demand per area. + downdemand_per_area = pd.concat([DataFrame(index=self.model.schedules_horizon.index), downdemand['quantity'].unstack(level=0)], axis=1) + updemand_per_area = pd.concat([DataFrame(index=self.model.schedules_horizon.index), updemand['quantity'].unstack(level=0)],axis =1) + + for area in downdemand_per_area.columns: + commit_model.loads_t.p_set['down_demand_'+area] = list(downdemand_per_area[area]) + for area in updemand_per_area.columns: + commit_model.loads_t.p_set['up_demand_'+ area] = list(updemand_per_area[area]) + commit_model.loads_t.p_set.fillna(value=0, inplace=True) + #run PyPSA + commit_model.lopf(commit_model.snapshots, solver_name= self.model.exodata.solver_name, + extra_functionality = redispatch_solution_constraints, free_memory={'pypsa'}) + generators = commit_model.generators_t.p + generators = generators.loc[:,(generators>0).any(axis=0)].copy() + #get cleared quantity per order. 
Mean instead of sum, because of possible block orders
+ cleared_quantity= {}
+ for c in generators.columns:
+ cleared_quantity[c] = generators[c].loc[generators[c]>0].mean()
+
+ ##select matched orders based on order_id list
+ matched = downsupply.loc[downsupply['order_id'].isin(list(cleared_quantity.keys()))]
+ matched = pd.concat([matched,upsupply.loc[upsupply['order_id'].isin(list(cleared_quantity.keys()))]])
+
+ for oid in list(cleared_quantity.keys()):
+ matched.loc[matched['order_id'] == oid, 'cleared_quantity'] = cleared_quantity[oid]
+
+ #calculate remaining quantity of limit orders
+ matched['rem_vol'] = matched['quantity'] - matched['cleared_quantity']
+ #pay as bid
+ matched['cleared_price'] = matched['price']
+
+ if self.rules['order_types']== 'IDCONS_orders':
+ #remove fully matched IDCONS orders from ID obook
+ full_match = matched.loc[matched['rem_vol']==0].copy()
+ self.model.IDM_obook.remove_matched_orders(full_match)
+ #adjust order quantities in orderbook which are partially matched
+ part_match = matched.loc[matched['rem_vol']!=0].copy()
+ part_match = part_match.loc[~part_match['order_id'].isin(full_match['order_id'])]
+ #drop all orders which are several times partially cleared.
+ #only keep the last, because this is the smallest by definition
+ part_match.drop_duplicates(subset = 'order_id', keep = 'last', inplace = True)
+ self.model.IDM_obook.adjust_partial_match_orders(part_match)
+
+ #remove matched supply orders from all order list
+ self.obook.remove_matched_orders(matched)
+ return (matched)
+
+ else:
+ raise Exception('redispatch clearing type - ordertype combination not known')
+
+ def clear_redispatch(self):
+ self.obook.update_orders()
+ invoice = self.match_redispatch_orders()
+ invoice['due_amount'] = None
+
+ if invoice.empty:
+ print("no redispatch orders cleared")
+ else:
+ #duplicate orders with a delivery duration > 1 to ensure correct administration in reports and settlement
+ if invoice.loc[invoice['delivery_duration']>1].empty:
+ pass
+ else:
+ blocks = invoice.loc[invoice['delivery_duration']>1]
+ for i in range(len(blocks)):
+ df = DataFrame(
+ [blocks.iloc[i]] * int(blocks['delivery_duration'].iloc[i] - 1))
+ day_lst, mtu_lst = self.model.clock.calc_delivery_period_range(
+ blocks['delivery_day'].iloc[i],
+ blocks['delivery_time'].iloc[i],
+ blocks['delivery_duration'].iloc[i])
+ df['delivery_day'] = day_lst
+ df['delivery_time'] = mtu_lst
+ invoice = invoice.append(df, ignore_index = True)
+
+ #report to cleared orderbook
+ self.obook.cleared_sellorders = self.obook.cleared_sellorders.append(invoice.loc[invoice['direction']== 'sell'])
+ self.obook.cleared_buyorders = self.obook.cleared_buyorders.append(invoice.loc[invoice['direction']== 'buy'])
+ if self.rules['pricing_method']== 'pay_as_bid':
+ invoice['due_amount']= invoice['cleared_quantity'] * invoice['cleared_price']/4
+ #make buy due amounts negative
+ invoice['due_amount']= invoice['due_amount'].where(invoice['direction']=='sell',-1*invoice['due_amount'])
+ else:
+ raise Exception('redispatch pricing method not known')
+ for agent in self.model.schedule.agents:
+ transaction = invoice.loc[invoice['agent_id'] == agent.unique_id, 'due_amount'].sum()
+ agent.accepted_red_orders = invoice[invoice['agent_id'] == agent.unique_id]
+ agent.money += transaction
+ print("agent {} gets {} Euro for redispatch".format(agent.unique_id, transaction))
+ self.model.aGridAndSystemOperator.money -= transaction
+ #provide matched orders to aGridAndSystemOperator for redispatch demand determination in the next round
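+ #Added settlement sketch (illustrative numbers): a 20 MW upward redispatch order
+ # at 50 EUR/MWh cleared for one MTU yields due_amount = 20 * 50 / 4 = 250 EUR
+ # (sell direction, positive), while a downward (buy) order of the same size and
+ # price yields due_amount = -250 EUR, i.e. the provider pays.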
+ self.model.aGridAndSystemOperator.red_procured = invoice.copy()
+
+
+class MO_dayahead(MarketOperator):
+ def __init__(self, model, orderbook, market_rules):
+ MarketOperator.__init__(self, model, orderbook, market_rules)
+ #for initial asset status approximation
+ self.test_init_dispatch=DataFrame()
+ if self.rules.loc['gate_opening_time']=='D-1, MTU 44':
+ #attention: this gate-opening time is an absolute MTU of a day
+ self.gate_opening_time = 44
+ else:
+ raise Exception('DA gate opening time value not known')
+ if self.rules.loc['gate_closure_time']=='D-1, MTU 45':
+ #attention: this gate closure-time is ALSO an absolute MTU of a day (in contrast to continuous markets)
+ self.gate_closure_time = 45
+ else:
+ raise Exception('DA gate closure time value not known')
+ if self.rules['acquisition_method']== 'single_hourly_auction':
+ #initiate pypsa model for optimal unit commitment.
+ #Note that this method assumes a single-sided auction (inelastic demand) instead of a double-sided auction (with elastic demand/ demand response)
+ #value of lost load
+ Voll= 10000
+ self.commit_model = pypsa.Network()
+ self.commit_model.add("Bus","bus")
+ self.commit_model.add("Load","load",bus="bus")
+ #add a generator that captures the unfeasible demand (open short position)
+ self.commit_model.add("Generator",'short_position' ,bus="bus",
+ committable=True,
+ p_min_pu= 0,
+ marginal_cost = Voll,
+ p_nom=10000)
+ #add a negative generator to capture unfeasible oversupply (open long positions)
+ self.commit_model.add("Generator",'long_position',bus="bus",
+ committable = True,
+ p_min_pu= 1,
+ p_max_pu= 0,
+ p_nom=-10000,
+ marginal_cost= -Voll)
+ all_assets = self.model.exodata.asset_portfolios
+ for i in range(len(all_assets)):
+ self.commit_model.add("Generator",all_assets.loc[i,'asset_name'] ,bus="bus",
+ committable = True,
+ p_min_pu = all_assets.loc[i,'pmin']/all_assets.loc[i,'pmax'],
+ marginal_cost = all_assets.loc[i,'srmc'],
+ p_nom=all_assets.loc[i,'pmax']
+ )
+ #intertemporal constraints divided by 4 because of the conversion from MTU to hourly.
+ self.commit_model.generators.start_up_cost[all_assets.loc[i,'asset_name']] = all_assets.loc[i,'start_up_cost']/4
+ self.commit_model.generators.shut_down_cost[all_assets.loc[i,'asset_name']] = all_assets.loc[i,'shut_down_cost']/4
+ #Other intertemporal constraints not considered in DA, as tests showed that the results do not improve.
+ # self.commit_model.generators.min_up_time[all_assets.loc[i,'asset_name']] = all_assets.loc[i,'min_up_time']/4
+ # self.commit_model.generators.min_down_time[all_assets.loc[i,'asset_name']] = all_assets.loc[i,'min_down_time']/4
+ # self.commit_model.generators.ramp_limit_down[all_assets.loc[i,'asset_name']] = all_assets.loc[i,'ramp_limit_down']/4
+ # self.commit_model.generators.ramp_limit_up[all_assets.loc[i,'asset_name']] = all_assets.loc[i,'ramp_limit_up']/4
+
+ def match_dayahead_orders(self):
+ if (self.rules['acquisition_method']== 'exo_default')|(self.rules['acquisition_method']== 'exo_imbalance_case'):
+ return(None) #no clearing needed
+ if self.rules['acquisition_method']== 'single_hourly_auction':
+ #make hourly snapshots. The MTU of the day and the step of the simulation determine which snapshots are used.
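+ #Added example (illustrative): at simulation start on day D, MTU 48 (hour 12),
+ # with day D+1 inside the schedules horizon, the first branch below builds
+ # snapshots for hours 12..24 of day D plus hours 1..24 of day D+1.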
+ if (self.model.clock.get_MTU() >= self.gate_closure_time) & (self.model.schedule.steps == 0)&(
+ self.model.schedules_horizon.index.get_level_values(0)[-1] >= self.model.clock.get_day()+1):
+ #horizon includes next day and all hours of remaining current day
+ day = [self.model.clock.get_day()]*(24 - self.model.clock.get_hour() + 1) + [
+ self.model.clock.get_day() + 1] * 24
+ hours = list (range(self.model.clock.get_hour(),25)) + list(range(1,25))
+ elif (self.model.clock.get_MTU() < self.gate_closure_time) & (self.model.schedule.steps == 0):
+ #horizon includes only current day
+ day = [self.model.clock.get_day()]*(24 - self.model.clock.get_hour() + 1)
+ hours = list (range(self.model.clock.get_hour(),25))
+ elif (self.model.schedule.steps == 0)&(
+ self.model.schedules_horizon.index.get_level_values(0)[-1] < self.model.clock.get_day()+1):
+ #when first step but horizon only includes current day
+ day = [self.model.clock.get_day()]*(24 - self.model.clock.get_hour() + 1)
+ hours = list(range(self.model.clock.get_hour(),25))
+ elif (self.model.clock.get_MTU() == self.gate_closure_time)&(
+ self.model.schedules_horizon.index.get_level_values(0)[-1] >= self.model.clock.get_day()+1):
+ #when DA GCT and next day is in the horizon of the simulation
+ day = [self.model.clock.get_day()+1]*(24)
+ hours = list(range(1,25))
+ else:
+ #horizon shows that simulation is at end. No additional DA auction
+ return (DataFrame(), DataFrame())
+ snap= DataFrame(columns =['delivery_day','delivery_hour'])
+ snap['delivery_day']=day
+ snap['delivery_hour']=hours
+ snap['strIndex'] = snap['delivery_day'].map(str)+str('_')+snap['delivery_hour'].map(str)
+ #get the residual load from the exo-data class
+ resload_lst = self.model.exodata.get_DA_resload(snap, mode = self.model.exodata.sim_task['residual_load_scenario'])
+ if not resload_lst:
+ print("no residual load values in exo database for DA auction horizon")
+ print("auction not executed")
+ return(DataFrame(), DataFrame())
+ elif len(resload_lst) < len(snap):
+ print("insufficient residual load values in exo database for the complete DA auction horizon. 
Auction horizon reduced to available data")
+ snap = snap.iloc[:len(resload_lst)]
+ self.commit_model.set_snapshots(snap['strIndex'])
+ self.commit_model.loads_t.p_set['load']= resload_lst
+
+ #consider unavailabilities of assets:
+ indx = snap.set_index(['delivery_day','delivery_hour']).index
+ for agent in self.model.schedule.agents:
+ for asset in agent.assets['object']:
+ if not asset.planned_unavailability.empty:
+ #convert unavailabilities from MTU to hourly mean values
+ unav_h = (asset.planned_unavailability.reset_index().groupby(by=['delivery_day','delivery_hour']).mean()/asset.pmax).copy()
+ self.commit_model.generators_t.p_max_pu[asset.assetID] = unav_h.loc[indx,'p_max_t'].values.copy()
+ self.commit_model.generators_t.p_min_pu[asset.assetID] = unav_h.loc[indx,'p_min_t'].values.copy()
+
+ #day and time of last dispatch for initial generator status determination
+ if not self.model.rpt.prices['DAP'].empty:
+ init_time= self.model.rpt.prices['DAP'].index[-1]
+ else:
+ # a test run is needed to determine initial dispatch
+ init_time= None
+ try:
+ #default is status 1 for start
+ self.commit_model.generators.initial_status = 1
+ self.commit_model.lopf(self.commit_model.snapshots, solver_name= self.model.exodata.solver_name, free_memory={'pypsa'})
+ self.test_init_dispatch = self.commit_model.generators_t.p.copy().iloc[0]
+ self.test_init_dispatch.loc[self.test_init_dispatch > 0] = 1
+ self.test_init_dispatch.loc[self.test_init_dispatch == 0] = 0
+
+ except:
+ import pdb
+ pdb.set_trace()
+ for agent in self.model.schedule.agents:
+ for asset in agent.assets['object']:
+ #ensure that the initial status is taken into account
+ if init_time:
+ last_dispatch = asset.schedule.loc[init_time, 'commit']
+ if last_dispatch > 0:
+ self.commit_model.generators.initial_status[asset.assetID] = 1
+ else:
+ self.commit_model.generators.initial_status[asset.assetID] = 0
+ else:
+ self.commit_model.generators.initial_status[asset.assetID]= self.test_init_dispatch[asset.assetID]
+
+ try:
+ self.commit_model.lopf(self.commit_model.snapshots, solver_name= self.model.exodata.solver_name, free_memory={'pypsa'})
+ except:
+ import pdb
+ pdb.set_trace()
+ opt_dispatch = self.commit_model.generators_t.p.copy()
+ clearing_prices= opt_dispatch.copy()
+ opt_dispatch['DA_residual_load'] = self.commit_model.loads_t.p.copy()
+ #convert index back again from PyPSA style to ASAM style.
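+ #Added example: a PyPSA snapshot label like '2_14' (day 2, hour 14) is split on '_'
+ # into the ('delivery_day','delivery_hour') index entry ('2','14') below and then
+ # cast back to integers with astype('int64').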
+ opt_dispatch.index = opt_dispatch.index.str.split(pat='_', expand =True)
+ opt_dispatch.index.set_names(['delivery_day','delivery_hour'], inplace=True)
+ #make integers from index
+ opt_dispatch.reset_index(inplace=True)
+ opt_dispatch[['delivery_day','delivery_hour']] = opt_dispatch[['delivery_day','delivery_hour']].astype('int64')
+ #determine clearing price (highest cost (order price) of dispatched generator)
+ costs= self.commit_model.generators['marginal_cost']
+ for generator in costs.index:
+ clearing_prices[generator] =clearing_prices[generator].where(
+ clearing_prices[generator]==0, costs[generator])
+ clearing_prices['clearing_price'] = clearing_prices.max(axis=1)
+ #drop all other columns
+ clearing_prices = clearing_prices['clearing_price']
+ #convert index again
+ clearing_prices.index = clearing_prices.index.str.split(pat='_', expand =True)
+ clearing_prices.index.set_names(['delivery_day','delivery_hour'], inplace=True)
+ #make integers from index
+ clearing_prices=clearing_prices.reset_index()
+ clearing_prices[['delivery_day','delivery_hour']] = clearing_prices[['delivery_day','delivery_hour']].astype('int64')
+
+ #this reporting method converts the hourly prices to MTU prices
+ self.model.rpt.publish_DAM_prices(clearing_prices)
+ if (opt_dispatch['short_position']>0).any():
+ print('DA dispatch with adequacy issues short')
+ print(opt_dispatch['short_position'])
+ import pdb
+ pdb.set_trace()
+ if (opt_dispatch['long_position']>0).any():
+ print('DA dispatch with adequacy issues long')
+ print(opt_dispatch['long_position'])
+ import pdb
+ pdb.set_trace()
+
+ return (opt_dispatch, clearing_prices)
+
+ else:
+ raise Exception ('DA clearing type not known')
+
+ def clear_dayahead(self):
+ if self.rules['acquisition_method']== 'single_hourly_auction':
+ #Note: this method does not include the possibility to buy electricity on DAM.
+ #Only producing assets considered on DA (i.e. single-sided auction instead of double-sided auction)
+ if (self.model.clock.get_MTU() == self.gate_closure_time)|(
+ self.model.schedule.steps == 0):
+ print('clear and settle day-ahead market with a single-sided hourly auction (based on PyPSA)')
+
+ matches, clearing_prices = self.match_dayahead_orders()
+ if (matches.empty) & (clearing_prices.empty):
+ #no auction results. No settlement needed
+ return
+ #remove generators with 0 dispatch
+ matches = matches.loc[:,(matches > 0).any()]
+ #add clearing price to matches df
+ matches['cleared_price'] = clearing_prices.iloc[:,2].values
+ #enlarge the matches to get results per mtu of 15 minutes
+ matches = pd.concat([matches,matches,matches,matches])
+ matches.sort_index(inplace=True)
+ if (self.model.clock.get_MTU() >= self.gate_closure_time) & (
+ self.model.schedule.steps == 0):
+ #get the right delivery mtu from the schedules horizon
+ mtus = list(self.model.schedules_horizon.index.get_level_values(1))
+ elif (self.model.clock.get_MTU() < self.gate_closure_time) & (
+ self.model.schedule.steps == 0):
+ mtus = list(range(self.model.clock.get_MTU(),97))
+ elif self.model.clock.get_MTU() == self.gate_closure_time:
+ mtus = list(range(1,97))
+ #cut off MTUs of a first hour that lies in the past.
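+ #Added note (illustrative): the fourfold concat above turns each cleared hour into
+ # 4 MTU rows; when the auction runs mid-hour, len(mtus) < len(matches) and the slice
+ # below drops the leading MTU rows that lie in the past.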
matches = matches.iloc[len(matches)-len(mtus):]
+ matches['delivery_time'] = mtus
+ matches.drop('delivery_hour', axis=1, inplace = True)
+ #make artificially cleared DA orders
+ for agent in self.model.schedule.agents:
+ assets = list(self.model.exodata.asset_portfolios.loc[
+ self.model.exodata.asset_portfolios['asset_owner'] == agent.unique_id,'asset_name'])
+ for i in assets:
+ DA_cl_orders = DataFrame()
+ DA_cl_orders[['delivery_day', 'delivery_time']] = matches[['delivery_day', 'delivery_time']]
+ DA_cl_orders['associated_asset'] = i
+ DA_cl_orders['direction'] = 'sell'
+ DA_cl_orders['agent_id'] = agent.unique_id
+ DA_cl_orders['order_type'] = 'limit order'
+ #hourly auctions (4mtu), but we split them for the administration in 15 minute mtus
+ DA_cl_orders['delivery_duration'] = 1
+ if i in matches.columns:
+ #the cleared quantity (MW) applies unchanged to each of the four MTUs of the hour
+ DA_cl_orders['cleared_quantity'] = matches[i]
+ DA_cl_orders['cleared_price']= matches['cleared_price']
+ #right order format. Missing columns are NaN, which is not an issue.
+ DA_cl_orders = DA_cl_orders.loc[:,['agent_id','associated_asset',
+ 'delivery_location','cleared_quantity',
+ 'cleared_price', 'delivery_day','delivery_time',
+ 'order_type','init_time', 'order_id', 'direction', 'delivery_duration']]
+ #cleared price divided by 4 because it is a price per MWh and we split the hours into 15-minute MTUs
+ DA_cl_orders['due_amount']= DA_cl_orders['cleared_quantity'] * DA_cl_orders['cleared_price']/4
+ agent.money += DA_cl_orders['due_amount'].sum()
+ agent.accepted_DA_orders = pd.concat([agent.accepted_DA_orders,
+ DA_cl_orders])
+ #add orders to reporter
+ self.obook.cleared_sellorders = self.obook.cleared_sellorders.append(DA_cl_orders)
+ #also add the orders to the artificial sellorderbook
+ DA_cl_orders = DA_cl_orders.drop(['cleared_quantity','cleared_price', 'due_amount'], axis=1).copy()
+ DA_cl_orders['quantity']= agent.assets.loc[i].item().pmax
+ DA_cl_orders['price']= agent.assets.loc[i].item().srmc
+ DA_cl_orders = DA_cl_orders.loc[:,['agent_id','associated_asset',
+ 'delivery_location','quantity',
+ 'price', 'delivery_day','delivery_time',
+ 'order_type','init_time', 'order_id', 'direction', 'delivery_duration']]
+ #add orders to reporter
+ self.obook.sellorders_full_step = self.obook.sellorders_full_step.append(DA_cl_orders)
+
+
+ #administration of 'Central_DA_residual_load_entity' for DA buy orders
+ #this is a simplification.
+ DA_cl_orders = DataFrame()
+ DA_cl_orders['cleared_quantity'] = matches['DA_residual_load']
+ DA_cl_orders[['delivery_day', 'delivery_time','cleared_price']] = matches[['delivery_day', 'delivery_time','cleared_price']]
+ DA_cl_orders['associated_asset'] = 'DA_residual_load'
+ DA_cl_orders['direction'] = 'buy'
+ DA_cl_orders['agent_id'] = 'Central_DA_residual_load_entity'
+ DA_cl_orders['order_type'] = 'limit order'
+
+ #right order format. Missing columns are NaN, which is not an issue.
+ DA_cl_orders = DA_cl_orders.loc[:,['agent_id','associated_asset','delivery_location','cleared_quantity','cleared_price', 'delivery_day','delivery_time','order_type','init_time', 'order_id', 'direction']]
+ #cleared price divided by 4 because it is a price per MWh and we split the hours into 15-minute MTUs
+ DA_cl_orders['due_amount']= - DA_cl_orders['cleared_quantity'] * DA_cl_orders['cleared_price']/4
+ #add orders to reporter
+
+ self.obook.cleared_buyorders = self.obook.cleared_buyorders.append(DA_cl_orders)
+
+ DA_cl_orders = DA_cl_orders.drop(['cleared_quantity','cleared_price', 'due_amount'], axis=1).copy()
+ DA_cl_orders['quantity']= matches['DA_residual_load']
+ DA_cl_orders['price']= np.nan
+ DA_cl_orders = DA_cl_orders.loc[:,['agent_id','associated_asset',
+ 'delivery_location','quantity',
+ 'price', 'delivery_day','delivery_time',
+ 'order_type','init_time', 'order_id', 'direction', 'delivery_duration']]
+
+
+ #add orders to reporter
+ self.obook.buyorders_full_step = self.obook.buyorders_full_step.append(DA_cl_orders)
+ self.obook.buyorders_full_step[['delivery_day', 'delivery_time','quantity']] = self.obook.buyorders_full_step[['delivery_day', 'delivery_time','quantity']].astype('int64')
+ else:
+ #no DA auction in this round
+ pass
+ else:
+ raise Exception ('DA clearing type for settlement not known')
+
+
+class MO_balancing_energy(MarketOperator):
+ def __init__(self, model, orderbook, market_rules):
+ MarketOperator.__init__(self, model, orderbook, market_rules)
+ if self.rules['acquisition_method']=='control_states_only':
+ """balancing energy market is simulated based on probabilities of the FRR control state (Dutch style)"""
+ self.control_state= None
+ else:
+ raise Exception('BEM clearing type not implemented: ', self.rules['acquisition_method'])
+ if self.rules.loc['gate_opening_time']=='D-1, MTU 56':
+ #gate opening time is an absolute MTU of a day.
+ self.gate_opening_time = 56
+ else:
+ raise Exception('BEM gate opening time value not known')
+ if self.rules.loc['gate_closure_time']=='deliveryMTU-2':
+ #attention: this gate-closure time is a RELATIVE MTU until the first delivery MTU (in contrast to single auctions)
+ #+1 because the gate closes at the beginning of this relative MTU.
+ self.gate_closure_time = 2 + 1
+ else:
+ raise Exception('BEM gate closure time value not known')
+
+ def balancing_energy_clearing(self):
+ """currently only determination of control state is implemented"""
+ print('balancing energy clearing of this MTU')
+
+ if self.rules['acquisition_method']=='control_states_only':
+ #orders need to be updated anyway, even when not cleared.
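+ #Added note (assumption): update_orders() presumably performs orderbook maintenance,
+ # such as removing expired orders; under 'control_states_only' no balancing energy
+ # orders are matched and the market outcome reduces to the control state drawn in
+ # determine_control_state() below.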
+ self.obook.update_orders()
+ else:
+ pass
+
+ def determine_control_state(self):
+ """determine the FRR control state based on exogenous probabilities per MTU of the day"""
+ #balancing of this step (step counter has already proceeded, so calculate mtu back)
+ day, MTU=self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ #seed to make randomness controllable over various simulations
+ if self.model.IB_marketoperator.rules['pricing_method']!='exogenious':
+ #Uses the last step rank number of a fixed agent
+ seed = self.model.MP_dict[list(self.model.MP_dict.keys())[0]].step_rank + MTU + day
+
+ probabilities =self.model.exodata.control_state_probabilities.loc[
+ self.model.exodata.control_state_probabilities['MTU']== MTU,['control_state','probability']]
+ #get random sample of control state, given the probabilities per state of MTU of day
+ self.control_state = np.random.RandomState(seed).choice(probabilities.iloc[:,0], p=probabilities.iloc[:,1])
+ else:
+ #control state obtained from exogenous data
+ self.control_state = self.model.exodata.IBP_exo_prices.loc[(self.model.exodata.IBP_exo_prices['delivery_day']==day)&(
+ self.model.exodata.IBP_exo_prices['delivery_time']==MTU),
+ 'control_state'].iloc[0]
+ self.model.rpt.publish_BEM_control_state(self.control_state, day, MTU)
+
+
+class MO_imbalance(MarketOperator):
+ def __init__(self, model, orderbook, market_rules):
+ MarketOperator.__init__(self, model, orderbook, market_rules)
+ #Imbalance price of current MTU
+ self.IBP_long = None
+ self.IBP_short = None
+ #sum of all market imbalances of current MTU
+ self.imbalances_long = None
+ self.imbalances_short = None
+ #current MTU tuple
+ self.cur_imbalance_settlement_period = None
+
+ if self.rules.loc['gate_opening_time']=='deliveryMTU':
+ self.gate_opening_time = 0
+ else:
+ raise Exception('IBM gate opening time value not known')
+ if self.rules.loc['gate_closure_time']=='deliveryMTU':
+ self.gate_closure_time = 0
+ else:
+ raise Exception('IBM gate closure time value not known')
+
+
+ if isinstance(self.rules['pricing_method'], int):
+ #an integer value as imbalance pricing method means that a fixed penalty is paid for any imbalance
+ self.IBP_fixed = self.rules['pricing_method']
+ elif (self.rules['pricing_method']=='Dutch_IB_pricing'):
+ if self.model.exodata.IBP_kde_pdfs.empty:
+ raise Exception ('IBP_pdf_sample method for the imbalance market requires IBP_kde_pdf in exodata')
+ else:
+ DAP_bin_left = self.model.exodata.IBP_kde_pdfs.index.get_level_values('[DAP_left_bin').unique().tolist()
+ DAP_bin_right = self.model.exodata.IBP_kde_pdfs.index.get_level_values('DAP_right_bin)').unique().tolist()
+ self.DAP_bins =DataFrame({'DAP_bin_left': DAP_bin_left, 'DAP_bin_right': DAP_bin_right})
+ elif self.rules['pricing_method']=='exogenious':
+ #imbalance prices are taken from the exogenous database
+ pass
+ else:
+ raise Exception('imbalance pricing method not known')
+
+ def imbalance_clearing(self):
+ print('imbalance clearing')
+
+ if isinstance(self.rules['pricing_method'], int):
+ print(' ...with fixed price',self.rules['pricing_method'])
+ #fixed price to be paid for every imbalance (independent of direction)
+ self.IBP_short = self.rules['pricing_method']
+ self.IBP_long = self.rules['pricing_method']
+ day, MTU=self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ elif self.rules['pricing_method']=='Dutch_IB_pricing':
+ print('imbalance clearing of this MTU (dutch style)')
+ if self.model.exodata.sim_task['run_BEM[y/n]']!='y':
+ raise 
Exception ('for the imbalance pricing method Dutch_IB_pricing a BE market operator is needed but Simulation task states not to run BEM')
+ #clearing of this step mtu (step counter has already proceeded, so calculate mtu back)
+ day, MTU=self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ try:
+ DAP = self.model.rpt.prices.loc[(day,MTU), 'DAP']
+ except:
+ if self.model.rpt.prices['DAP'].isnull().all():
+ print('no day-ahead prices available. Possibly because DAM is not run.')
+ print('Default DA price of 30 EUR/MWh used for (Dutch) imbalance clearing')
+ DAP= 30
+ else:
+ DAP = self.model.rpt.prices.loc[(day,MTU), 'DAP']
+ #get corresponding DAP bin edges
+ DAP_bin =self.DAP_bins.loc[(self.DAP_bins['DAP_bin_left']<=DAP)&(
+ self.DAP_bins['DAP_bin_right']>DAP)]
+
+ #Dutch imbalance pricing method (status 28-03-2019)
+ if self.model.BE_marketoperator.control_state == 1:
+ #get scipy.stats kde object from dataframe
+ pdf = self.model.exodata.IBP_kde_pdfs['pdf'].loc[('IB_price_short',
+ DAP_bin.iloc[0,0],
+ DAP_bin.iloc[0,1])]
+ #get sample from kde pdf. returns nested array. therefore [0][0]
+ BEP_up = round(pdf.resample()[0][0])
+ self.IBP_short = BEP_up
+ self.IBP_long = BEP_up
+ elif self.model.BE_marketoperator.control_state == -1:
+ #get scipy.stats kde object from dataframe
+ pdf = self.model.exodata.IBP_kde_pdfs['pdf'].loc[('IB_price_long',
+ DAP_bin.iloc[0,0],
+ DAP_bin.iloc[0,1])]
+ #get sample from kde pdf. returns nested array. therefore [0][0]
+ BEP_down = round(pdf.resample()[0][0])
+ self.IBP_short = BEP_down
+ self.IBP_long = BEP_down
+ elif self.model.BE_marketoperator.control_state == 0:
+ #simplification: DAP as mid price
+ self.IBP_short = DAP
+ self.IBP_long = DAP
+ elif self.model.BE_marketoperator.control_state ==2:
+ #Attention: prices in FRR control state 2 are drawn independently from each other.
+ #This is a simplification.
+ #get scipy.stats kde object from dataframe
+ pdf_up = self.model.exodata.IBP_kde_pdfs['pdf'].loc[('IB_price_short',
+ DAP_bin.iloc[0,0],
+ DAP_bin.iloc[0,1])]
+ pdf_down = self.model.exodata.IBP_kde_pdfs['pdf'].loc[('IB_price_long',
+ DAP_bin.iloc[0,0],
+ DAP_bin.iloc[0,1])]
+ #get sample from kde pdf. returns nested array. therefore [0][0]
+ BEP_up = round(pdf_up.resample()[0][0])
+ BEP_down= round(pdf_down.resample()[0][0])
+
+ #reverse pricing (simplification DAP instead of mid price)
+ if (BEP_up < DAP)|(BEP_down > DAP):
+ self.IBP_short = DAP
+ self.IBP_long = DAP
+ else:
+ self.IBP_short = BEP_up
+ self.IBP_long = BEP_down
+ else:
+ raise Exception('imbalance clearing requires a balancing energy control state of 1, -1, 0 or 2')
+
+ elif self.rules['pricing_method']=='exogenious':
+ """Imbalance prices are taken from the exogenous database. 
This makes sense when comparing various
+ simulations, in order to control randomness"""
+ #Clearing of this step mtu (step counter has already proceeded, so calculate mtu back)
+ day, MTU=self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+
+ self.IBP_short = self.model.exodata.IBP_exo_prices.loc[(self.model.exodata.IBP_exo_prices['delivery_day']==day)&(
+ self.model.exodata.IBP_exo_prices['delivery_time']==MTU),
+ 'IBP_short'].iloc[0]
+ self.IBP_long = self.model.exodata.IBP_exo_prices.loc[(self.model.exodata.IBP_exo_prices['delivery_day']==day)&(
+ self.model.exodata.IBP_exo_prices['delivery_time']==MTU),
+ 'IBP_long'].iloc[0]
+ self.model.rpt.publish_IBM_prices(self.IBP_short, self.IBP_long, day, MTU)
+
+ def imbalance_settlement(self):
+ #step counter has already proceeded, so calculate back, to settle current imbalance
+ day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ #sum all imbalances
+ self.imbalances_long = 0
+ self.imbalances_short = 0
+ self.financial_return_ = 0
+ self.cur_imbalance_settlement_period = (day, MTU)
+
+ for agent in self.model.schedule.agents:
+ #negative means short position, positive means long position.
+ imbalance_quantity = agent.trade_schedule.loc[(day,MTU),'imbalance_position']
+ if imbalance_quantity < 0.0:
+ #always if IBP is positive the market party pays the system operator
+ #short position
+ IBP = self.IBP_short
+ self.imbalances_short += imbalance_quantity
+ elif imbalance_quantity > 0.0:
+ #always if IBP is positive, the market party receives from the system operator
+ IBP = self.IBP_long
+ self.imbalances_long += imbalance_quantity
+ else:
+ IBP = 0
+ agent.financial_return.loc[(day,MTU),'IB_return'] = imbalance_quantity * IBP / 4
+ agent.money += imbalance_quantity * IBP / 4
+ #payment reversed for the system operator
+ self.model.aGridAndSystemOperator.imb_return = - (
+ self.imbalances_long*self.IBP_long + self.imbalances_short *self.IBP_short)/ 4
+
+
+ def imbalance_clearing_4MTU(self):
+ """please note that this alternative method of distinguishing the MTU of an hour
+ will soon have less value, because EU cross-border trading is moving towards
+ 15-minute MTU. However, up to that moment this method may in some cases be useful.
+ To use it, the input (IBP_kde_pdfs) needs to have the MTU of the hour in the index."""
+ print('imbalance clearing')
+
+ if isinstance(self.rules['pricing_method'], int):
+ print(' ...with fixed price',self.rules['pricing_method'])
+ #fixed price to be paid for every imbalance (independent of direction)
+ self.IBP_short = self.rules['pricing_method']
+ self.IBP_long = self.rules['pricing_method']
+ day, MTU=self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ elif self.rules['pricing_method']=='Dutch_IB_pricing':
+ print('imbalance clearing of this MTU (dutch style)')
+ if self.model.exodata.sim_task['run_BEM[y/n]']!='y':
+ raise Exception ('for the imbalance pricing method Dutch_IB_pricing a BE market operator is needed but Simulation task states not to run BEM')
+ #TODO: last step: calculate to end of horizon.
+ #Clearing of this step mtu (step counter has already proceeded, so calculate mtu back)
+ day, MTU=self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ try:
+ DAP = self.model.rpt.prices.loc[(day,MTU), 'DAP']
+ except:
+ if self.model.rpt.prices['DAP'].isnull().all():
+ print('no day-ahead prices available. 
Possibly because DAM is not run.')
+ print('Default DA price of 30 EUR/MWh used for (Dutch) imbalance clearing')
+ DAP= 30
+ else:
+ #otherwise raise the error:
+ DAP = self.model.rpt.prices.loc[(day,MTU), 'DAP']
+ #get corresponding DAP bin edges
+ DAP_bin =self.DAP_bins.loc[(self.DAP_bins['DAP_bin_left']<=DAP)&(
+ self.DAP_bins['DAP_bin_right']>DAP)]
+ #determine which MTU of an hour the current MTU is (1,2,3 or 4)
+ MTU_of_h = MTU%4
+ if MTU_of_h == 0:
+ MTU_of_h = 4
+ #Dutch imbalance pricing method (28-03-2019)
+ if self.model.BE_marketoperator.control_state == 1:
+ #get scipy.stats kde object from dataframe
+ pdf = self.model.exodata.IBP_kde_pdfs['pdf'].loc[('IB_price_short',
+ MTU_of_h,DAP_bin.iloc[0,0],
+ DAP_bin.iloc[0,1])]
+ #get sample from kde pdf. returns nested array. therefore [0][0]
+ BEP_up = round(pdf.resample()[0][0])
+ self.IBP_short = BEP_up
+ self.IBP_long = BEP_up
+ elif self.model.BE_marketoperator.control_state == -1:
+ #get scipy.stats kde object from dataframe
+ pdf = self.model.exodata.IBP_kde_pdfs['pdf'].loc[('IB_price_long',
+ MTU_of_h,DAP_bin.iloc[0,0],
+ DAP_bin.iloc[0,1])]
+ #get sample from kde pdf. returns nested array. therefore [0][0]
+ BEP_down = round(pdf.resample()[0][0])
+ self.IBP_short = BEP_down
+ self.IBP_long = BEP_down
+ elif self.model.BE_marketoperator.control_state == 0:
+ #simplification: DAP as mid price
+ self.IBP_short = DAP
+ self.IBP_long = DAP
+ elif self.model.BE_marketoperator.control_state ==2:
+
+ #Attention: prices in FRR control state 2 are drawn independently from each other.
+ #This is a simplification.
+ #get scipy.stats kde object from dataframe
+ pdf_up = self.model.exodata.IBP_kde_pdfs['pdf'].loc[('IB_price_short',
+ MTU_of_h,DAP_bin.iloc[0,0],
+ DAP_bin.iloc[0,1])]
+ pdf_down = self.model.exodata.IBP_kde_pdfs['pdf'].loc[('IB_price_long',
+ MTU_of_h,DAP_bin.iloc[0,0],
+ DAP_bin.iloc[0,1])]
+ #get sample from kde pdf. returns nested array. therefore [0][0]
+ BEP_up = round(pdf_up.resample()[0][0])
+ BEP_down= round(pdf_down.resample()[0][0])
+
+ #reverse pricing (simplification DAP instead of mid price)
+ if (BEP_up < DAP)|(BEP_down > DAP):
+ self.IBP_short = DAP
+ self.IBP_long = DAP
+ else:
+ self.IBP_short = BEP_up
+ self.IBP_long = BEP_down
+ else:
+ raise Exception('imbalance clearing requires a balancing energy control state of 1, -1, 0 or 2')
+
+ elif self.rules['pricing_method']=='exogenious':
+ """Imbalance prices are taken from the exogenous database. This makes sense when comparing various
+ simulations, in order to control randomness"""
+ #Clearing of this step mtu (step counter has already proceeded, so calculate mtu back)
+ day, MTU=self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+
+ self.IBP_short = self.model.exodata.IBP_exo_prices.loc[(self.model.exodata.IBP_exo_prices['delivery_day']==day)&(
+ self.model.exodata.IBP_exo_prices['delivery_time']==MTU),
+ 'IBP_short'].iloc[0]
+ self.IBP_long = self.model.exodata.IBP_exo_prices.loc[(self.model.exodata.IBP_exo_prices['delivery_day']==day)&(
+ self.model.exodata.IBP_exo_prices['delivery_time']==MTU),
+ 'IBP_long'].iloc[0]
+ self.model.rpt.publish_IBM_prices(self.IBP_short, self.IBP_long, day, MTU)
+
+
+
+
diff --git a/asam classes/MarketParty.py b/asam classes/MarketParty.py
new file mode 100644
index 0000000..38660ec
--- /dev/null
+++ b/asam classes/MarketParty.py
@@ -0,0 +1,2334 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri May 5 11:44:08 2017
+@author: Samuel Glismann
+
+Market Party agent class of ASAM. 
+A Market party agent:
+ - has assets (ID, Pmax, Pmin, SRMC, Location)
+ - trade schedule
+ - a financial balance sheet
+
+Methods are:
+ - init
+ - step
+ - update_trade_schedule
+ - set_asset_commit_constraints
+ - portfolio_optimization
+ - place_ID_orders
+ - place_RD_orders
+ - place_BE_orders
+ - small random quantities
+ - start_stop_blocks
+ - intra-day markup
+ - opportunity_markups
+ - start_stop_markups
+ - ramping_markup
+ - doublescore_markup
+
+Note: for portfolio_optimization PyPSA and Pyomo are applied.
+
+"""
+from mesa import Agent, Model
+from mesa.time import RandomActivation
+from random import randrange, choice
+import pandas as pd
+import math
+import pypsa
+from pandas import Series, DataFrame
+import numpy as np
+from OrderMessage import *
+from pyomo.environ import (ConcreteModel, Var, Objective,
+ NonNegativeReals, Constraint, Reals,
+ Suffix, Expression, Binary, SolverFactory)
+
+from pypsa.opt import (l_constraint, l_objective, LExpression, LConstraint)
+
+class MarketParty(Agent):
+
+ def __init__(self, unique_id, model, assets = None, agent_strategy = None):
+ super().__init__(unique_id, model)
+ self.model = model
+ # Series of strategy items
+ self.strategy = agent_strategy
+ #DF with Asset() instances and unique ID as index
+ if len(assets.index) != len(assets.index.unique()):
+ raise Exception('asset ids must be unique per agent',assets.index)
+ else:
+ self.assets = assets
+ self.money = 0
+ self.step_rank = None
+ #Variable to skip portfolio optimization (if no trade and no forecast changes,
+ #no optimization is calculated)
+ self.unchanged_position = False #values: True, False, 'forecast error'
+ #1000 as standard imbalance risk price. This is only used to provide an artificial price for market orders
+ try:
+ self.imbalance_risk_price = self.model.IB_marketoperator.IBP_fixed
+ except:
+ self.imbalance_risk_price = 1000
+
+ #Some strategy consistency checks
+ if not(self.strategy.loc['IBM_pricing']=='marginal_orderbook_strategy')|(
+ self.strategy.loc['IBM_pricing']=='market_order_strategy')|(
+ self.strategy.loc['IBM_pricing']=='impatience_curve'):
+ raise Exception ('imbalance pricing strategy not implemented in agents')
+ if (self.strategy.loc['IBM_pricing']=='impatience_curve')|(self.strategy.loc['IBM_quantity']=='impatience_curve'):
+ if not self.strategy.loc['IBM_pricing']==self.strategy.loc['IBM_quantity']:
+ raise Exception ('Agent strategy impatience_curve must be applied for both quantity and pricing strategy')
+
+ #All orders should be in a non-multiindex format. The dataframes are emptied per round
+ orderlabels =['agent_id','associated_asset','delivery_location','quantity','price',
+ 'delivery_day','delivery_time','order_type','init_time', 'order_id',
+ 'direction','matched_order','cleared_quantity','cleared_price','rem_vol', 'due_amount']
+ self.accepted_red_orders = DataFrame( columns = orderlabels) #emptied in set_asset_commit_constraints()
+ self.accepted_ID_orders = DataFrame(columns = orderlabels) #emptied in update_trade_schedule()
+ self.accepted_BE_orders = DataFrame(columns = orderlabels) #emptied in update_trade_schedule()
+ self.accepted_DA_orders = DataFrame(columns = orderlabels) #emptied in update_trade_schedule()
+ self.ordercount = 1
+ self.trade_schedule = model.clock.asset_schedules_horizon() #manipulated every round. All trade positions where delivery period > current time remain unchanged
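+ #Added overview (assumption, for orientation): the DA/ID/RD/BE position columns are
+ # presumably aggregated into 'total_trade', while 'imbalance_position' captures the
+ # gap between traded and dispatched energy including 'forecast_error'; it is this
+ # column that MO_imbalance.imbalance_settlement() settles per MTU.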
All trade positions where delivery periode>current time remain unchanged + self.trade_schedule= self.trade_schedule.reindex(columns= [ + 'DA_position','ID_position','RD_position','BE_position', + 'forecast_error','total_trade', 'imbalance_position', 'total_dispatch'], fill_value= 0) + self.financial_return = model.clock.asset_schedules_horizon() #manipulated every round. All returns where delivery periode>current time remain unchanged + self.financial_return = self.financial_return.reindex(columns = [ + 'DA_return','ID_return','RD_return', 'BE_return','IB_return', + 'total_return', 'total_dispatch_costs', 'profit']) + + + #initiate PyPSA model for optimal asset dispatch + self.commit_model = pypsa.Network() + self.commit_model.add("Bus","bus") + self.commit_model.add("Load","trade_position",bus="bus") + #add a generator that captures the unfeasible trade commitment (open short position) + self.commit_model.add("Generator",'short_position' ,bus="bus", + committable=True, + p_min_pu= 0, + marginal_cost=self.imbalance_risk_price, + p_nom=10000) + #add a negative generator unit to capture unfeasible trade commitment (open long positions) + #This generator has a negative price. + self.commit_model.add("Generator",'long_position',bus="bus", + committable = True, + p_min_pu= 1, + p_max_pu= 0, + p_nom=-10000, + marginal_cost= -self.imbalance_risk_price) + all_ids = self.assets.index.values + + #Include all assets of the portfolio + for i in range(len(all_ids)): + asset = self.assets.loc[all_ids[i],:].item() + self.commit_model.add("Generator",asset.assetID ,bus="bus", + committable=True, + p_min_pu= asset.pmin/asset.pmax, + marginal_cost = asset.srmc, + p_nom=asset.pmax) + #Agent strategy determines which constraints are taken into account during asset optimization + if self.strategy['ramp_limits']==True: + self.commit_model.generators.ramp_limit_up[asset.assetID] = asset.ramp_limit_up + self.commit_model.generators.ramp_limit_down[asset.assetID] = asset.ramp_limit_down + #TODO: find out what the impact of ramp_limit start stop is. + #PyPSA question not yet answered. start_stop ramps only used in startstop price determination. +# self.commit_model.generators.ramp_limit_start_up[asset.assetID] = asset.ramp_limit_start_up +# self.commit_model.generators.ramp_limit_shut_down[asset.assetID] = asset.ramp_limit_shut_down + if self.strategy['start_stop_costs']==True: + self.commit_model.generators.start_up_cost[asset.assetID] = asset.start_up_cost + self.commit_model.generators.shut_down_cost[asset.assetID] = asset.shut_down_cost + if self.strategy['min_up_down_time']==True: + self.commit_model.generators.min_up_time[asset.assetID] = asset.min_up_time + self.commit_model.generators.min_down_time[asset.assetID] = asset.min_down_time + + def step(self): + """ + Step method executes all agent methods. + Note: the order of the methods matters. 
+ """ + #add 1 to agent step order and store for report + self.model.agent_random_step_index += 1 + #trace random step rank + self.step_rank = self.model.agent_random_step_index + + self.update_trade_schedule(positions=['DA_position','ID_position','RD_position','BE_position']) + self.set_asset_commit_constraints() + self.portfolio_dispatch() + #place_ID_order leads to instatanous IDM clearing + self.place_ID_orders() + if not self.accepted_ID_orders.empty: + print('processing ID clearing result with another trade schedule update and portfolio optimization') + #after instantanous clearing, another portfolio optimization is needed before redispatch orders can be made + self.update_trade_schedule(positions=['ID_position']) + self.portfolio_dispatch() + self.place_RD_orders() + self.place_BE_orders() + + + def update_trade_schedule(self, positions =[]): + """ + Method: + Aggregates all offered orders that led to transactions into a trade schedule. + Moreover, a financial balance sheet is updated with the financial returns. + Input: + positions (list): includes all positions to be updated + + """ + print('start update trade schedule of Agent ', self.unique_id) + if (self.accepted_red_orders.empty)&(self.accepted_ID_orders.empty)&( + self.accepted_DA_orders.empty)&( + self.accepted_BE_orders.empty)&( + self.unchanged_position != 'forecast_error'): + #no new trades and no new forecast errors in last round + self.unchanged_position = True + print('no position has changed of this agent') + else: + self.unchanged_position = False + new_trade_schedule = self.model.schedules_horizon.copy() + new_trade_schedule = new_trade_schedule.add(self.trade_schedule,fill_value = 0) + new_trade_returns = self.model.schedules_horizon.copy() + new_trade_returns = new_trade_returns.add(self.financial_return,fill_value = 0) + if self.unchanged_position == False: + for i in positions: + new_transactions = DataFrame() + if i == 'DA_position': + new_transactions =self.accepted_DA_orders[['delivery_day','delivery_time' + ,'cleared_quantity','direction', 'due_amount']] + k = 'DA_return' + #clear accepted_orders DataFrame. Will be filled again after settlement this round + self.accepted_DA_orders = self.accepted_DA_orders.iloc[0:0] + elif i == 'ID_position': + new_transactions =self.accepted_ID_orders[['delivery_day','delivery_time' + ,'cleared_quantity','direction', 'due_amount']] + k = 'ID_return' + + #clear accepted_orders DataFrame. Will be filled again after settlement this round + self.accepted_ID_orders = self.accepted_ID_orders.iloc[0:0] + elif i == 'RD_position': + new_transactions =self.accepted_red_orders[['delivery_day','delivery_time' + ,'cleared_quantity','direction', 'due_amount']] + k = 'RD_return' + #accepted redispatch orders are cleared in set_asset_commit_constraints () + elif i == 'BE_position': + new_transactions =self.accepted_BE_orders[['delivery_day','delivery_time' + ,'cleared_quantity','direction', 'due_amount']] + k = 'BE_return' + #clear accepted_orders DataFrame. Will be filled again after settlement this round + self.accepted_BE_orders = self.accepted_BE_orders.iloc[0:0] + else: + raise Exception('position to be updated unknown') + if new_transactions.empty: + pass #do nothing. next position. 
+                else:
+                    #make sell orders negative
+                    mask = new_transactions['direction'] == 'buy'
+                    new_transactions[i] = new_transactions['cleared_quantity'].where(mask, -1*new_transactions['cleared_quantity']).astype('float64')
+                    new_transactions[k] = new_transactions['due_amount']
+                    new_transactions.set_index(['delivery_day','delivery_time'], inplace=True)
+                    #sum (saldo) of the agent's trades per timestamp
+                    new_transactions = new_transactions.groupby(level =[0,1]).sum()
+                    #add to 'position' column in self.trade_schedule
+                    new_trade_schedule[i] = new_trade_schedule[i].add(new_transactions[i], fill_value = 0)
+                    #add to 'return' column in self.financial_return
+                    new_trade_returns[k] = new_trade_returns[k].add(new_transactions[k], fill_value = 0)
+            #overwrite self.trade_schedule
+            self.trade_schedule = new_trade_schedule.copy()
+            #overwrite self.financial_return
+            self.financial_return = new_trade_returns.copy()
+            #calculate the total schedule. This value can be positive or negative and larger than the sum of all asset pmax.
+            self.trade_schedule['total_trade'] = self.trade_schedule[['DA_position','ID_position','RD_position','BE_position','forecast_error']].sum(axis=1)
+            #calculate the total return
+            self.financial_return['total_return'] = self.financial_return[['DA_return','ID_return','RD_return', 'BE_return','IB_return']].sum(axis=1)
+
+    def set_asset_commit_constraints (self):
+        """
+        Method:
+        Collects redispatch transactions from the last simulation step
+        and updates a constraint dataframe per asset.
+
+        In case an asset is associated with a redispatch transaction,
+        additional constraints are applicable to the dispatch optimization.
+
+        In case of an upward redispatch transaction, the asset is bound to a
+        dispatch above the last dispatch schedule + upward redispatch quantity.
+        In case of a downward redispatch transaction, the asset is bound to a dispatch
+        below the last dispatch schedule - downward redispatch quantity.
+        """
+
+        if self.accepted_red_orders.empty:
+            #In this case there are no time-varying dispatch constraints on assets from redispatch,
+            #other than previous constraints in asset.constraint_df.
+            #However, the asset.constraint_df needs to be updated with the dispatch schedule horizon.
+            all_ids = self.assets.index.values
+            #update asset.constraint_df
+            for i in range(len(all_ids)):
+                asset = self.assets.loc[all_ids[i],:].item()
+                asset.calc_dispatch_constraints(self.model.schedules_horizon)
+        else:
+            all_ids = self.assets.index.values
+            #update asset.constraint_df
+            i=0
+            for i in range(len(all_ids)):
+                asset = self.assets.loc[all_ids[i],:].item()
+
+                #get redispatch transactions of that asset
+                RDM_asset_schedule = self.accepted_red_orders.loc[self.accepted_red_orders['associated_asset'] == asset.assetID,
+                                     ['delivery_day', 'delivery_time','cleared_quantity','direction']]
+                if RDM_asset_schedule.empty:
+                    #even though no redispatch has been cleared for this asset,
+                    #constraints are calculated to get the right size of the constraint_df
+                    new_red_schedule = self.model.schedules_horizon.copy()
+                    asset.calc_dispatch_constraints(new_red_schedule)
+                else:
+                    new_red_schedule = self.model.schedules_horizon.copy()
+                    #make a temporary dataframe from accepted bids that can be added to the redispatch asset schedule;
+                    #make buy orders negative to use them for a dispatch reduction (downward)
+                    mask = RDM_asset_schedule['direction'] == 'sell'
+                    RDM_asset_schedule['cleared_quantity'] = RDM_asset_schedule['cleared_quantity'].where(mask, -RDM_asset_schedule['cleared_quantity'])
+
+                    RDM_asset_schedule.set_index(['delivery_day','delivery_time'], inplace = True)
+                    RDM_asset_schedule.sort_index(level = 1, inplace = True)
+                    RDM_asset_schedule = RDM_asset_schedule.groupby(level =[0,1]).sum()
+                    RDM_asset_schedule.rename(columns = {'cleared_quantity': 'commit'}, inplace = True)
+
+                    #place commitment in schedule horizon format
+                    new_red_schedule['commit'] = RDM_asset_schedule['commit'].copy()
+                    new_red_schedule['commit'] = new_red_schedule['commit'].loc[self.model.schedules_horizon.index].fillna(value=0).copy()
+                    asset.calc_dispatch_constraints(new_red_schedule)
+        #remove all accepted redispatch orders from this round
+        self.accepted_red_orders = self.accepted_red_orders.iloc[0:0]
+
+
+    def portfolio_dispatch(self):
+        """
+        Method:
+
+        PyPSA is used to determine an optimal asset commitment, given the trade position of the agent.
+        - the total trade position is implemented as 'load'
+        - a slack generator with high costs is used to capture the infeasible dispatch (open position)
+        - if the total trade position is negative, it is not captured by the slack generator,
+          but cut out of the schedule in advance and added to the imbalance.
+        - the method also calculates and administers imbalances and profits of the agent
+        - the method furthermore determines the available capacity per asset.
+ + """ + + print('start portfolio optimization of Agent ', self.unique_id) + + #all assets id's of this agent + all_ids = self.assets.index.values + must_run_commit= DataFrame() + if self.unchanged_position == True: + print('no additional portfolio optimization needed because nothing has changed for this agent') + #add dispatch cost of current (thus realized) dispatch to bank account + day,mtu = self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps,0) + if (day,mtu) in self.financial_return.index: + self.money += self.financial_return.loc[(day,mtu),'total_dispatch_costs'].copy() + else: + #convert model time index to PyPSA snapshot format (no tuples or multiindex) + snap = DataFrame(index=self.model.schedules_horizon.index) + snap=snap.reset_index() + snap['strIndex']=snap['delivery_day'].map(str)+str('_')+snap['delivery_time'].map(str) + self.commit_model.set_snapshots(snap['strIndex']) + + #add the trade position to network model load. + # Note that positive values mean consumption in PyPSA. + # Positive trade positions, however, mean long position. Trade schedule is therefore multiplied by -1 + relevant_trades = -self.trade_schedule.loc[self.model.schedules_horizon.index].copy() + if len(relevant_trades['total_trade'])== len(snap['strIndex']): + #adjust relevant trades by setting negative values ('generation') on 0 + if (relevant_trades['total_trade']<0).any(): + relevant_trades['total_trade'] = relevant_trades['total_trade'].where(relevant_trades['total_trade']>=0.0,0) + print('Agent{} has a total schedule with values < 0'.format(self.unique_id)) + #make list from series + commit_lst = list(relevant_trades['total_trade'].fillna(value=0)) + else: + raise Exception ('there is a issue with the snapshot timestamps and asset trade_schedule timestamps') + #assigne trade position to commit_model + self.commit_model.loads_t.p_set['trade_position']= commit_lst + #calculate timestamp of last round + day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0) + #Assign time variant asset constraints in p.u. 
of pmax to each asset of agent + for i in range(len(all_ids)): + asset = self.assets.loc[all_ids[i],:].item() + if len(snap['strIndex']) == len(asset.constraint_df.loc[self.model.schedules_horizon.index]): + #to be given to pypsa + pmin_t = list(asset.constraint_df['p_min_t'].loc[self.model.schedules_horizon.index]/asset.pmax) + pmax_t = list(asset.constraint_df['p_max_t'].loc[self.model.schedules_horizon.index]/asset.pmax) + + #Asset must-run contraints to be added to must_run_commit df (from upward redispatch), + #for later use as extra cpontraint in PyPSA optimal power flow + if (asset.constraint_df['upward_commit']>0).any(): + #constraint snapshots + up_const = asset.constraint_df[['dispatch_limit', 'p_max_t']].loc[self.model.schedules_horizon.index].loc[ + asset.constraint_df['upward_commit']!=0].reset_index() + up_const['gen_name'] = asset.assetID + up_const ['strIndex']=up_const['delivery_day'].map(str)+str('_')+up_const['delivery_time'].map(str) + #make an index from generatorname and snapshot for later use in pypsa constraint method + up_const.set_index(['gen_name','strIndex'], inplace=True) + must_run_commit = pd.concat([must_run_commit, up_const]) + else: + print(asset.assetID) + raise Exception ('there is a issue with the snapshot timestamps and asset constraints timestamps') + self.commit_model.generators_t.p_max_pu[asset.assetID] = pmax_t + self.commit_model.generators_t.p_min_pu[asset.assetID] = pmin_t + + #ensure that minimum up & downtimes > optimization horizon are corrected + if asset.min_up_time > len(snap): + self.commit_model.generators.min_up_time[asset.assetID] = len(snap) + if asset.min_down_time > len(snap): + self.commit_model.generators.min_down_time[asset.assetID] = len(snap) + + #ensure that the initial disaptch status taken from last dispatch (relevant for start stop costs) + try: + last_dispatch = asset.schedule.loc[(day,MTU), 'commit'] + if last_dispatch > 0: + self.commit_model.generators.initial_status[asset.assetID]=1 + else: + self.commit_model.generators.initial_status[asset.assetID]=0 + except: + #in the first step there is no previous dispatch. 
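+                    #Illustrative note (assumption: standard PyPSA committable generator
+                    #semantics): initial_status tells the unit-commitment problem whether a
+                    #generator was on before the first snapshot, so that start-up/shut-down
+                    #costs at the horizon start are priced correctly. A minimal toy sketch:
+                    #  n = pypsa.Network()
+                    #  n.add("Bus", "bus")
+                    #  n.add("Generator", "gen", bus="bus", committable=True, p_nom=100,
+                    #        start_up_cost=500, marginal_cost=20)
+                    #  n.generators.initial_status['gen'] = 0
+                    #  # any dispatch > 0 in the first snapshot now incurs the start-up cost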
+ if not self.model.DA_marketoperator.test_init_dispatch.empty: + #test init dispatch + self.commit_model.generators.initial_status[asset.assetID]=self.model.DA_marketoperator.test_init_dispatch[asset.assetID] + else: + #assume 1 + self.commit_model.generators.initial_status[asset.assetID]=1 + + def red_commit_constraint(network, snapshots): + """this method gives an extra must-run constraint to generators that have been + commited to upward redispatch""" + if must_run_commit.empty: + pass + else: + gen_p_bounds = {(gen_sn) : (must_run_commit.loc[gen_sn,'dispatch_limit'], + must_run_commit.loc[gen_sn,'p_max_t']) + for gen_sn in must_run_commit.index.values} + red_must_run={} + for gen_sn in must_run_commit.index.values: + red_must_run[gen_sn] = [[(1, network.model.generator_p[gen_sn])],"><", gen_p_bounds[gen_sn]] + l_constraint(network.model, "must_run", red_must_run, list(must_run_commit.index.values)) + + #run linear optimal power flow + try: + lopf_status= self.commit_model.lopf(self.commit_model.snapshots, extra_functionality = red_commit_constraint, + solver_name= self.model.exodata.solver_name, free_memory={'pypsa'}) + + except: + print(self.commit_model.generators_t.p_max_pu) + print(self.commit_model.generators_t.p_min_pu) + import pdb + pdb.set_trace() + + #process loadflow results to dispatch and trade schedule + opt_dispatch = self.commit_model.generators_t.p.copy() + opt_dispatch['long_position']= -opt_dispatch['long_position'] + + if opt_dispatch.empty: + print('Issue with agent ',self.unique_id) + print(self.trade_schedule) + raise Exception('optimal power flow did not find a solution') + #convert index again + opt_dispatch.index = opt_dispatch.index.str.split(pat='_', expand =True) + opt_dispatch.index.set_names(['delivery_day','delivery_time'], inplace=True) + #make inters from index + opt_dispatch.reset_index(inplace=True) + opt_dispatch[['delivery_day','delivery_time']] = opt_dispatch[['delivery_day','delivery_time']].astype('int64') + opt_dispatch.set_index(['delivery_day','delivery_time'], inplace=True) + #calculate total dispatch (excluding the dummy generator/storage) + opt_dispatch['total_dispatch'] =opt_dispatch.sum(axis=1)- opt_dispatch['long_position'] - opt_dispatch['short_position'] + self.trade_schedule.loc[self.model.schedules_horizon.index,'total_dispatch'] = opt_dispatch['total_dispatch'].loc[self.model.schedules_horizon.index] + + #positive trade schedule values mean that more is bought than sold (long), + #negative trade schedule values mean short position (more sold than bought) + #dispatch positive means injection to grid, negative means consumption + #total trade schedule + total dispatch = imbalance position. + # a positive imbalance position is a long imbalance position ->more produced than (net) sold. 
+ # a negative imbalance position is a short imbalance position ->less produced than (net) sold + self.trade_schedule['imbalance_position'].loc[ + self.model.schedules_horizon.index] = self.trade_schedule[ + 'total_dispatch'].loc[self.model.schedules_horizon.index + ] + self.trade_schedule['total_trade'].loc[ + self.model.schedules_horizon.index] + if (self.trade_schedule['imbalance_position'].loc[self.model.schedules_horizon.index]!=0).any(): + print('IMBALANCE of agent: ', self.unique_id) + + + #calculate dispatch costs (also devided by 4 as we have 15 minute steps) + dispatch_cost = opt_dispatch.copy() + for j in opt_dispatch.loc[:,(opt_dispatch.columns != 'long_position')& ( + opt_dispatch.columns != 'short_position')& ( + opt_dispatch.columns != 'total_dispatch')].columns: + #variable cost + dispatch_cost['var_cost_'+j] = dispatch_cost[j] * self.commit_model.generators.loc[j,'marginal_cost']/4 + + #calculate startstop cost + startstopcost =Series([1]*len(dispatch_cost),index =dispatch_cost.index).where(dispatch_cost[j] > 0,0) + #starts=1, stops=-1 + startstopcost=startstopcost - startstopcost.shift(1) + #take into account initial status + if (self.commit_model.generators.initial_status[j]==1)&(int(round(dispatch_cost[j].iloc[0]))==0): + startstopcost.iloc[0] = -1 + elif (self.commit_model.generators.initial_status[j]==0)&(int(round(dispatch_cost[j].iloc[0]))>0): + startstopcost.iloc[0] = 1 + else: + startstopcost.iloc[0] = 0 + startstopcost.loc[startstopcost==1] =self.commit_model.generators.start_up_cost[j] + startstopcost.loc[startstopcost==-1] =self.commit_model.generators.shut_down_cost[j] + dispatch_cost['fix_cost_'+j] = startstopcost + dispatch_cost.drop(opt_dispatch.columns, axis=1,inplace = True) + #calculate total dispatch cost (startstop cost included) + dispatch_cost['total_dispatch_costs'] = dispatch_cost.sum(axis=1) + + #dispatch costs are by definition negative + self.financial_return.loc[self.model.schedules_horizon.index,'total_dispatch_costs'] = -dispatch_cost[ + 'total_dispatch_costs'].loc[self.model.schedules_horizon.index].copy() + + #calculate total profit + self.financial_return['profit'] = self.financial_return[['total_return','total_dispatch_costs']].sum(axis=1) + + + #add dispatch cost of current (thus realized) dispatch to bank account + day,mtu = self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps,0) + self.money += self.financial_return.loc[(day,mtu),'total_dispatch_costs'].copy() + #end of if-else unchanged_position == True + + """assign dispatch values to asset schedule and calculate available capacity""" + i=0 + for i in range(len(all_ids)): + asset = self.assets.loc[all_ids[i],:].item() + #enlarge asset schedule time index + mask =self.model.schedules_horizon.index.isin(asset.schedule.index) + asset.schedule = asset.schedule.append(self.model.schedules_horizon.loc[~mask,asset.schedule.columns]) + if self.unchanged_position == False: #if unchanged == True this step is skipped + #add optimal dispatch results to asset schedule, without overwriting the past, i.e. 
realized dispatch + mask = asset.schedule.index.isin(opt_dispatch.index) + asset.schedule['commit'] = asset.schedule['commit'].where(~mask,opt_dispatch[asset.assetID].copy()) + #ensure that no very small values from the solver stay in results + asset.schedule['commit'] =asset.schedule['commit'].round(0).astype(int) + + #calculate available capacity based on constraint df temp pmax and pmin + asset.schedule['p_max_t'].loc[self.model.schedules_horizon.index] = asset.constraint_df['p_max_t'] + asset.schedule['p_min_t'].loc[self.model.schedules_horizon.index] = asset.constraint_df['p_min_t'] + + asset.schedule['available_up'] = (asset.schedule['p_max_t'] - asset.schedule['commit']).where( + (asset.schedule['commit'] >= asset.pmin) & ( + asset.schedule['p_max_t'] > asset.schedule['commit']) & ( + asset.constraint_df['downward_commit'] == 0), 0) + #available down is also a positive value!! + asset.schedule['available_down'] = (asset.schedule['commit']-asset.schedule['p_min_t']).where( + (asset.schedule['commit'] > asset.schedule['p_min_t']) & (asset.constraint_df['upward_commit'] == 0), 0) + + if ((asset.schedule['available_up'] < 0).any())|((asset.schedule['available_down'] < 0).any()): + import pdb + pdb.set_trace() + raise Exception('available_up and available_down must be values >= 0') + mask = (self.trade_schedule['imbalance_position'].loc[self.model.schedules_horizon.index] != 0) + if (mask == True).any(): + #Note: MTU with imbalance are restricted for usual bidding. + #The market party applies a intraday bidding strategy to these MTU + asset.schedule['available_up'].loc[self.model.schedules_horizon.index + ] = asset.schedule['available_up'].loc[ + self.model.schedules_horizon.index].where(~mask, 0) + asset.schedule['available_down'].loc[self.model.schedules_horizon.index + ] = asset.schedule['available_down'].loc[ + self.model.schedules_horizon.index].where(~mask, 0) + + #available capacity with contraint that it is <=ramp rate from one MTU to the next + asset.schedule['ramp_constr_avail_up'] =asset.schedule['available_up'].where( + asset.schedule['available_up']<=asset.ramp_limit_up *asset.pmax, asset.ramp_limit_up *asset.pmax) + asset.schedule['ramp_constr_avail_down'] =asset.schedule['available_down'].where( + asset.schedule['available_down']<=asset.ramp_limit_down *asset.pmax, asset.ramp_limit_down *asset.pmax) + #available capacity with constraint that it is <= remaining ramps considering commited ramp t-1 and t+1 + #Pt - Pt-1 + asset.schedule['commit_ramp_t-1'] = asset.schedule['commit'] - asset.schedule['commit'].shift(1) + #Pt+1 - Pt + asset.schedule['commit_ramp_t+1'] = asset.schedule['commit'].shift(-1) - asset.schedule['commit'] + #correct nan of first and last time stamp. Assume commit ramp of 0. 
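+            #Worked toy example (illustrative values) for the remaining-ramp logic below:
+            #with pmax = 100 MW, ramp_limit_up = ramp_limit_down = 0.5 and a committed
+            #profile 40 -> 60 -> 60 MW, the middle MTU has commit_ramp_t-1 = 20 MW and
+            #commit_ramp_t+1 = 0 MW, so the remaining upward headroom is
+            #  min(-20 + 50, 0 + 50, available_up) = min(30, 50, available_up)
+            #i.e. the committed ramp of 20 MW eats into the 50 MW-per-MTU ramp budget.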
+            asset.schedule['commit_ramp_t-1'].fillna(value = 0, inplace=True)
+            asset.schedule['commit_ramp_t+1'].fillna(value = 0, inplace=True)
+            #a deviation at t changes both the ramp from t-1 and the ramp towards t+1, hence both terms are checked
+            asset.schedule['rem_ramp_constr_avail_up'] = asset.schedule.apply(
+                    lambda x: min(- x['commit_ramp_t-1'] + asset.ramp_limit_up *asset.pmax,
+                                  x['commit_ramp_t+1'] + asset.ramp_limit_up *asset.pmax,
+                                  x['available_up']), axis =1)
+            asset.schedule['rem_ramp_constr_avail_down'] = asset.schedule.apply(
+                    lambda x: min(x['commit_ramp_t-1'] + asset.ramp_limit_down * asset.pmax,
+                                  - x['commit_ramp_t+1'] + asset.ramp_limit_up * asset.pmax,
+                                  x['available_down']), axis =1)
+            #if the portfolio dispatch optimization allows start-stop changes > ramp limits, negative remaining ramps are to be avoided
+            asset.schedule['rem_ramp_constr_avail_up'] = asset.schedule['rem_ramp_constr_avail_up'].where(asset.schedule['rem_ramp_constr_avail_up'] >= 0, 0)
+            asset.schedule['rem_ramp_constr_avail_down'] = asset.schedule['rem_ramp_constr_avail_down'].where(asset.schedule['rem_ramp_constr_avail_down'] >= 0, 0)
+
+
+    def place_RD_orders(self):
+        """
+        Method: determines order quantity, order price and other order attributes.
+        Then an order message is made and sent to the redispatch order book.
+
+        Note: order messages contain many orders. To reduce computation time, the order messages
+        are composed from lists, instead of manipulating DataFrames. This makes
+        the sorting of the content of the lists (i.e. the filling of the lists) crucial.
+        """
+        #check if redispatch is part of the simulation task
+        if self.model.exodata.sim_task['run_RDM[y/n]']=='n':
+            print('Agent {}: no redispatch in simulation task'.format(self.unique_id))
+        elif self.model.RD_marketoperator.rules['order_types']== 'IDCONS_orders':
+            #no dedicated redispatch orders allowed; redispatch via IDCONS orders on the intraday market
+            pass
+        elif self.strategy['RDM_quantity']=='None':
+            #this agent does not participate in the redispatch market
+            print('Agent {}: does not participate in redispatch market'.format(self.unique_id))
+            pass
+        else:
+            print("Agent {} makes redispatch bids".format(self.unique_id))
+
+            #first delete all own redispatch orders from the previous round from the orderbook
+            self.model.red_obook.delete_orders(agent_id_orders = self.unique_id)
+            #lists per order attribute
+            asset_location_lst = []
+            agentID_lst = []
+            assetID_lst = []
+            init_lst = []
+            direction_lst = []
+            ordertype_lst = []
+            qty_lst=[]
+            price_lst=[]
+            day_lst=[]
+            mtu_lst=[]
+            delivery_duration =[]
+
+            gate_closure_MTU = self.model.RD_marketoperator.gate_closure_time
+            #delivery time lists
+            dayindex = list(self.model.schedules_horizon.index.get_level_values(0))[gate_closure_MTU:]
+            timeindex = list(self.model.schedules_horizon.index.get_level_values(1))[gate_closure_MTU:]
+
+            #order initiation for up to 1000 agents (shows when an order is initiated within one simulation step)
+            init = self.step_rank/1000 + self.model.clock.get_MTU()
+
+            #get all assets of this market party
+            all_ids = self.assets.index.values
+            for i in range(len(all_ids)):
+                a = self.assets.loc[all_ids[i],:].item()
+                #store the current asset schedule (excluding the past) for calculation of dispatch constraints from redispatch
+                a.schedule_at_redispatch_bidding = a.schedule.loc[self.model.schedules_horizon.index].copy()
+                if self.strategy['RDM_timing']=='instant':
+                    #list containing three lists (delivery day, delivery time, delivery duration)
+                    #used for block orders, i.e.
delivery duration > 1 MTU + startblocks =[[],[],[]] + stopblocks = [[],[],[]] + price_start_lst = [] + price_stop_lst= [] + if self.strategy['RDM_quantity']=='random': + #'random' quantity strategy determines small random quantity + qty_up_lst = self.small_random_quantity(a.schedule[['available_up','p_max_t']].loc[ + self.model.schedules_horizon.index].fillna(0)) + qty_down_lst = self.small_random_quantity(a.schedule[['available_down','p_max_t']].loc[ + self.model.schedules_horizon.index].fillna(0)) + elif (self.strategy['RDM_quantity']=='all_operational')|( + self.strategy['RDM_quantity']=='all_plus_startstop'): + #'all_operational' includes all available capacity, excluding start stop. + #'all__plus_start_stop' means all available capacity. + qty_up_lst = list(a.schedule['available_up'].loc[ + self.model.schedules_horizon.index].fillna(0).astype(int)) + qty_down_lst = list(a.schedule['available_down'].loc[ + self.model.schedules_horizon.index].fillna(0).astype(int)) + elif self.strategy['RDM_quantity']=='not_offered_plus_startstop': + #' not_offered_plus start_stop' means that offered quantities on intra-day market is deducted. + #get the position from offered on IDM + buy_position, sell_position =self.model.IDM_obook.get_offered_position(associated_asset=a.assetID) + #deduct offered position from availble capacity + if a.schedule['available_up'].loc[ + self.model.schedules_horizon.index].index.isin(sell_position.index).any(): + qty_up_lst = a.schedule['available_up'].loc[ + self.model.schedules_horizon.index].fillna(0).to_frame().join( + -sell_position).sum(axis=1).astype(int).copy().values + #correct negative values. Reason is a portfolio dispatch optimization after IDM clearing. + qty_up_lst[qty_up_lst < 0] = 0 + qty_up_lst = list(qty_up_lst) + else: + #sell_position empty or outside schedule + qty_up_lst = list(a.schedule['available_up'].loc[ + self.model.schedules_horizon.index].fillna(0).astype(int)) + if a.schedule['available_down'].loc[ + self.model.schedules_horizon.index].index.isin(buy_position.index).any(): + qty_down_lst = a.schedule['available_down'].loc[ + self.model.schedules_horizon.index].fillna(0).to_frame().join( + -buy_position).sum(axis=1).astype(int).copy().values + qty_down_lst[qty_down_lst < 0] = 0 + qty_down_lst = list(qty_down_lst) + else: + qty_down_lst = list(a.schedule['available_down'].loc[ + self.model.schedules_horizon.index].fillna(0).astype(int)) + + else: + raise Exception('redispatch quantity strategy not known') + if (self.strategy['RDM_quantity']=='all_plus_startstop')|( + self.strategy['RDM_quantity']=='not_offered_plus_startstop'): + #add startblock and stop blocks for strategies involving start and stop capacity. 
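+                        #Sketch of the assumed data shape (toy values): start_stop_blocks()
+                        #returns two nested lists [days, mtus, durations], one entry per
+                        #block order, e.g.
+                        #  startblocks = [[1, 1], [20, 60], [4, 6]]
+                        #encodes two start-up blocks on day 1, one from MTU 20 lasting 4 MTUs
+                        #and one from MTU 60 lasting 6 MTUs, each offered with quantity pmin.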
+ av_cap = pd.concat([a.schedule.loc[self.model.schedules_horizon.index[gate_closure_MTU:]], + a.constraint_df.loc[self.model.schedules_horizon.index[ + gate_closure_MTU:],'upward_commit'].copy()], axis=1) + startblocks, stopblocks = self.start_stop_blocks (av_cap, \ + a.pmin, a.min_up_time, a.min_down_time,a.assetID) + #in case the pricing strategy contains no start stop markup, prices are per default srmc + if startblocks: + price_start_lst = [int(a.srmc)] * len(startblocks[0]) + if stopblocks: + price_stop_lst= [int(a.srmc)] * len(stopblocks[0]) + + #ORDER PRICING + + #short-run marginal costs are fundamental price to which mark-ups are added + price_up_lst = [int(a.srmc)] * len(dayindex) + price_down_lst = [int(a.srmc)] * len(dayindex) + + def add_markups_to_price_list(price_up_lst, price_down_lst, markup_up, markup_down): + + if len(markup_up)==len(price_up_lst): + price_up_lst = [i[0] +i[1] for i in zip(markup_up,price_up_lst)] + else: + import pdb + pdb.set_trace() + if len(markup_down)==len(price_down_lst): + price_down_lst= [i[0] +i[1] for i in zip(markup_down, price_down_lst)] + else: + import pdb + pdb.set_trace() + return (price_up_lst,price_down_lst) + + if self.strategy['RDM_pricing'] =='srmc': +# price_up_lst = [int(a.srmc)] * len(dayindex) +# price_down_lst = [int(a.srmc)] * len(dayindex) + price_start_lst = [int(a.srmc)] * len(startblocks[0]) + price_stop_lst= [int(a.srmc)] * len(stopblocks[0]) + elif (self.strategy['RDM_pricing']=='all_markup')|( + self.strategy['RDM_pricing']=='opportunity_markup'): + #opportunity markup + opportunity_markup_up = self.opportunity_markup( + direction='upward', of_quantity = qty_up_lst, + asset = a,success_assumption = 'offered_quantity') + opportunity_markup_down = self.opportunity_markup( + direction='downward', of_quantity = qty_down_lst, + asset = a, success_assumption = 'offered_quantity') + #mark-ups added to price list + price_up_lst, price_down_lst = add_markups_to_price_list( + price_up_lst, price_down_lst, + opportunity_markup_up[gate_closure_MTU:], opportunity_markup_down[gate_closure_MTU:]) + if (self.strategy['RDM_pricing']=='all_markup')|( + self.strategy['RDM_pricing']=='startstop_markup'): + #prices for start and stop blocks + start_markup = self.startstop_markup( + direction = 'upward', of_quantity = startblocks, asset = a, + gct = gate_closure_MTU, partial_call = False) + stop_markup = self.startstop_markup( + direction = 'downward', of_quantity = stopblocks, + asset = a, gct = gate_closure_MTU, partial_call = False) + price_start_lst, price_stop_lst = add_markups_to_price_list( + [a.srmc] * len (start_markup), [a.srmc] * len (stop_markup), + start_markup, stop_markup) + + if (self.strategy['RDM_pricing']=='all_markup')|( + self.strategy['RDM_pricing']=='ramping_markup'): + #ramping mark-up + ramp_markup_up =self.ramping_markup(direction='upward', + of_quantity = qty_up_lst, + asset = a) + ramp_markup_down =self.ramping_markup(direction='downward', + of_quantity = qty_down_lst, + asset = a) + #mark-ups added to price list + price_up_lst, price_down_lst = add_markups_to_price_list( + price_up_lst, price_down_lst, + ramp_markup_up[gate_closure_MTU:], ramp_markup_down[gate_closure_MTU:]) + if (self.strategy['RDM_pricing']=='all_markup')|(( + self.strategy['RDM_pricing']=='double_scoring_markup')&( + self.strategy['RDM_quantity']!='not_offered_plus_startstop')): + #mark-up for double scoring risk on two markets + doublescore_markup_up =self.doublescore_markup(direction='upward', + of_quantity = qty_up_lst, + asset = a) + 
doublescore_markup_down =self.doublescore_markup(direction='downward', + of_quantity = qty_down_lst, + asset = a) + #mark-ups added to price list + price_up_lst, price_down_lst = add_markups_to_price_list( + price_up_lst, price_down_lst, + doublescore_markup_up[gate_closure_MTU:], doublescore_markup_down[gate_closure_MTU:]) + + + #length of attribute lists + length =len(dayindex) * 2 + len(startblocks[0]) + len(stopblocks[0]) + #per order attribute a list over all agent assets is built + asset_location_lst += [a.location] * length + agentID_lst += [self.unique_id]* length + assetID_lst += [a.assetID]* length + init_lst += [init] * length + #note that the sorting of lists is important + direction_lst += ['buy'] * len(dayindex) + ['sell'] *len(dayindex) +['sell'] * len(startblocks[0])+['buy'] * len(stopblocks[0]) + ordertype_lst += ['redispatch_supply']* length + delivery_duration += [1] * len(dayindex) * 2 + startblocks[2] + stopblocks[2] + qty_lst += qty_down_lst[gate_closure_MTU:] + qty_up_lst[gate_closure_MTU:] + [a.pmin] *( + len(startblocks[0]) + len(stopblocks[0])) + price_lst += price_down_lst + price_up_lst + price_start_lst + price_stop_lst + day_lst += dayindex * 2 + startblocks[0] + stopblocks[0] + mtu_lst += timeindex * 2 + startblocks[1] + stopblocks[1] + else: + raise Exception ('redispatch timing strategy not known') + + orders = DataFrame() + #NOTE: important to have same ranking of columns and values + columns=['agent_id','associated_asset','delivery_location', + 'quantity','price', 'delivery_day','delivery_time', + 'order_type','init_time', 'direction', 'delivery_duration'] + + values =[agentID_lst,assetID_lst, asset_location_lst, + qty_lst, price_lst, day_lst,mtu_lst, + ordertype_lst, init_lst, direction_lst, delivery_duration] + + #make dataframe per column to maintain datatype of lists (otherwise seen as objects by pandas) + for i in range(len(columns)): + orders[columns[i]]=values[i] + #remove 0 MW rows + orders = orders.loc[orders['quantity']!=0].copy() + + if not orders.empty: + # insert order IDs (must be at second last column because of many dependencies) + orders.insert(loc = len(orders.columns)-2,column='order_id', + value =list(range(self.ordercount, self.ordercount + len(orders))) ) + orders['order_id']=orders['agent_id'] + orders['associated_asset'] + orders['order_id'].astype(str) + + #order count for the order ID + self.ordercount += len(orders) + order_message = OrderMessage(orders) + self.model.red_obook.add_order_message(order_message) + + + def place_ID_orders(self): + """ + Method: determine order quantity and order price and other order attributes. + Then make an order message and send it to the intra-day order book. + + Note: order messages contain many orders. To reduce computation time, the order messages + are composed from lists, instead of manipulating DataFrames. This makes + the sorting of the content of the lists (i.e. the filling of the lists) crucial. 
+ """ + #check if redipatch is part of simulation task + if self.model.exodata.sim_task['run_IDM[y/n]']=='n': + print('Agent {}:no intraday market in simlulation task'.format(self.unique_id)) + elif self.strategy['IDM_quantity']=='None': + #this agent does not participate in the redispatch market + print('Agent {}:does not participate in redispatch market'.format(self.unique_id)) + pass + else: + #first delete all ID orders from previous round from orderbook + self.model.IDM_obook.delete_orders(agent_id_orders = self.unique_id) + + print("Agent {} makes ID bids".format(self.unique_id)) + #lists per order attribute + asset_location_lst = [] + agentID_lst = [] + assetID_lst = [] + init_lst = [] + direction_lst = [] + ordertype_lst = [] + qty_lst=[] + price_lst=[] + day_lst=[] + mtu_lst=[] + delivery_duration =[] + gate_closure_MTU = self.model.ID_marketoperator.gate_closure_time + dayindex = list(self.model.schedules_horizon.index.get_level_values(0))[gate_closure_MTU:] + timeindex = list(self.model.schedules_horizon.index.get_level_values(1))[gate_closure_MTU:] + #order initiation for up to 1000 agents (shows when order is initiated within one simulation step) + init = self.step_rank/1000 +self.model.clock.get_MTU() + + otype = 'intraday_limit_order' + #get all assets of that market party + all_ids = self.assets.index.values + + if (self.strategy['IDM_quantity']=='random_plus_cond_startstop')|( + self.strategy['IDM_quantity']=='all_plus_cond_startstop'): + #quantity strategies with conditional start stop capacity considers + #redispatch activation in previous simulation step. This strategy may be required for IDCONS. + prev_day, prev_mtu= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0) + if not self.model.red_obook.redispatch_demand_upward_all_df.empty: + #get redispatch demand from previous step + red_demand_upward =self.model.red_obook.redispatch_demand_upward_all_df.loc[ + self.model.red_obook.redispatch_demand_upward_all_df['offer_daytime']==(prev_day, prev_mtu)].copy() + red_demand_upward = red_demand_upward.groupby(by=['delivery_day', 'delivery_time']).first() + else: + red_demand_upward= DataFrame(columns=['quantity']) + if not self.model.red_obook.redispatch_demand_downward_all_df.empty: + red_demand_downward =self.model.red_obook.redispatch_demand_downward_all_df.loc[ + self.model.red_obook.redispatch_demand_downward_all_df['offer_daytime']==(prev_day, prev_mtu)] + red_demand_downward = red_demand_downward.groupby(by=['delivery_day', 'delivery_time']).first() + else: + red_demand_downward= DataFrame(columns=['quantity']) + #concat upward and downward demand + red_demand =pd.concat([red_demand_upward['quantity'],red_demand_downward['quantity']], axis=1) + for i in range(len(all_ids)): + a = self.assets.loc[all_ids[i],:].item() + #the following lists are required for blockorders + startblocks =[[],[],[]] + stopblocks = [[],[],[]] + price_start_lst = [] + price_stop_lst= [] + if self.strategy['IDM_timing']=='instant': + if (self.strategy['IDM_quantity']=='random')|( + self.strategy['IDM_quantity']=='random_plus_cond_startstop'): + #'random' quantity strategy determines small random quantity + qty_up_lst = self.small_random_quantity(a.schedule[['available_up','p_max_t']].loc[ + self.model.schedules_horizon.index].fillna(0)) + qty_down_lst = self.small_random_quantity(a.schedule[['available_down','p_max_t']].loc[ + self.model.schedules_horizon.index].fillna(0)) + elif (self.strategy['IDM_quantity']=='all_operational')|( + 
self.strategy['IDM_quantity']=='all_plus_cond_startstop'): + qty_up_lst = list(a.schedule['available_up'].loc[ + self.model.schedules_horizon.index].fillna(0).astype(int)) + qty_down_lst = list(a.schedule['available_down'].loc[ + self.model.schedules_horizon.index].fillna(0).astype(int)) + else: + raise Exception('IDM quantity strategy not known') + if (self.strategy['IDM_quantity']=='random_plus_cond_startstop')|( + self.strategy['IDM_quantity']=='all_plus_cond_startstop'): + #'all_operational' includes all available capacity, excluding start stop. + #'all__plus_cond_start_stop' means all available capacity including start stop, in case redispatch was activated in the last round. + # This conditional start stop strategy is relevant for IDCONS-style (redispatch via intraday market) market designs. + + #store current asset schedule (excluding the past) for calculation of dispatch contraints from redispatch + a.schedule_at_redispatch_bidding= a.schedule.loc[self.model.schedules_horizon.index].copy() + av_cap = pd.concat([a.schedule.loc[self.model.schedules_horizon.index[gate_closure_MTU:]], + a.constraint_df.loc[self.model.schedules_horizon.index[ + gate_closure_MTU:],'upward_commit'].copy()], axis=1) + + if not red_demand.empty: + #startblocks are only calculated if demand in previous step is not empty + startblocks, stopblocks = self.start_stop_blocks (av_cap, \ + a.pmin, a.min_up_time, a.min_down_time,a.assetID) + #delete blocks that do not overlap with redispatch demand + for i in range(len(startblocks[0])-1,-1,-1): + #reverse range is used to delete from list without changing index + days, mtus = self.model.clock.calc_delivery_period_range( + startblocks[0][i], + startblocks[1][i], + startblocks[2][i]) + #add start day and MTU to the lists + days = [startblocks[0][i]]+days + mtus = [startblocks[1][i]]+mtus + + if red_demand.loc[red_demand.index.isin(list(zip(days,mtus)))].empty: + #remove from lists, because available startblock + #does not overlap with previous redisaptch demand + del startblocks[0][i] + del startblocks[1][i] + del startblocks[2][i] + for i in range(len(stopblocks[0])-1,-1,-1): + #reverse range is used to delete from list without changing index + days, mtus = self.model.clock.calc_delivery_period_range( + stopblocks[0][i], + stopblocks[1][i], + stopblocks[2][i]) + #add start day and MTU to the lists + days = [stopblocks[0][i]]+days + mtus = [stopblocks[1][i]]+mtus + if red_demand.loc[red_demand.index.isin(list(zip(days,mtus)))].empty: + #remove from lists, because available stopblock + #does not overlap with previous redisaptch demand + del stopblocks[0][i] + del stopblocks[1][i] + del stopblocks[2][i] + otype = 'IDCONS_order' + + #ORDER PRICING + #short-run marginal costs are fundamental price to which mark-ups are added + price_up_lst = [int(a.srmc)] * len(dayindex) + price_down_lst = [int(a.srmc)] * len(dayindex) + + def add_markups_to_price_list(price_up_lst, price_down_lst, markup_up, markup_down): + if len(markup_up)==len(price_up_lst): + price_up_lst = [i[0] +i[1] for i in zip(markup_up,price_up_lst)] + else: + import pdb + pdb.set_trace() + if len(markup_down)==len(price_down_lst): + price_down_lst= [i[0] +i[1] for i in zip(markup_down, price_down_lst)] + #aa=[i[0] +i[1] for i in zip(opportunity_markup_down, price_down_lst)] + else: + import pdb + pdb.set_trace() + return (price_up_lst, price_down_lst) + + if self.strategy['IDM_pricing'] == 'srmc+-1': + #Pricing strategy with fixed 1 Eur mark-up + price_up_lst = [int(a.srmc)+1] * len(dayindex) + price_down_lst 
= [int(a.srmc)-1] * len(dayindex) + elif (self.strategy['IDM_pricing'] == 'marginal_orderbook_strategy')|( + self.strategy['IDM_pricing'] == 'marg_obook_plus_startstop_plus_partialcall'): + #opportunity mark-ups + opportunity_markup_up = self.opportunity_markup( + direction='upward', of_quantity = qty_up_lst, + asset = a,success_assumption = 'offered_quantity') + opportunity_markup_down = self.opportunity_markup( + direction='downward', of_quantity = qty_down_lst, + asset = a, success_assumption = 'offered_quantity') + #add opportunity mark-up to fundamental costs (aka indifference price) + price_up_lst, price_down_lst = add_markups_to_price_list( + price_up_lst, price_down_lst, + opportunity_markup_up[gate_closure_MTU:], opportunity_markup_down[gate_closure_MTU:]) + + + if (len(price_up_lst) > 0) & (len(price_down_lst) > 0): + #prices including opportunity and intraday 'open order book' mark up. + # Note: price list is enlarged, as mark_up method needs to work with schedules_horizon + # (not considering gate closure time) + price_up_lst = self.intraday_markup([0]*gate_closure_MTU + price_up_lst, 'sell') + price_down_lst = self.intraday_markup([0]*gate_closure_MTU + price_down_lst, 'buy') + + #slice list to consider gate closure time + price_up_lst = price_up_lst[gate_closure_MTU:] + price_down_lst = price_down_lst[gate_closure_MTU:] + + if self.strategy['IDM_pricing'] == 'marg_obook_plus_startstop_plus_partialcall': + #start and stop capacity prices for IDCONS include a partial call markup + #(assuming that also start stop offers could be partially cleared in line with limit orders) + price_start_lst, start_markup = self.startstop_markup( + direction = 'upward', of_quantity = startblocks, + asset = a, gct = gate_closure_MTU, partial_call=True) + price_stop_lst, stop_markup = self.startstop_markup( + direction = 'downward', of_quantity = stopblocks, + asset = a, gct = gate_closure_MTU, partial_call=True) + if self.strategy['IDM_quantity']=='all_plus_cond_startstop': + #Ramping markups are only required for strategies with larget ('all') quanities. + #Small random quantities do not need a ramping mark-up, as no additional risk is imposed. 
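+                        #Illustrative sketch (toy values): mark-ups are added element-wise
+                        #to the srmc-based price lists via zip, e.g.
+                        #  price_up_lst = [30, 30, 30] and ramp_markup_up = [0, 2, 5]
+                        #  [m + p for m, p in zip(ramp_markup_up, price_up_lst)]  # -> [30, 32, 35]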
+ ramp_markup_up =self.ramping_markup(direction='upward', + of_quantity = qty_up_lst, + asset = a) + ramp_markup_down =self.ramping_markup(direction='downward', + of_quantity = qty_down_lst, + asset = a) + price_up_lst, price_down_lst = add_markups_to_price_list( + price_up_lst, price_down_lst, + ramp_markup_up[gate_closure_MTU:], ramp_markup_down[gate_closure_MTU:]) + else: + raise Exception ('IDM pricing strategy not known') + length =len(dayindex) * 2 + len(startblocks[0]) + len(stopblocks[0]) + #per order attribute a list over all agent assets is built + asset_location_lst += [a.location] * length + agentID_lst += [self.unique_id]* length + assetID_lst += [a.assetID]* length + init_lst += [init] * length + direction_lst += ['buy'] * len(dayindex) + ['sell'] *len(dayindex) +['sell'] * len(startblocks[0])+['buy'] * len(stopblocks[0]) + ordertype_lst += [otype]* length + delivery_duration += [1] * len(dayindex) * 2 + startblocks[2] + stopblocks[2] + qty_lst += qty_down_lst[gate_closure_MTU:] + qty_up_lst[gate_closure_MTU:] + [a.pmin] *( + len(startblocks[0]) + len(stopblocks[0])) + price_lst += price_down_lst + price_up_lst + price_start_lst + price_stop_lst + day_lst += dayindex * 2 + startblocks[0] + stopblocks[0] + mtu_lst += timeindex * 2 + startblocks[1] + stopblocks[1] + else: + raise Exception('IDM timing strategy not known') + #end intra-day orders attributes from available asset capacity + + #place intraday orders for the imbalance position of the agent (i.e. not from asset capacity) + #this section is actually driven by the imbalance strategy of the agent. + if (self.trade_schedule['imbalance_position'].fillna(value=0).astype(int)!=0).any(): + + imb = DataFrame(columns=['imbalance_position','direction','price']) + imb['imbalance_position'] = self.trade_schedule['imbalance_position'].fillna(value=0) + imb = imb.loc[imb['imbalance_position'] !=0].copy() + #filter out all imbalance of past delivery MTUs inlcuding gate closure time (because it is too late) + mask= imb.index.isin(self.model.schedules_horizon.iloc[gate_closure_MTU:].index) + imb = imb.loc[mask].copy() + if imb.empty: + #no orders needed + pass + else: + print ('making IDM orders to mitigate imbalances') + imb['direction'] = None + imb['price'] = None + + #try to sell for a long imbalance position + imb.loc[imb['imbalance_position']>0, 'direction']= 'sell' + #try to buy for a short imbalance position + imb.loc[imb['imbalance_position']<0, 'direction']= 'buy' + + #the imbalance risk price is an maximum price a agent would accept for a MTU + #imbalance price for buy is positive IB risk price, for sell is negative IB risk price + imb.loc[imb['imbalance_position']>0, 'price']= -self.imbalance_risk_price + imb.loc[imb['imbalance_position']<0, 'price']= self.imbalance_risk_price + if (self.strategy['IBM_quantity']=='random'): + #IBM quantity strategy 'random' assumes a patient agent only slowly trading the open positions + + #make negative (short) positions positive buy quantity values + #price is only provided to have dataframe + imb.loc[imb['imbalance_position']>0, 'imbalance_position']= self.small_random_quantity( + imb.loc[imb['imbalance_position']>0, ['imbalance_position','price']]) + imb.loc[imb['imbalance_position']<0, 'imbalance_position']= self.small_random_quantity( + imb.loc[imb['imbalance_position']<0, ['imbalance_position','price']].abs()) + qty_lst += list(imb['imbalance_position'].astype(int)) + elif (self.strategy['IBM_quantity']=='all'): + #IBM quantity strategy 'all' assumes an impatient agent, who quickly 
trades all open positions + # make negative (short) positions positive buy quantity values + imb['imbalance_position']= imb['imbalance_position'].abs().astype(int) + qty_lst += list(imb['imbalance_position']) + + elif (self.strategy['IBM_quantity']=='impatience_curve'): + """ agents with imbalance provide partly market and partly limit orders, + depending on the time before delivery""" + #impatience curve + imp_curve= DataFrame() + + #this impatient curve is shaped like cumulated ID trade quantity before delivery in NL (2016) + #other impatience curves may be provided here. To do: make this a parameter. + imp_curve['mtu_before_delivery'] = [8,12,16,20,24,28,32,36,40,2*96] + imp_curve['offer_share_market_orders'] =[1,0.85,0.7,0.5,0.4,0.3,0.25,0.2,0.15,0.1] + #add new columns for market order (MO) quantity and limit order (LO) quantity + imb['MO_vol']=None + imb['LO_vol']=None + for i in range(len(imp_curve)): + #select MTUs per imb_curve bin + if i == 0: + lt_idx = self.model.schedules_horizon.index.values[:imp_curve['mtu_before_delivery'].iloc[i]] + else: + lt_idx = self.model.schedules_horizon.index.values[imp_curve[ + 'mtu_before_delivery'].iloc[i-1]:imp_curve['mtu_before_delivery'].iloc[i]] + #multiply imbalance quantity with respective respective share to be placed as market orders and limit orders + imb.loc[imb.index.isin(lt_idx), 'MO_vol']=imb.loc[imb.index.isin(lt_idx), 'imbalance_position' + ] *imp_curve['offer_share_market_orders'].iloc[i] + imb.loc[imb.index.isin(lt_idx), 'LO_vol']=imb.loc[imb.index.isin(lt_idx), 'imbalance_position' + ] *(1-imp_curve['offer_share_market_orders'].iloc[i]) + #make a limit order quantity list (must be in shape of schedules_horizon) + imb_LO_vol =self.model.schedules_horizon.copy() + #add imbalance quantity to be placed as limit order + imb_LO_vol['commit'] = imb_LO_vol['commit'].add(imb['LO_vol'], fill_value=0) + #Limit order quantity is reduced to small random strategy (to hide the total open position in the open orderbook) + imb_LO_vol['small_random_limit_order']=self.small_random_quantity( + imb_LO_vol['commit'].abs().astype(int).to_frame()) + qty_down_lst =list(imb_LO_vol['small_random_limit_order'].abs().where(imb_LO_vol['commit'] < 0, 0).astype(int)) + qty_up_lst = list(imb_LO_vol['small_random_limit_order'].where(imb_LO_vol['commit'] > 0, 0).astype(int)) + #add to vol attribute lists (attention: market orders are added before limit orders to list) + qty_lst += list(imb['MO_vol'].abs().astype(int)) + qty_down_lst[gate_closure_MTU:] + qty_up_lst[gate_closure_MTU:] + else: + raise Exception('imbalance quantity strategy not known') + + if self.strategy.loc['IBM_pricing']=='market_order_strategy': + #strategy: all open positions are placed as market orders + ordertype_lst += ['market_order'] * len(imb) + price_lst +=list(imb['price']) + elif self.strategy.loc['IBM_pricing']=='marginal_orderbook_strategy': + #strategy takes the expected imbalance price as srmc price and applies the intra-day 'open order book' pricing strategy. 
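+                #Illustrative sketch (hypothetical values): with an expected short price of
+                #55 EUR/MWh and an expected long price of 45 EUR/MWh, intraday_markup()
+                #turns these indifference prices into limit order prices against the open
+                #order book, so a long agent sells around 45 and a short agent buys around
+                #55 rather than falling back to the administrative imbalance risk price.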
+ ordertype_lst += ['intraday_limit_order'] * len(imb) + imb['eIBP_long'] = self.intraday_markup(list(self.model.rpt.eIBP['expected_IBP_long']), 'sell')[gate_closure_MTU:] + imb['eIBP_short'] = self.intraday_markup(list(self.model.rpt.eIBP['expected_IBP_short']), 'buy')[gate_closure_MTU:] + price_lst += list(imb['eIBP_long'].where(imb['direction']=='sell',imb['eIBP_short'])) + elif self.strategy.loc['IBM_pricing']=='impatience_curve': + ordertype_lst += ['market_order'] * len(imb) + #the limit orders are placed with intra-day mark-ups, + #which take the expected imbalance price as srmc price. + #the market orders have the administrative high imbalance price. + price_up_lst = self.intraday_markup(list(self.model.rpt.eIBP['expected_IBP_long']), 'sell') + price_down_lst = self.intraday_markup(list(self.model.rpt.eIBP['expected_IBP_short']), 'buy') + #slice list to consider gate closure time + price_up_lst = price_up_lst[gate_closure_MTU:] + price_down_lst = price_down_lst[gate_closure_MTU:] + #first imbalance risk price is added for market orders (imb['price']), then prices for limit orders are added + price_lst +=list(imb['price']) + price_down_lst + price_up_lst + else: + raise Exception('imbalance pricing strategy not known') + + #add other order attributes + direction_lst += list(imb['direction']) + day_lst += list(imb.index.get_level_values(0)) + mtu_lst += list(imb.index.get_level_values(1)) + asset_location_lst += ['anywhere'] * len(imb) + agentID_lst += [self.unique_id] * len(imb) + assetID_lst += ['imbalance'] * len(imb) + init_lst += [init] * len(imb) + delivery_duration += [1] * len(imb) + if (self.strategy['IBM_quantity']=='impatience_curve'): + #add limit orders to attribute lists + length =len(dayindex) * 2 + asset_location_lst += ['anywhere'] * length + agentID_lst += [self.unique_id]* length + assetID_lst += ['imbalance']* length + init_lst += [init] * length + direction_lst += ['buy'] * len(dayindex) + ['sell'] *len(dayindex) + ordertype_lst += ['intraday_limit_order']* length + delivery_duration += [1] * length + day_lst += dayindex * 2 + mtu_lst += timeindex * 2 + + #make order dataframe from lists + orders = DataFrame() + columns=['agent_id','associated_asset','delivery_location', + 'quantity','price', 'delivery_day','delivery_time', + 'order_type','init_time', 'direction' , 'delivery_duration'] + values = [agentID_lst,assetID_lst, asset_location_lst, + qty_lst, price_lst, day_lst, mtu_lst, + ordertype_lst, init_lst, direction_lst, delivery_duration] + + #make dataframe per column to maintain datatype of lists (otherwise seen as objects by pandas) + for i in range(len(columns)): + orders[columns[i]]=values[i].copy() + #remove 0 MW rows + orders = orders.loc[orders['quantity']!=0].copy() + + if not orders.empty: + # insert order IDs (must be at second last column because of many dependencies) + orders.insert(loc = len(orders.columns)-2,column='order_id', + value =list(range(self.ordercount, self.ordercount + len(orders))) ) + orders['order_id']=orders['agent_id'] + orders['associated_asset'] + orders['order_id'].astype(str) + + #order count for the order ID + self.ordercount += len(orders) + order_message = OrderMessage(orders.copy()) + self.model.IDM_obook.add_order_message(order_message) + if not (orders['order_type']== 'IDCONS_order').empty: + #In case of IDCONS orders these orders also need to be stored in the redispatch orderbook + #for redispatch statistics + self.model.red_obook.add_order_message(OrderMessage(orders.loc[ + orders['order_type']== 'IDCONS_order', 
orders.columns])) + + #make ID bid triggerst immediate ID clearing + self.model.ID_marketoperator.clear_intraday(for_agent_id =self.unique_id) + + def place_BE_orders(self): + """ + Method: determine order quantity and order price and other order attributes. + Then make an order message and send it to the intra-day order book. + + Note: order messages contain many orders. To reduce computation time, the order messages + are composed from lists, instead of manipulating DataFrames. This makes + the sorting of the content of the lists (i.e. the filling of the lists) crucial. + """ + if self.model.exodata.sim_task['run_BEM[y/n]']=='n': + pass + else: + if (self.strategy['BEM_timing']=='at_gate_closure')|( + self.strategy['BEM_quantity']=='available_ramp')|( + self.strategy['BEM_pricing']=='srmc'): + print("Agent {} makes BE bids".format(self.unique_id)) + #first delete all redispatch orders from previous round from orderbook + self.model.BEM_obook.delete_orders(agent_id_orders = self.unique_id) + asset_location_lst = [] + agentID_lst = [] + assetID_lst =[] + init_lst = [] + direction_lst = [] + ordertype_lst = [] + qty_lst=[] + price_lst=[] + day_lst=[] + mtu_lst=[] + delivery_duration =[] + gate_closure_MTU = self.model.BE_marketoperator.gate_closure_time + if gate_closure_MTU = 1)].groupby( + by = ['min_vol','max_vol']): + + #make a random seed that is linked to the randomness of agent rank + seed= self.step_rank + self.model.schedule.steps + av_cap.loc[group.index,'rand_vol'] = np.random.RandomState(seed + ).randint(group['min_vol'].iloc[0], group['max_vol'].iloc[0]) + + if av_cap['rand_vol'].isnull().any(): + import pdb + pdb.set_trace() + return (list(av_cap['rand_vol'])) + + def start_stop_blocks (self, av_cap, pmin, min_up_time, min_down_time, assetID): + """Method: identifies consecutive MTUs of asset schedule at pmin + or at 0 MW. These MTU-blocks can be used for start-up or shut-down orders, if these periods + are >= min_up_time (for upward orders) or min_down_time (for downward orders). + + Assumption of this method: if a (MTU-block) period is < 2 * min_up_time or min_down_time + then the entire period is offered in one order. When >= + 2 * min_up_time or min_down_time, then several blocks with lengths of min_up_time or down_time are selected. + Note: + - The method returns two nested lists instead of DataFrames to be consistent with + list approach of make_bid methods (is faster than append to DF). + - One list for orders from start-up capacity one list for shut-down capacity. + - Each list contains lists with delivery day, delivery time and delivery duration. + [startdelday,startdeltime,startduration], [stopdelday, stopdeltime, stopduration] + - The method implicitly assumes that the quantity of these start/ stop blocks is pmin + - It has to be enshured that provided time series of av_cap starts with gate-closure time + (i.e. timestamps as of which the blocks shall be calculated) + """ + + if (pmin == 0) | (av_cap.empty): + #no dedicated stop pr start orders needed because the full range can be provided. + #Also if available capacity is empty. + #empty nested list return + return([[],[],[]],[[],[],[]]) + + def count_if_value(x,value): + #x is pandas.rolling object + amount = np.count_nonzero(np.where(x == value,1,0)) + return(amount) + + #check that no outages and stop commitments lead to new startblocks. 
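+        #Illustrative sketch (toy values) of the rolling block detection used below:
+        #  commit = Series([0, 0, 0, 0, 5])   # 5 == pmin
+        #  commit.rolling(window=3).apply(lambda x: count_if_value(x, 0))
+        #  # -> [NaN, NaN, 3, 3, 2]: a result equal to min_up_time (here 3) marks a
+        #  # window of consecutive 0-MW MTUs that can host a start-up order block.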
+ #check that no outages and stop commitments lead to new startblocks.
+ av_cap['commit'] = av_cap['commit'].where(av_cap['p_max_t']!=0, -1)
+ av_cap['commit'] = av_cap['commit'].where(~((av_cap['commit']==pmin)&(av_cap['upward_commit']!=0)), -1)
+ av_cap.reset_index(inplace=True)
+
+ #av_cap timeseries has to start with gate-closure time.
+ gate_closure_time = (av_cap['delivery_day'].iloc[0], av_cap['delivery_time'].iloc[0])
+ #search feasible start blocks (count lengths/duration of a block)
+ av_cap['feasible_startblock'] =av_cap['commit'].rolling(window=min_up_time).apply(
+ lambda x: count_if_value(x,0))
+ #calculate the possible start time of each block
+ av_cap['del_time_startblock'] = av_cap.apply(lambda x: self.model.clock.calc_delivery_period_end(
+ (x['delivery_day'],x['delivery_time']), -min_up_time+2),axis=1)
+ #remove blocks with start before gate-closure time
+ av_cap['feasible_startblock'] =av_cap['feasible_startblock'].where(
+ av_cap['del_time_startblock']>=gate_closure_time, np.nan)
+
+ #search feasible stop blocks
+ av_cap['feasible_stopblock'] =av_cap['commit'].rolling(window=min_down_time).apply(
+ lambda x: count_if_value(x,pmin))
+ av_cap['del_time_stopblock'] = av_cap.apply(lambda x: self.model.clock.calc_delivery_period_end(
+ (x['delivery_day'],x['delivery_time']), -min_down_time+2),axis=1)
+ #remove blocks with start before gate-closure time
+ av_cap['feasible_stopblock'] =av_cap['feasible_stopblock'].where(
+ av_cap['del_time_stopblock']>=gate_closure_time, np.nan)
+ i =0
+ startdelday=[]
+ startdeltime =[]
+ startduration =[]
+ stopdelday =[]
+ stopdeltime =[]
+ stopduration =[]
+
+ #block selection
+ while i < len(av_cap):
+ #selection start-up blocks
+ if av_cap['feasible_startblock'].iloc[i] == min_up_time:
+ #Subsequent block with duration min_up_time lies outside horizon.
+ #Last feasible block of horizon has duration == min_up_time
+ if i + min_up_time-1 >= len(av_cap) -1:
+ #select block with duration min_up_time -1 + (len(av_cap) -i)
+ if av_cap['feasible_startblock'].iloc[len(av_cap)-1] == min_up_time:
+ startduration += [min_up_time-1 + (len(av_cap) -i)]
+ startdelday += [int(av_cap['del_time_startblock'].iloc[i][0])]
+ startdeltime += [int(av_cap['del_time_startblock'].iloc[i][1])]
+ #jump accordingly in while loop
+ i += len(av_cap)-i
+ #Last feasible block of horizon has duration < min_up_time
+ else:
+ #search last feasible block before av_cap['feasible_startblock'].iloc[i +k]
+ k = 1
+ while av_cap['feasible_startblock'].iloc[i +k]==min_up_time:
+ k += 1
+ startduration += [min_up_time -1 + k]
+ startdelday += [int(av_cap['del_time_startblock'].iloc[i][0])]
+ startdeltime += [int(av_cap['del_time_startblock'].iloc[i][1])]
+ i += k
+ #block lies within horizon and has duration == min_up_time
+ #and there is a subsequent block == min_up_time
+ elif av_cap['feasible_startblock'].iloc[i + min_up_time-1] == min_up_time:
+ #select block with duration min_up_time
+ startduration += [min_up_time]
+ startdelday += [int(av_cap['del_time_startblock'].iloc[i][0])]
+ startdeltime += [int(av_cap['del_time_startblock'].iloc[i][1])]
+ i += min_up_time
+ #subsequent block has duration < min_up_time.
+ else:
+ #search last feasible block before av_cap['feasible_startblock'].iloc[i + min_up_time-1]
+ k = 1
+ while av_cap['feasible_startblock'].iloc[i +k]==min_up_time:
+ k += 1
+ #select block with duration min_up_time -1 + k
+ startduration += [min_up_time -1 + k]
+ startdelday += [int(av_cap['del_time_startblock'].iloc[i][0])]
+ startdeltime += [int(av_cap['del_time_startblock'].iloc[i][1])]
+ i += k
+ #selection shut-down blocks
+ elif av_cap['feasible_stopblock'].iloc[i] == min_down_time:
+ if i + min_down_time-1 >= len(av_cap)-1:
+ if av_cap['feasible_stopblock'].iloc[len(av_cap)-1] == min_down_time:
+ stopduration += [min_down_time -1 + (len(av_cap) -i)]
+ stopdelday += [int(av_cap['del_time_stopblock'].iloc[i][0])]
+ stopdeltime += [int(av_cap['del_time_stopblock'].iloc[i][1])]
+ i += len(av_cap)-i
+ else:
+ k = 1
+ while av_cap['feasible_stopblock'].iloc[i +k] == min_down_time:
+ k += 1
+ stopduration += [min_down_time -1 + k]
+ stopdelday += [int(av_cap['del_time_stopblock'].iloc[i][0])]
+ stopdeltime += [int(av_cap['del_time_stopblock'].iloc[i][1])]
+ i += k
+ elif av_cap['feasible_stopblock'].iloc[i + min_down_time-1] == min_down_time:
+ stopduration += [min_down_time]
+ stopdelday += [int(av_cap['del_time_stopblock'].iloc[i][0])]
+ stopdeltime += [int(av_cap['del_time_stopblock'].iloc[i][1])]
+ i += min_down_time
+ else:
+ k = 1
+ while av_cap['feasible_stopblock'].iloc[i +k]==min_down_time:
+ k += 1
+ stopduration += [min_down_time -1 + k]
+ stopdelday += [int(av_cap['del_time_stopblock'].iloc[i][0])]
+ stopdeltime += [int(av_cap['del_time_stopblock'].iloc[i][1])]
+ i += k
+ else:
+ i+=1
+ return ([startdelday,startdeltime,startduration], [stopdelday, stopdeltime, stopduration])
+
+
+ def opportunity_markup(self, direction='upward', of_quantity = None, asset = None,
+ success_assumption=None, MTU_of_h_consideration=False, unit_test= None):
+ """
+ Method:
+ - Returns a list of price mark-ups for opportunity costs with the length of the schedule horizon.
+ - Assumption is that the market time unit of the quantity is 15 minutes,
+ meaning the order quantity is divided by 4 to get the EUR/MWh.
+ - This opportunity mark-up method considers an imbalance market as opportunity to trade the offered capacity.
+ (alternative methods could use the intra-day market as the determining opportunity)
+ - Opportunity costs are obtained from an input dataframe with pre-calculated opportunity costs
+ depending on srmc, day-ahead price and (optionally) the MTU of an hour (MTU_of_h_consideration).
+ Please consult the documentation for more information.
+ - The method is structured as follows:
+ 1. Data collection
+ 2. Risk quantity determination
+ 3. Risk price determination
+ 4. Mark-up determination
+
+ Note:
+ - only order duration == 1 MTU (15 minutes) is considered.
+ - When MTU_of_h_consideration==True, the method makes a distinction regarding the MTU of the hour.
+ For this option the input data (self.model.exodata.opportunity_costs_db) must be delivered accordingly
+ with a column 'PTU_of_an_hour' element {1,2,3,4}"""
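The pre-calculated table is queried per MTU by price series, day-ahead-price bin and K-value (the srmc clamped to the table's K range). A minimal sketch of such a lookup with an invented one-row table; the column names mirror the lookups below, and the 'IB_price_short'/'IB_price_long' labels are assumed to match those used in Reports.update_expected_IBP.

    import pandas as pd

    # invented single-bin table; the real one is exodata.opportunity_costs_db
    odf = pd.DataFrame([{'price_data': 'IB_price_short', 'DAP_left_bin': 20,
                         'DAP_right_bin(excl)': 40, 'K-value': 30,
                         'Opp_costs_for_K': 12}])
    DAP, K = 35, 30  # day-ahead price and (clamped) srmc for the lookup
    value = odf.loc[(odf['price_data'] == 'IB_price_short')
                    & (odf['DAP_left_bin'] <= DAP)
                    & (odf['DAP_right_bin(excl)'] > DAP)
                    & (odf['K-value'] == K), 'Opp_costs_for_K'].iloc[0]
    print(value)  # -> 12, the intrinsic opportunity value in EUR/MWh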
+ #DATA COLLECTION
+ if not unit_test:
+ #day-ahead prices needed to estimate imbalance prices and then opportunity costs
+ if self.model.rpt.prices['DAP'].isnull().all():
+ print('no day-ahead prices available. Possibly because DAM is not run.')
+ print('Default DA price of 30 EUR/MWh used')
+ MTU =list(self.model.schedules_horizon.index.get_level_values(1))
+ DAP= [30] * len(MTU)
+ else:
+ DAP = self.model.rpt.prices['DAP'].loc[self.model.schedules_horizon.index].copy()
+ MTU = list(DAP.index.get_level_values(1))
+ DAP =DAP.tolist()
+
+ if MTU_of_h_consideration==True:
+ #MTU of hour list needed to get the specific opportunity cost distribution function
+ MTU_of_h = []
+ for mtu in MTU:
+ mtu_of_h = mtu%4
+ if mtu_of_h == 0:
+ mtu_of_h=4
+ MTU_of_h += [mtu_of_h]
+ if direction == 'upward':
+ ramp= asset.ramp_limit_up * asset.pmax #maximum MW difference from one mtu to next.
+ elif direction == 'downward':
+ ramp= asset.ramp_limit_down * asset.pmax #maximum MW difference from one mtu to next.
+ srmc = asset.srmc
+ #exogenous opportunity prices dataframe
+ odf = self.model.exodata.opportunity_costs_db
+ else:
+ #unit test input data of mark-up method
+ DAP = unit_test['av_cap_input']['DAP'].tolist()
+ MTU = unit_test['av_cap_input']['delivery_time'].tolist()
+ ramp = unit_test['asset_input']['ramp']
+ srmc = unit_test['asset_input']['srmc']
+ odf = unit_test['opportunity_costs_input']
+
+
+ #RISK QUANTITY DETERMINATION
+ #the risk quantity is determined by the assumed quantity that the asset may trade on alternative markets.
+ #the assumption is provided externally
+
+ df = DataFrame()
+ df['offered_quantity'] = of_quantity #MW per mtu
+ #max average MW per mtu that can be delivered when order is activated within delivery mtu (linear ramp assumption)
+ df['max_ramp_direct_activation'] = ramp/2
+ #max average MW per mtu that can be delivered when order is activated during the mtu before delivery mtu (linear ramp assumption)
+ df['max_ramp_mtu-1_activation'] = ramp
+
+ if success_assumption=='max_ramp_direct_activation':
+ #risk quantity is the assumed rewarded quantity
+ df['risk_quantity'] =df[
+ 'max_ramp_direct_activation'].where(df[
+ 'max_ramp_direct_activation'] odf['K-value'].max():
+ K = odf['K-value'].max()
+ elif srmc < odf['K-value'].min():
+ K = odf['K-value'].min()
+ else:
+ K = srmc
+ try:
+ if MTU_of_h_consideration==True:
+ value =odf.loc[(odf['price_data']==IBP) & (odf['PTU_of_an_hour']==MTU_of_h[p]) & (
+ odf['DAP_left_bin']<=DAP[p]) & (odf['DAP_right_bin(excl)'] >DAP[p] ) & (
+ odf['K-value'] == K),'Opp_costs_for_K'].iloc[0]
+ else:
+ value =odf.loc[(odf['price_data']==IBP) & (
+ odf['DAP_left_bin']<=DAP[p]) & (odf['DAP_right_bin(excl)'] >DAP[p] ) & (
+ odf['K-value'] == K),'Opp_costs_for_K'].iloc[0]
+ except:
+ import pdb
+ pdb.set_trace()
+ intrinsic_values += [value]
+ df['intrinsic_values'] = intrinsic_values #EUR/MWh
+
+ #calculate opportunity cost (EUR)
+ df['opp_cost']=df['risk_quantity'] * df['intrinsic_values']/4
+ #calculate the opportunity cost markup (EUR/MWh) for the offered quantity
+ df['markup'] = df['opp_cost']/df['offered_quantity']
+
+ if direction == 'downward':
+ #in line with notation, the markup for downward/buy orders is multiplied by -1, so that the mark-up can be added to a price.
+ df['markup']= - df['markup']
+
+ mask = np.isnan(df['markup'])
+ df.loc[~mask, 'markup'] = df.loc[~mask,'markup'].round(0)
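A worked example of the mark-up arithmetic above (invented numbers): risk quantity times intrinsic value, converted to energy with the factor 4 (15-minute MTUs), then normalised by the offered quantity.

    risk_quantity = 20       # MW assumed tradable on the imbalance market
    intrinsic_value = 12     # EUR/MWh from the opportunity-cost table
    offered_quantity = 40    # MW offered in the order

    opp_cost = risk_quantity * intrinsic_value / 4  # -> 60.0 EUR per MTU
    markup = opp_cost / offered_quantity            # -> 1.5 per offered MW
    print(opp_cost, markup)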
+ #set np.nan to 0; these orders are filtered out anyway because no 0 MW orders are allowed.
+ df.loc[mask, 'markup'] = 0
+ df['markup']=df['markup'].round().astype(int).copy()
+ if not unit_test:
+ return(list(df['markup']))
+ else:
+ df['DAP'] =DAP
+ df['markup']=df['markup'].round().astype('int64').copy()
+ return(list(df['markup']), df)
+
+
+ def startstop_markup (self, direction='upward', of_quantity = None,
+ asset = None, gct = None, partial_call = False,
+ order_granularity= 1, minimum_call = 1, unit_test=None):
+ """
+ Method: provides a list of mark-ups for risks of orders from start-up or shut-down capacity.
+ Optionally, a mark-up can be added regarding the risk of partial call (for limit order types).
+ - It returns a list of the mark-ups (for later addition to other mark-ups).
+ - Start-up and shut-down of assets lead to potential imbalance costs (or savings), fuel costs (or savings)
+ and fixed start-up/shut-down costs; in case of overlap with scheduled start and stop ramps,
+ it can lead to savings of fixed start-up/shut-down costs.
+ - Please consider the ASAM documentation for more explanation on this mark-up.
+ - The method is structured as follows:
+ 1. Data collection
+ 2. Risk quantity determination
+ 3. Risk price determination
+ 4. Mark-up determination
+
+
+ Note:
+ - Assumed input is a nested list [[day],[time],[duration]] of block orders,
+ whereby the offer quantity is equal to pmin.
+ - No opportunity costs included.
+
+ - Positive values for costs are actual costs from the agent perspective. Negative values are savings.
+ - Notation regarding imbalance: when the respective expected imbalance price is positive,
+ short agents pay, long agents receive. In all balancing control states.
+ - The method supports imbalance designs with dual pricing, as imbalance prices for short and long are provided.
+ However, they may be the same (single-pricing situations).
+
+ - If partial_call is True, the start-stop mark-up also contains a partial call mark-up.
+ - Partial call risk considers missing fixed start/stop costs and additional imbalance.
+ - Order granularity (MW) and minimum call (MW) determine the considered partial call risk quantity.
+ - The assumed probability for the various partial call scenarios is a uniform discrete distribution.
+ - Furthermore it is assumed that order granularity and minimum call, as well as offered
+ quantity, are natural numbers.
+ """
+ if not of_quantity[0]:
+ #if no start/stop blocks are available, an empty list is returned
+ return([])
+
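The expected of_quantity input, per the note above, is one nested list per direction, with blocks aligned by position. A hypothetical example:

    # [[delivery_day], [delivery_mtu], [duration]] for two start-up blocks
    start_blocks = [[1, 1], [20, 60], [4, 6]]
    # block 0: day 1, MTU 20, 4 MTUs long; block 1: day 1, MTU 60, 6 MTUs long
    for b in range(len(start_blocks[0])):
        print(start_blocks[0][b], start_blocks[1][b], start_blocks[2][b])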
+ #COLLECTION AND JOINING OF DATA
+ if not unit_test:
+ srmc = asset.srmc #eur/mwh
+ pmax = asset.pmax #MW
+ pmin = asset.pmin #MW
+ ramp_limit_start_up = asset.ramp_limit_start_up # p.u. pmax per ISP
+ ramp_limit_shut_down = asset.ramp_limit_shut_down # p.u. pmax per ISP
+ start_up_cost = asset.start_up_cost #eur
+ shut_down_cost = asset.shut_down_cost
+ min_down_time = asset.min_down_time #ISPs
+ min_up_time = asset.min_up_time
+
+ #expected imbalance prices
+ eIBP = self.model.rpt.eIBP.loc[self.model.schedules_horizon.index[gct:]]
+ av_cap = asset.schedule.loc[self.model.schedules_horizon.index[gct:]].copy()
+
+ if ((av_cap['commit'] < pmin )&(av_cap['commit']>0)).any():
+ import pdb
+ pdb.set_trace()
+ raise Exception('this method only works correctly when scheduled dispatch is either 0 or >= pmin')
+ if pmin < 1:
+ #this method doesn't work and is not meaningful for pmin < 1
+ return([],[])
+
+ #get expected imbalance prices for the known DAP
+ av_cap = pd.concat([av_cap,eIBP[['expected_IBP_short','expected_IBP_long']]], axis=1)
+ av_cap.reset_index(inplace=True)
+ else:
+ #unit test input data of mark-up method
+ direction #upward or downward
+ of_quantity #nested list with start or stop blocks [[delday][deltime][duration]]
+ asset = None # not needed, because for unit tests the asset values are provided separately
+ #unit_test is dictionary
+ av_cap = unit_test['av_cap_input'] #df with available capacity and eIBP
+ srmc = unit_test['asset_input']['srmc']
+ ramp_limit_start_up = unit_test['asset_input']['ramp_limit_start_up'] # p.u. pmax per ISP
+ ramp_limit_shut_down = unit_test['asset_input']['ramp_limit_shut_down']# p.u. pmax per ISP
+ pmax = unit_test['asset_input']['pmax'] #MW
+ pmin = unit_test['asset_input']['pmin'] #MW
+ start_up_cost = unit_test['asset_input']['start_up_cost'] #eur
+ shut_down_cost = unit_test['asset_input']['shut_down_cost']
+ min_down_time = unit_test['asset_input']['min_down_time'] #ISPs
+ min_up_time = unit_test['asset_input']['min_up_time']
+
+
+ markup_lst = []
+
+ #mark-up calculation per offer
+ for b in range(len(of_quantity[0])):
+ #risk quantity and columns nan
+ av_cap['risk_quantity_fuel'] = np.nan
+ av_cap['risk_quantity_imbalance'] = np.nan
+ av_cap['risk_price_imbalance'] = np.nan
+ #lists to capture the risk quantity ramp
+ pre_ramp=[]
+ post_ramp=[]
+ save_post_overlap_ramp = False
+ save_pre_overlap_ramp =False
+ #read order MTU and duration
+ delivery_day = of_quantity[0][b]
+ delivery_mtu = of_quantity[1][b]
+ delivery_duration = of_quantity[2][b]
+ start_pre_ramp = 1
+ #t_delivery_start
+ t_delivery_start = av_cap.loc[(av_cap['delivery_day'] == delivery_day)&(
+ av_cap['delivery_time'] == delivery_mtu)].index[0]
+
+ if direction == 'upward':
+ #minimum duration the asset needs to run
+ min_duration = min_up_time
+ #end MTU of asset delivery (and start of shut-down ramp)
+ t_delivery_end = t_delivery_start + max(delivery_duration, min_duration)
+ if t_delivery_end > len(av_cap):
+ #min_duration reduced to fit horizon
+ t_delivery_end = len(av_cap)
+ min_duration = min_duration - (t_delivery_start + max(delivery_duration, min_duration) - len (av_cap))
+ #number of mtu to startup
+ startup_duration = int(pmin/(pmax * ramp_limit_start_up))
+ #number of mtu to shut down
+ shutdown_duration =int(pmin/(pmax * ramp_limit_shut_down))
+ elif direction == 'downward':
+ min_duration = min_down_time
+ t_delivery_end = t_delivery_start + max(delivery_duration, min_duration)
+ if t_delivery_end > len(av_cap):
+ #min_duration ignored
+ t_delivery_end = len(av_cap)
+ min_duration = min_duration - (t_delivery_start + max(delivery_duration, min_duration) - len (av_cap))
+ startup_duration = int(pmin/(pmax *ramp_limit_shut_down))
+ shutdown_duration =int(pmin/(pmax *ramp_limit_start_up))
+ #t_pre_overlap_start
+ t_pre_overlap_start
= t_delivery_start - startup_duration - shutdown_duration + #t_post_overlap_end + t_post_overlap_end = t_delivery_end + startup_duration+ shutdown_duration + + #additional_minrun_duration + extra_post_duration = (min_duration - delivery_duration) + + #ensure that out of schedule_horizon startstop times are ignored + if t_pre_overlap_start < 0: + t_pre_overlap_start = 0 + if t_post_overlap_end > len(av_cap): + t_post_overlap_end = len(av_cap) + + #RISK QUANTITY DETERMINION + if direction == 'upward': + #adjust start and stop ramps if beyond schedules horizon. + if t_delivery_start - startup_duration < 0: + start_pre_ramp = startup_duration- t_delivery_start + if t_delivery_end + shutdown_duration > len(av_cap): + shutdown_duration = len(av_cap) - t_delivery_end +1 + + scheduled_commitment = 0 + offer_dispatch = pmin + is_pre_overlap = (av_cap['commit'].iloc[t_pre_overlap_start:t_delivery_start] >=offer_dispatch) + is_post_overlap =(av_cap['commit'].iloc[t_delivery_end -extra_post_duration: t_post_overlap_end + 1] >=offer_dispatch) + #start ramp to delivery the offer minus the scheduled unit commitment + pre_ramp=[- scheduled_commitment + ramp_limit_start_up * pmax * t for t in range(start_pre_ramp, startup_duration)] + post_ramp=[- scheduled_commitment + pmin -ramp_limit_shut_down * pmax * t for t in range(1,shutdown_duration)] + #assumed scheduled ramp (post) from a scheduled start-up before delivery period. + pre_overlap_ramp = [ramp_limit_shut_down * pmax * t for t in range(1,startup_duration)] + #assumed scheduled ramp (pre) from a scheduled start-up after delivery period. + post_overlap_ramp= [pmin - ramp_limit_start_up * pmax * t for t in range(1,shutdown_duration)] + + if direction == 'downward': + #adjust start and stop ramps if beyond schedules horizon. + if t_delivery_start - shutdown_duration < 0: + start_pre_ramp = shutdown_duration - t_delivery_start + if t_delivery_end + startup_duration > len(av_cap): + startup_duration = len(av_cap) - t_delivery_end +1 + + scheduled_commitment = pmin + offer_dispatch = 0 + is_pre_overlap = (av_cap['commit'].iloc[t_pre_overlap_start:t_delivery_start] == offer_dispatch) + is_post_overlap =(av_cap['commit'].iloc[t_delivery_end-extra_post_duration: t_post_overlap_end + 1] == offer_dispatch) + pre_ramp=[- scheduled_commitment + pmin - ramp_limit_shut_down* pmax * t for t in range(start_pre_ramp, shutdown_duration)] + post_ramp=[- scheduled_commitment + ramp_limit_start_up * pmax * t for t in range(1,startup_duration)] + #assumed scheduled ramp (post) from a scheduled shut-down before delivery period. + pre_overlap_ramp= [pmin - ramp_limit_start_up * pmax * t for t in range(1,shutdown_duration)] + #assumed scheduled ramp (pre) from a scheduled start-up after delivery period. 
+ post_overlap_ramp = [ramp_limit_shut_down * pmax * t for t in range(1,startup_duration)] + + if is_pre_overlap.any() : + #overlap with another scheduled start-stop leads to saving of start-up costs + save_pre_overlap_ramp = True + pre_overlap_start =(av_cap.iloc[t_pre_overlap_start:t_delivery_start]).loc[is_pre_overlap].index[-1] +1 + av_cap['risk_quantity_fuel'].iloc[pre_overlap_start :t_delivery_start + ] = offer_dispatch - scheduled_commitment + extension = (t_delivery_start -pre_overlap_start)-len(pre_overlap_ramp) + if extension > 0: + av_cap['risk_quantity_imbalance'].iloc[pre_overlap_start :t_delivery_start + ] = [offer_dispatch - sr for sr in pre_overlap_ramp + [scheduled_commitment] * extension] + else: + av_cap['risk_quantity_imbalance'].iloc[pre_overlap_start :t_delivery_start + ] = [offer_dispatch - sr for sr in pre_overlap_ramp[:extension]] + else: #no overlapping ramps + save_pre_overlap_ramp = False + if not av_cap.iloc[t_delivery_start-len(pre_ramp) :t_delivery_start].empty: + av_cap['risk_quantity_fuel'].iloc[t_delivery_start-len(pre_ramp) :t_delivery_start + ] = pre_ramp + av_cap['risk_quantity_imbalance'] = av_cap['risk_quantity_fuel'] + + + if is_post_overlap.any(): + save_post_overlap_ramp = True + post_overlap_start =av_cap.iloc[t_delivery_end-extra_post_duration: t_post_overlap_end + 1].loc[is_post_overlap].index[0] + av_cap['risk_quantity_fuel'].iloc[t_delivery_end-extra_post_duration: post_overlap_start + ]= offer_dispatch - scheduled_commitment + extension = (post_overlap_start - (t_delivery_end-extra_post_duration))-len(post_overlap_ramp) + if extension > 0: + av_cap['risk_quantity_imbalance'].iloc[t_delivery_end-extra_post_duration: post_overlap_start + ] = [offer_dispatch - sr for sr in extension * [scheduled_commitment] + post_overlap_ramp] + else: + av_cap['risk_quantity_imbalance'].iloc[t_delivery_end-extra_post_duration: post_overlap_start + ] = [offer_dispatch - sr for sr in post_overlap_ramp[-extension:]] + else: #no overlapping ramps + save_post_overlap_ramp = False + if not av_cap.iloc[t_delivery_end - extra_post_duration: t_delivery_end + len(post_ramp)].empty: + av_cap['risk_quantity_fuel'].iloc[t_delivery_end - extra_post_duration: t_delivery_end + len(post_ramp)]= ( + extra_post_duration)* [offer_dispatch - scheduled_commitment]+ post_ramp + av_cap['risk_quantity_imbalance'] = av_cap['risk_quantity_fuel'] + + + #RISK PRICE DETERMINATION + mask= av_cap.loc[av_cap['risk_quantity_imbalance'].notnull()]['risk_quantity_imbalance'] > 0 + av_cap['risk_price_imbalance'].loc[av_cap['risk_quantity_imbalance'].notnull()] = (- av_cap['expected_IBP_long'].loc[ + av_cap['risk_quantity_imbalance'].notnull()]).where(mask,- av_cap['expected_IBP_short'].loc[ + av_cap['risk_quantity_imbalance'].notnull()]) #EUR/MWh + + #risk_price_fuel = srmc + #fuel risk cost + additional_fuel_cost = (av_cap['risk_quantity_fuel'] * srmc/4).fillna(value = 0).sum() #EUR + imbalance_risk_cost = av_cap['risk_price_imbalance'].mul(av_cap['risk_quantity_imbalance']).fillna(value = 0).sum() #EUR + + + # check if start stop cost are saved + if direction == 'upward': + if save_pre_overlap_ramp == True: + saved_pre_overlap_cost = -shut_down_cost #EUR + start_up =0 #EUR + else: + saved_pre_overlap_cost = 0 + start_up = start_up_cost + if save_post_overlap_ramp == True: + saved_post_overlap_cost = -start_up_cost + shut_down =0 + else: + saved_post_overlap_cost = 0 + shut_down = shut_down_cost + elif direction == 'downward': + if save_pre_overlap_ramp == True: + saved_pre_overlap_cost = 
-start_up_cost + shut_down =0 + else: + saved_pre_overlap_cost = 0 + shut_down = shut_down_cost + if save_post_overlap_ramp == True: + saved_post_overlap_cost = -shut_down_cost + start_up =0 + else: + saved_post_overlap_cost = 0 + start_up = start_up_cost + + total_cost = start_up + shut_down + imbalance_risk_cost + additional_fuel_cost + saved_pre_overlap_cost + saved_post_overlap_cost + markup = int(round(total_cost/(pmin * delivery_duration/4),0)) #EUR/MWh + + + #PARTIAL CALL MARKUP + if partial_call == True: + #risk markup of partial activation 0< > pmin is included + #assumed probability of partial call is a uniform discrete distribution + if ((type(pmin) is not np.int32)&(type(pmin) is not np.int64)&(type(pmin) is not int))|( + type(minimum_call) is not int)|(type(order_granularity) is not int): + raise Exception ( + "partial call works with a assumption of natural numbers and therefore needs positive integers for pmin,minimum_call and order_granularity") + #expected value for partial call + ePC = (minimum_call + pmin)/2 + #constant offer quantity of pmin is assumed for start-stop orders (block order type) + risk_quantity_pc = pmin -ePC + + #mean expected imbalance price during delivery period + if direction == 'upward': + #assumption in case of partial upward call, dispatch needs to be adjusted to pmin. + #market party has long position. + #IBP long needs to be reversed, as positive prices means less cost (and risk_quantity_pc is > 0) + mean_eibp = - av_cap[['expected_IBP_long']].iloc[t_delivery_start:t_delivery_end].mean().round().values[0] + elif direction == 'downward': + #assumption in case of partial downward call, dispatch needs to be adjusted to 0. + #market party gets short position. + #IBP short is not reversed as positive prices multiplied with positive risk quantity increase costs + mean_eibp =av_cap[['expected_IBP_short']].iloc[t_delivery_start:t_delivery_end].mean().round().values[0] + + risk_price_pc = (markup + mean_eibp) + markup_pc = int(round(risk_price_pc * risk_quantity_pc/ pmin)) + #add partial call mark-up to start stop mark-up + markup = markup + markup_pc + + if direction == 'downward': + #in line with notation markup for downward/buy orders is multiplied by -1, + #so that the mark-up can be added to an offer price. + markup = -markup + + #add mark-up for this order to list + markup_lst += [markup] + + if not unit_test: + return(markup_lst) + else: + #return also av_cap in case of unit test. + return (markup_lst, av_cap, Series({'additional_fuel_cost':additional_fuel_cost, + 'imbalance_risk_cost':imbalance_risk_cost, + 'start_up':start_up,'shut_down':shut_down, + 'saved_pre_overlap_cost':saved_pre_overlap_cost, + 'saved_post_overlap_cost':saved_post_overlap_cost, + 'total_cost':total_cost,'markup':markup}).astype('int64')) + + + def ramping_markup(self, direction='upward', of_quantity = None, asset = None, unit_test=None): + """Method: provides a list of mark_ups for risks of infeasible ramps. + - It returns a list of the mark-ups (for later addition to other mark_ups). + - Infeasible ramps lead to potential imbalance costs (or savings) and fuel costs (or savings). + - Please consider the ASAM documentation for more explanation on the ramp mark-up. + - The method is structured as follows: + 1. Data collection + 2. risk quantity determination + 3. Risk price determination + 4. Mark-up determination + Note: + - Currently delivery periods > 1 (block orders) are NOT implemented. No error is raised if delivery period is > 1. 
+ - Positive values for costs are actual cost from agent perspective. Negative values are savings. + - Offered orders are operational capacity orders. No check on other asset constraints in this method. + - Offer quantity are provided in list in size of model.schedules_horizon + """ + + if not of_quantity: + #if no quantity to be calculated + return([],[]) + if all(v==0 for v in of_quantity): + #only zero quantity + return([0]*len(of_quantity)) + + if not unit_test: + srmc = asset.srmc #EUR/MWh + pmax = asset.pmax #MW + ramp_limit_up = asset.ramp_limit_up * pmax #p.u. of pmax per MTU * MW + ramp_limit_down = asset.ramp_limit_down * pmax + + #test if there are unfeasible ramps + check_ramps = asset.schedule.loc[self.model.schedules_horizon.index].copy() + check_ramps['offered']= of_quantity + if direction == 'upward': + check_ramps['delta_ramp'] = check_ramps['rem_ramp_constr_avail_up'] - check_ramps['offered'] + elif direction == 'downward': + check_ramps['delta_ramp'] = check_ramps['rem_ramp_constr_avail_down'] - check_ramps['offered'] + if not (check_ramps['delta_ramp'] < 0).any(): + #no unfeasible ramps. markup is 0. + return([0]*len(of_quantity)) + + #collect and join data + #add get expected imbalance prices for the known day-ahead price to available capacity and dispatch schedule + eIBP = self.model.rpt.eIBP.loc[self.model.schedules_horizon.index].copy() + av_cap = asset.schedule.loc[self.model.schedules_horizon.index].copy() + av_cap = pd.concat([av_cap,eIBP[['expected_IBP_short','expected_IBP_long']]], axis=1) + av_cap.reset_index(inplace=True) + else: + #unit test input data of mark-up method + direction #upward or downward + of_quantity #list with MW values. Must have same length as av_cap + asset = None # not needed, because for unit test asset value are provided seperately + #unit_test is dictionary + av_cap = unit_test['av_cap_input'] #df with available capacity and eIBP + srmc = unit_test['asset_input']['srmc'] + ramp_limit_up = unit_test['asset_input']['ramp_limit_up'] #MW per MTU + ramp_limit_down = unit_test['asset_input']['ramp_limit_down'] #MW per MTU + if len(of_quantity) != len(av_cap): + raise Exception ('invalid unit test input') + + #mark-up calculation per offer + markup_lst = [] + for b in range(len(of_quantity)): + #check if this quantity has a unfeasible ramp + if not unit_test: + if check_ramps['delta_ramp'].iloc[b] >= 0: + #feasible -> no mark-up + markup_lst += [0] + continue + if of_quantity[b] == 0: + markup_lst += [0] + continue + + #RISK quantity DETERMINATION + #risk quantity in case of ramping is + # required required pre-ramp + required post ramp minus scheduled dispatch + + #required ramp to dispatch the offered quantity + av_cap['required_ramp']=0 + + + if direction == 'upward': + #new asset dispatch value to deliver offer quantity + new_commit = of_quantity[b] + av_cap['commit'].iloc[b].copy() + elif direction == 'downward': + new_commit = av_cap['commit'].iloc[b].copy() - of_quantity[b] + #this is an assumption of this method without control + delivery_duration = 1 + #mtu of start delivery period + t_delivery_start = b #t < start delivery period Ts + t_delivery_end = b + delivery_duration #t > end delivery period Te + #ramp (dispatch value list) before delivery MTU + pre_ramp=[] + #ramp (dispatch value list) after delivery MTU + post_ramp=[] + success= False + i = 1 + #CALCULATE PRE-RAMP + while success == False: + if t_delivery_start - i < 0: + #out of schedule_horizon ramps are ignored + break + if direction == 'upward': + max_ramp_to_delMTU = 
new_commit - i * ramp_limit_up + if max_ramp_to_delMTU > av_cap['commit'].iloc[t_delivery_start - i]: + pre_ramp +=[max_ramp_to_delMTU] + else: + pre_ramp +=[av_cap['commit'].iloc[t_delivery_start - i]] + success = True + elif direction == 'downward': + max_ramp_to_delMTU = new_commit + i * ramp_limit_down + if max_ramp_to_delMTU < av_cap['commit'].iloc[t_delivery_start - i]: + pre_ramp +=[max_ramp_to_delMTU] + else: + pre_ramp +=[av_cap['commit'].iloc[t_delivery_start - i]] + success = True + i += 1 + pre_ramp = list(reversed(pre_ramp)).copy() + + #CALCULATE POST-RAMP + i = 1 + success = False + while success == False: + if t_delivery_end + i > len(av_cap) -1: + #out of horizon_schedule ramps are ignored + break + if direction == 'upward': + max_ramp_to_delMTU = new_commit - i * ramp_limit_down + if max_ramp_to_delMTU > av_cap['commit'].iloc[t_delivery_end + i - 1]: + post_ramp +=[max_ramp_to_delMTU] + else: + post_ramp += [av_cap['commit'].iloc[t_delivery_end + i - 1]] + success = True + elif direction == 'downward': + max_ramp_to_delMTU = new_commit + i * ramp_limit_up + if max_ramp_to_delMTU < av_cap['commit'].iloc[t_delivery_end + i -1]: + post_ramp +=[max_ramp_to_delMTU] + else: + post_ramp += [av_cap['commit'].iloc[t_delivery_end + i -1 ]] + success = True + i += 1 + + #add pre and post ramp to av_cap + if pre_ramp: + av_cap['required_ramp'].iloc[t_delivery_start - len(pre_ramp):t_delivery_start ] = pre_ramp + if post_ramp: + av_cap['required_ramp'].iloc[t_delivery_end: t_delivery_end + len(post_ramp)]= post_ramp + + mask = av_cap['required_ramp'] ==0 + av_cap['risk_quantity'] = av_cap['required_ramp'].where(mask,av_cap['required_ramp'].sub(av_cap['commit'], fill_value=0)) + + + #RISK PRICE DETERMINATION + #In case of ramping, the risk price is determined by srmc (fuel costs/savings and imbalance price (for non-delivery). + av_cap['risk_price'] = 0 + mask= av_cap.loc[av_cap['risk_quantity'] !=0]['risk_quantity'] > 0 + av_cap['risk_price'].loc[av_cap['risk_quantity'] !=0] = (- av_cap['expected_IBP_long'].loc[ + av_cap['risk_quantity'] !=0] + srmc).where(mask,- av_cap['expected_IBP_short'].loc[ + av_cap['risk_quantity'] !=0] + srmc) #EUR/MWh + + #MARK-UP DETERMINATION + markup = int(round(av_cap['risk_price'].mul(av_cap['risk_quantity']).sum() /(of_quantity[b]) * delivery_duration/4,0)) #EUR/MWh + + if direction == 'downward': + #in line with notation markup for downward/buy orders is multiplied by -1, so that mark-up can be added to a price. + markup= - markup + + #add markup of offer to list + markup_lst += [markup] + + if not unit_test: + return(markup_lst) + else: + #return also av_cap in case of unit test. + return (markup_lst, av_cap) + + def doublescore_markup(self, direction='upward', of_quantity = None, asset = None, unit_test= None): + """Method: + - Provides a list of mark_ups for risks of double score on redispatch and intraday trade market/mechansim. + - It returns a list of the mark-ups only (for later addition to other mark_ups). + - The mark-up is only relevant for angent strategies where the same capacity is offered on two markets (IDM and RDM) simultanously. + - A 'double-scoring' situation would lead to non-delivery, with consequences regarding imbalance cost and fuel cost. + - Additional non-delivery penalties are not considered. + - Please consult the ASAM documentation for mor information on this mark-up. + - The method is structured as follows: + 1. Data collection + 2. risk quantity determination + 3. Risk price determination + 4. 
Mark-up determination
+
+ Note:
+ - Assumption: capacity is first placed on the IDM, because of continuous trading and instantaneous clearing.
+ The capacity that is not (yet) cleared is subsequently also offered for redispatch.
+ - Assumption: the risk quantity is determined with a uniform distribution assumption regarding
+ the quantity that is double-scored:
+ exp. value = (minCalled + maxCalled)/2, with minimum called capacity = 1 and max. called = offer quantity.
+ - Currently only delivery duration == 1 is implemented; block orders (duration > 1) are not. No error is raised if delivery duration is > 1.
+ - Positive values for costs are actual costs from the agent perspective. Negative values are savings.
+ - Offered orders are operational capacity orders. No check on asset commitment constraints in this method.
+
+ """
+
+ if not of_quantity:
+ #if no quantity to be calculated
+ return([],[])
+ if all(v==0 for v in of_quantity):
+ #only zero quantity
+ return([0]*len(of_quantity))
+
+ #COLLECTION OF DATA
+ if not unit_test:
+ srmc = asset.srmc #eur/mwh
+
+ #get the open offered position from IDM
+ buy_position, sell_position =self.model.IDM_obook.get_offered_position(associated_asset=asset.assetID)
+
+ #deduct offered position from available capacity
+ if direction == 'upward':
+ if asset.schedule['available_up'].loc[
+ self.model.schedules_horizon.index].index.isin(sell_position.index).any():
+ av_cap = asset.schedule['available_up'].loc[
+ self.model.schedules_horizon.index].fillna(0).to_frame().join(
+ sell_position).copy()
+
+ else:
+ #sell_position empty or outside schedule. No mark-up.
+ markup_up = [0]*len(of_quantity)
+ return(markup_up)
+ elif direction == 'downward':
+ if asset.schedule['available_down'].loc[
+ self.model.schedules_horizon.index].index.isin(buy_position.index).any():
+ av_cap = asset.schedule['available_down'].loc[
+ self.model.schedules_horizon.index].fillna(0).to_frame().join(
+ buy_position).copy()
+ else:
+ #buy_position empty or outside schedule
+ markup_down = [0]*len(of_quantity)
+ return(markup_down)
+
+ else:
+ raise Exception('direction must be upward or downward')
+ av_cap.rename(columns = {'quantity': 'other_market_position'}, inplace = True)
+ #expected imbalance prices
+ eIBP = self.model.rpt.eIBP.loc[self.model.schedules_horizon.index].copy()
+ #get expected imbalance prices for the known DAP
+ av_cap = pd.concat([av_cap,eIBP[['expected_IBP_short','expected_IBP_long']]], axis=1)
+ av_cap['offer_quantity'] = of_quantity
+ #this is an assumption of this method
+ delivery_duration = 1
+ else:
+ #unit test input data of mark-up method
+ direction #upward or downward
+ of_quantity #list with MW values. Must have same length as av_cap
+ asset = None # not needed, because for unit tests the asset values are provided separately
+ #unit_test is dictionary
+ av_cap = unit_test['av_cap_input'] #df with available capacity and eIBP
+ srmc = unit_test['asset_input']['srmc']
+ #this is an assumption of this method without control
+ delivery_duration = 1
+ if len(of_quantity) != len(av_cap):
+ raise Exception ('invalid unit test input')
+
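A sketch of the uniform-call assumption used next (invented numbers): with a minimum call of 1 MW, the expected double-scored quantity is the midpoint between 1 MW and the open position on the other market.

    import pandas as pd

    min_IDM_capacity = 1  # MW, minimum assumed call
    other_market_position = pd.Series([0, 10, 25])  # open offered MW (toy)

    # expected double-scored quantity: (min + max)/2 where a position exists
    risk_quantity = (min_IDM_capacity + other_market_position).where(
        other_market_position > 0, 0) / 2
    print(risk_quantity.tolist())  # -> [0.0, 5.5, 13.0]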
+ #RISK QUANTITY DETERMINATION
+ #assumption uniform distribution, with minimum called capacity =1
+ #exp value = (minCalled + maxCalled)/2
+ min_IDM_capacity = 1 #MW
+ av_cap['risk_quantity'] = (min_IDM_capacity + av_cap['other_market_position']).where(
+ av_cap['other_market_position']> 0, 0)/2
+
+ #in line with notation, short positions are negative
+ if direction =='upward':
+ av_cap['risk_quantity'] = - av_cap['risk_quantity']
+
+
+ #RISK PRICE DETERMINATION
+ #In case of double-score mark-up, risk price is determined by imbalance price
+ #(penalties are out of scope, but could be added here)
+ av_cap['risk_price'] = 0
+ mask= av_cap.loc[av_cap['risk_quantity'] !=0]['risk_quantity'] > 0
+ av_cap['risk_price'].loc[av_cap['risk_quantity'] !=0] = (- av_cap['expected_IBP_long'].loc[
+ av_cap['risk_quantity'] !=0] + srmc).where(mask,- av_cap['expected_IBP_short'].loc[
+ av_cap['risk_quantity'] !=0] + srmc) #EUR/MWh
+
+ #MARK-UP DETERMINATION
+ av_cap['markup'] = av_cap['risk_price'].mul(av_cap['risk_quantity']).div(
+ av_cap['offer_quantity']).replace(to_replace=np.inf, value=0)* delivery_duration/4 #EUR/MWh
+
+ if direction == 'downward':
+ #in line with notation, the markup for downward/buy orders is multiplied by -1, so that the mark-up can be added to a price.
+ av_cap['markup']= - av_cap['markup']
+
+ av_cap['markup'].fillna(value= 0, inplace=True)
+ av_cap['markup'] = av_cap['markup'].round().astype(int).copy()
+ markup_lst = list(av_cap['markup'])
+
+ if not unit_test:
+ return(markup_lst)
+ else:
+ #return also av_cap in case of unit test.
+ return (markup_lst, av_cap)
+
+ def intraday_markup(self, indiff_price_lst, direction,
+ profit_margin_pu = 0.1, raise_margin_pu=0.03):
+ """Method: provides a heuristic mark-up for intraday orders to an open order book.
+ In case the indifference price (short-run marginal cost + opportunity cost) is extra-marginal
+ (meaning the order will not be cleared instantaneously), the order price is set to the next
+ more expensive order plus 1 euro (in case of buy orders), respectively minus 1 euro (in case of sell orders).
+ If the indifference price is intra-marginal (will be cleared instantaneously), then the indifference
+ price plus a standard margin will be offered.
+ In case of extra-marginal indifference prices where no competing orders exist,
+ the last cleared price plus a raise margin is taken, when higher (sell) / lower (buy) than the indifference price.
+ The raise margin is a strategic margin to raise the price in a step-wise negotiation via small orders.
+
+ Note:
+ profit_margin is in p.u.
( 0.1 means 10% profit mark-up on indifference prices) + raise_margin_pu (0.02 means 2% markup on the latest highest price) + """ + + #make Series with time index from price list (note: the price_list must be in shape of schedules_horizon) + indiff_price = Series(indiff_price_lst, index = self.model.schedules_horizon.index) + price_list = indiff_price.copy() + price_list.loc[:] = np.nan + buyorders = self.model.IDM_obook.buyorders[['delivery_day','delivery_time','price']].copy() + sellorders = self.model.IDM_obook.sellorders[['delivery_day','delivery_time','price']].copy() + + #extra-marginal price setting + if direction == 'buy': + for timestamp ,orders_t in buyorders.groupby(by=['delivery_day','delivery_time']): + competing_price =orders_t['price'].loc[orders_t['price'] < indiff_price[timestamp]].max() + if (~math.isnan(competing_price))&(competing_price + 1 < indiff_price[timestamp]): + price_list[timestamp] = competing_price + 1 + + elif direction == 'sell': + for timestamp ,orders_t in sellorders.groupby(by=['delivery_day','delivery_time']): + competing_price = orders_t['price'].loc[orders_t['price'] > indiff_price[timestamp]].min() + if (~math.isnan(competing_price))&(competing_price - 1 > indiff_price[timestamp]): + price_list[timestamp] = competing_price - 1 + + #intra-marginal price setting + if direction == 'buy': + for timestamp ,orders_t in sellorders.groupby(by=['delivery_day','delivery_time']): + if (orders_t['price'] <= indiff_price[timestamp]* (1 - profit_margin_pu)).any(): + price_list[timestamp] = indiff_price[timestamp]* (1 - profit_margin_pu) + elif direction == 'sell': + for timestamp ,orders_t in buyorders.groupby(by=['delivery_day','delivery_time']): + if (orders_t['price'] >= indiff_price[timestamp] * (1 + profit_margin_pu)).any(): + price_list[timestamp] = indiff_price[timestamp] * (1 + profit_margin_pu) + + #get previous clearing prices for extra-marginal price setting + if direction == 'buy': + prev_highest_cleared_cur_step = self.model.IDM_obook.cleared_buyorders.set_index(['delivery_day', 'delivery_time']) + prev_highest_cleared_prev_step = self.model.IDM_obook.cleared_buyorders_all_df.set_index(['delivery_day', 'delivery_time']) + highest ='min' + elif direction == 'sell': + prev_highest_cleared_cur_step = self.model.IDM_obook.cleared_sellorders.set_index(['delivery_day', 'delivery_time']) + prev_highest_cleared_prev_step = self.model.IDM_obook.cleared_sellorders_all_df.set_index(['delivery_day', 'delivery_time']) + highest ='max' + prev_highest_cleared_prev_step=prev_highest_cleared_prev_step.loc[ + prev_highest_cleared_prev_step[ + 'offer_daytime']==self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps-1,0)].copy() + + mask= prev_highest_cleared_prev_step.index.isin(prev_highest_cleared_cur_step.index) + prev_highest_cleared=pd.concat([prev_highest_cleared_cur_step, + prev_highest_cleared_prev_step[~mask]]) + + if not prev_highest_cleared.empty: + #definition of highest price depends on buy or sell direction + prev_highest_cleared=prev_highest_cleared.reset_index().groupby(['delivery_day', 'delivery_time'])['cleared_price'].agg(highest).reset_index() + #make index values integers again + prev_highest_cleared[['delivery_day', 'delivery_time']]=prev_highest_cleared[['delivery_day', 'delivery_time']].astype('int64') + prev_highest_cleared.set_index(['delivery_day', 'delivery_time'], inplace=True) + + missing_prices = price_list.loc[price_list.isnull().values] + #use previous prices for missing prices (isnull) + for i in range(len(missing_prices)): 
+ day = missing_prices.index.get_level_values(0)[i]
+ mtu = missing_prices.index.get_level_values(1)[i]
+ if not prev_highest_cleared.empty:
+ if (day,mtu) in prev_highest_cleared.index:
+ last_highest_price = prev_highest_cleared.loc[(day,mtu), 'cleared_price']
+ else:
+ last_highest_price = None
+ else:
+ last_highest_price = None
+ if direction == 'sell':
+ if last_highest_price is not None:
+ if last_highest_price >= indiff_price[(day, mtu)] *(1 + profit_margin_pu):
+ price_list.loc[(day,mtu)] = last_highest_price *(1 + raise_margin_pu)
+ else:
+ price_list.loc[(day,mtu)] = indiff_price[(day, mtu)] *(1 + profit_margin_pu)
+ else:
+ price_list.loc[(day,mtu)] = indiff_price[(day, mtu)] *(1 + profit_margin_pu)
+ elif direction == 'buy':
+ if last_highest_price is not None:
+ if last_highest_price <= indiff_price[(day, mtu)] *(1 - profit_margin_pu):
+ price_list.loc[(day,mtu)] = last_highest_price*(1 - raise_margin_pu)
+ else:
+ price_list.loc[(day,mtu)] = indiff_price[(day, mtu)] *(1 - profit_margin_pu)
+ else:
+ price_list.loc[(day,mtu)] = indiff_price[(day, mtu)] *(1 - profit_margin_pu)
+ price_list = price_list.round(0).astype(int).copy()
+ return (list(price_list.values))
+
+
+
+
diff --git a/asam classes/OrderMessage.py b/asam classes/OrderMessage.py
new file mode 100644
index 0000000..37ee8e8
--- /dev/null
+++ b/asam classes/OrderMessage.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Aug 12 17:29:25 2017
+@author: Samuel Glismann
+
+
+An order message is a set of orders which can be submitted to an order book of a market operator.
+
+When an order message is initiated, the received orders are checked for consistency.
+A DataFrame is returned.
+
+
+Note:
+ - The init_time attribute is not the actual time of the model run, but the rank number in
+ which the agents took steps. This rank is random (random scheduler); however, it is
+ important for prioritising orders with the same price.
+ - Order messages for the intra-day market are checked for internal matches. These are excluded.
+
+order type: limit order means that the quantity can be partially cleared, while the rest stays in the order book.
+order type: all-or-none means that the order can only be matched entirely (or not at all).
+order type: market order means a fill-and-kill order where as much as possible is cleared, but no rest stays in the order book.
+ Market orders currently have a (shadow) price of 1000 (buy) or -1000 (sell) EUR/MWh. This has an effect on market statistics.
+order type: Intra-day Congestion Spread (IDCONS) order is an intra-day order which can also be used by a grid operator for redispatch.
+ This order type is for example applied in the Netherlands.
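To make the field set concrete, here is one illustrative order as a dict (values are purely hypothetical; the field names follow the orderlabels list defined in the class below).

    example_order = {
        'agent_id': 'party_1', 'associated_asset': 'asset_1',
        'delivery_location': 'area_A', 'quantity': 10, 'price': 45,
        'delivery_day': 1, 'delivery_time': 20,
        'order_type': 'intraday_limit_order', 'init_time': 3,
        'order_id': 'party_1asset_142', 'direction': 'sell',
        'delivery_duration': 1,
    }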
+""" +import pandas as pd +from pandas import Series, DataFrame +import numpy as np + + +class OrderMessage(): + + def __init__(self, orders): + #label lists needed to convert single orders in dataframe collections + self.orderlabels = ['agent_id','associated_asset','delivery_location', + 'quantity','price', 'delivery_day','delivery_time', + 'order_type','init_time', 'order_id', 'direction','delivery_duration'] + + self.order_df = self.consistency_check(orders) + self.order_df, self.portfolio_exclusions = self.exclude_internal_IDtrades(self.order_df) + + def consistency_check(self, orders): + if type(orders) is list: + order_df = DataFrame(orders, columns = self.orderlabels) + else: + order_df = orders + if (order_df['order_type']=='redispatch_demand').any(): + #Grid Operator orders do not contain price integers but nan which are considered as float64 by dtypes + consistent_reference = DataFrame([['o','o','o',1,1.1, + 1,1,'o',1.1, + 'o','o',1]],columns=self.orderlabels).dtypes + else: + consistent_reference = DataFrame([['o','o','o',1,1, + 1,1,'o',1.1, + 'o','o',1]],columns=self.orderlabels).dtypes + if (order_df['quantity']<=0).any(): + import pdb + pdb.set_trace() + raise Exception('order message quantity <=0...') + + if consistent_reference.equals(order_df.dtypes): + return (order_df) + else: + import pdb + pdb.set_trace() + print(order_df) + print(order_df.dtypes) + print (consistent_reference) + raise Exception('Order Message contains invalid dtypes at some columns') + + def exclude_internal_IDtrades(self, orders): + """this is actually an agent portfolio 'matching'""" + orders.set_index(['delivery_day','delivery_time'], inplace=True) + orders.sort_index(inplace=True) + if ((orders['order_type']=='intraday_limit_order').any())|( + (orders['order_type']=='market_order').any())|( + (orders['order_type']=='IDCONS_order').any()): + excl_lst=[] + for deliveryday, orders_t in orders.groupby(level=[0,1]): + sells = orders_t.loc[orders_t['direction']=='sell'] + buys = orders_t.loc[orders_t['direction']=='buy'] + low_sells=sells.loc[sells['price']<=buys['price'].max()].copy() + high_buys=buys.loc[buys['price']>=sells['price'].min()].copy() + if low_sells['quantity'].sum()>high_buys['quantity'].sum(): + excl_lst =excl_lst + list(high_buys['order_id']) + low_sells.sort_values(['price'], ascending=True,inplace=True) + o_numb = len(low_sells.loc[low_sells['quantity'].cumsum()1].empty: + pass + else: + blocks = new_orders.loc[new_orders['delivery_duration']>1] + for i in range(len(blocks)): + df = DataFrame( + [blocks.iloc[i]] *(blocks['delivery_duration'].iloc[i] - 1)) + day_lst, mtu_lst = self.model.clock.calc_delivery_period_range( + blocks['delivery_day'].iloc[i], + blocks['delivery_time'].iloc[i], + blocks['delivery_duration'].iloc[i]) + df['delivery_day'] = day_lst + df['delivery_time'] = mtu_lst + new_orders = new_orders.append(df, ignore_index = True) + if self.buyorders_full_step.empty: + self.buyorders_full_step=new_orders.loc[new_orders['direction']=='buy'] + else: + self.buyorders_full_step = self.buyorders_full_step.append(new_orders.loc[new_orders['direction']=='buy'], + ignore_index =True) + if self.sellorders_full_step.empty: + self.sellorders_full_step=new_orders.loc[new_orders['direction']=='sell'] + else: + self.sellorders_full_step = self.sellorders_full_step.append(new_orders.loc[new_orders['direction']=='sell'], + ignore_index =True) + else: + self.redispatch_demand_orders_downward = new_orders.loc[new_orders['direction']=='buy'].copy() + self.redispatch_demand_orders_upward = 
new_orders.loc[new_orders['direction']=='sell'].copy()
+ else:
+ raise Exception('Orderbook only accepts orders of the OrderMessage() class')
+
+
+ def delete_orders(self, order_ids=None, agent_id_orders=None):
+ if agent_id_orders is None:
+ pass
+ else:
+ if not self.buyorders.empty:
+ self.buyorders = self.buyorders.loc[self.buyorders['agent_id'] != agent_id_orders].copy()
+ if not self.sellorders.empty:
+ self.sellorders = self.sellorders.loc[self.sellorders['agent_id'] != agent_id_orders].copy()
+
+ def get_offered_position(self,associated_asset=None):
+ """method provides the summed quantity offered per delivery period associated with a given asset"""
+ if associated_asset is None:
+ pass
+ else:
+ col=['delivery_day','delivery_time','quantity']
+ if not self.buyorders.empty:
+ buy_position = self.buyorders.loc[self.buyorders['associated_asset'] == associated_asset,
+ col].copy().groupby(by=['delivery_day','delivery_time']).sum()
+ else:
+ buy_position = DataFrame()
+ if not self.sellorders.empty:
+ sell_position = self.sellorders.loc[self.sellorders['associated_asset'] == associated_asset,
+ col].copy().groupby(by=['delivery_day','delivery_time']).sum()
+ else:
+ sell_position = DataFrame()
+ return(buy_position, sell_position)
+
+
+ def update_orders(self, min_activ_time = None):
+ #removes all orders that are not executable anymore
+ act_day = self.model.clock.get_day()
+ act_time = self.model.clock.get_MTU()
+
+ if not self.buyorders.empty:
+ self.buyorders = self.buyorders[(self.buyorders['delivery_day'] > act_day) |
+ ((self.buyorders['delivery_day'] == act_day) & (self.buyorders['delivery_time'] > act_time))]
+ if not self.sellorders.empty:
+ self.sellorders = self.sellorders[(self.sellorders['delivery_day'] > act_day) |
+ ((self.sellorders['delivery_day'] == act_day) & (self.sellorders['delivery_time'] > act_time))]
+
+ def remove_matched_orders(self, orderID_df):
+ #make index objects from it
+ ind = orderID_df.set_index('order_id').index
+ ind1 = self.buyorders.set_index('order_id').index
+ self.buyorders = self.buyorders[~ind1.isin(ind)]
+ ind1 = self.sellorders.set_index('order_id').index
+ self.sellorders = self.sellorders[~ind1.isin(ind)]
+
+ def remove_market_orders(self):
+ #market orders are fill-and-kill: they do not remain in the orderbook when not instantaneously matched
+ self.buyorders = self.buyorders.loc[self.buyorders['order_type']!='market_order'].copy()
+ self.sellorders = self.sellorders.loc[self.sellorders['order_type']!='market_order'].copy()
+
+ def adjust_partial_match_orders(self, orderID_df):
+ #this index reset is needed for alignment with orderID_df and correct mask
+ self.buyorders = self.buyorders.set_index('order_id')
+ self.sellorders = self.sellorders.set_index('order_id')
+ orderID_df.set_index('order_id', inplace =True)
+
+ mask1 = self.buyorders.index.isin(orderID_df.index)
+ mask2 = self.sellorders.index.isin(orderID_df.index)
+
+ #adds new temporary rem_vol column
+ self.buyorders= self.buyorders.join(orderID_df['rem_vol'])
+ self.sellorders= self.sellorders.join(orderID_df['rem_vol'])
+
+ #overwrites quantity with remaining quantity where ~mask is False
+ self.buyorders['quantity']= self.buyorders['quantity'].where(~mask1,self.buyorders['rem_vol'])
+ self.sellorders['quantity']= self.sellorders['quantity'].where(~mask2,self.sellorders['rem_vol'])
+
+ self.buyorders.reset_index(inplace = True)
+ self.sellorders.reset_index(inplace = True)
+ #drop all columns like rem_vol etc.
that come from manipulation + self.buyorders = self.buyorders[self.offerlabels] + self.sellorders = self.sellorders[self.offerlabels] + + def get_obook_as_multiindex(self, selection= None, incl_location = False): + if selection == 'buyorders': + obook_as_multiindex = self.buyorders.copy() + sort_rule= [False,True] + elif selection == 'sellorders': + obook_as_multiindex = self.sellorders.copy() + sort_rule= [True,True] + else: + raise Exception('selection argument is unknown') + if incl_location == True: + mi_columns = ['delivery_location','delivery_day','delivery_time'] + elif incl_location == False: + mi_columns = ['delivery_day','delivery_time'] + obook_as_multiindex = obook_as_multiindex.set_index(mi_columns) + obook_as_multiindex.sort_index(level = 1, inplace = True) + obook_as_multiindex.sort_values(by =['price','init_time'], ascending= sort_rule, inplace=True) + + #useful columns for clearing and settlement + obook_as_multiindex['rem_vol'] = obook_as_multiindex['quantity'] + obook_as_multiindex['cleared_quantity'] = np.NaN + obook_as_multiindex['matched_order'] = np.NaN + obook_as_multiindex['cleared_price']= np.NaN + return (obook_as_multiindex) + + + + + + diff --git a/asam classes/Reports.py b/asam classes/Reports.py new file mode 100644 index 0000000..294f28c --- /dev/null +++ b/asam classes/Reports.py @@ -0,0 +1,777 @@ +# -*- coding: utf-8 -*- +""" +Created on Wed Oct 11 09:59:29 2017 +@author: Samuel Glismann +""" + +from mesa import Agent, Model +import pandas as pd +from pandas import Series, DataFrame +import numpy as np +from ast import literal_eval + + + +class Reports(): + def __init__(self, model): + self.model = model + #all_books dict is used to capture market statistics + self.all_books ={} + try: + self.all_books['RDM']=self.model.red_obook + except AttributeError: + pass + try: + self.all_books['IDM']=self.model.IDM_obook + except AttributeError: + pass + try: + self.all_books['DAM']=self.model.DAM_obook + except AttributeError: + pass + try: + self.all_books['BEM']=self.model.BEM_obook + except AttributeError: + pass + + #here it is possible to add simple reports in MESA style + self.table_reporters = {} + self.model_reporters = {} + self.agent_reporters = {"bank_deposit": lambda a: a.money, + "step_rank": lambda a: a.step_rank} + + + self.prices= self.model.clock.schedule_template.copy() + self.prices = self.prices.assign(DAP=None,IBP_short=None,IBP_long=None, + control_state=None,IDM_weighted_mean=None, + RDM_upwards_weighted_mean=None, RDM_downwards_weighted_mean=None, + RDM_spread_weighted_mean=None) + self.prices.drop('commit',axis=1, inplace=True) + + #Df with expected imbalance prices. This is updated every step and has size of schedules_horizon + self.eIBP = DataFrame() + + + def publish_DAM_prices(self,clearing_prices): + #receives hourly clearing prices from DAM operator and converts them to prices per 15 minute MTU + #Note: this method needs to be changed when a 15-minute DAM is simulated. 
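The hourly-to-quarter-hour expansion in the method below relies on a simple concat-and-sort trick; a minimal sketch with toy data:

    import pandas as pd

    hourly = pd.DataFrame({'price': [30, 42]}, index=[0, 1])  # two toy hours
    quarter = pd.concat([hourly, hourly, hourly, hourly])
    quarter.sort_index(inplace=True)
    print(quarter['price'].tolist())  # -> [30, 30, 30, 30, 42, 42, 42, 42]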
+ clearing_prices = pd.concat([clearing_prices,clearing_prices,clearing_prices,clearing_prices])
+ clearing_prices.sort_index(inplace=True)
+ if (self.model.clock.get_MTU() >= self.model.DA_marketoperator.gate_closure_time) & (
+ self.model.schedule.steps == 0):
+ #get the right delivery mtu from the schedules horizon
+ mtus = list(self.model.schedules_horizon.index.get_level_values(1))
+ elif (self.model.clock.get_MTU() < self.model.DA_marketoperator.gate_closure_time) & (
+ self.model.schedule.steps == 0):
+ mtus = list(range(self.model.clock.get_MTU(),97))
+ elif self.model.clock.get_MTU() == self.model.DA_marketoperator.gate_closure_time:
+ mtus = list(range(1,97))
+ #cut off MTUs of the first hour that lie in the past.
+ clearing_prices = clearing_prices.iloc[len(clearing_prices)-len(mtus):]
+ clearing_prices['delivery_time'] = mtus
+ clearing_prices.drop('delivery_hour', axis = 1, inplace = True)
+ clearing_prices.set_index(['delivery_day','delivery_time'], inplace= True)
+ #the first column carries the bus name from the PyPSA model; rename it to the generic 'DAP'
+ clearing_prices.rename(columns={clearing_prices.columns[0]:'DAP'},inplace =True)
+ self.prices.loc[clearing_prices.index, 'DAP'] = clearing_prices['DAP']
+
+ def publish_BEM_control_state(self,cur_control_state, day, mtu):
+ self.prices.loc[(day,mtu),'control_state'] = cur_control_state
+
+ def publish_IBM_prices(self,IBP_short,IBP_long, day, mtu):
+ self.prices.loc[(day,mtu),'IBP_short'] = IBP_short
+ self.prices.loc[(day,mtu),'IBP_long'] = IBP_long
+
+ def publish_RDM_wm_prices(self):
+ #make weighted averages over all simulation steps
+ qty_red_up=self.model.red_obook.cleared_sell_sum_quantity
+ wm_red_up=(self.model.red_obook.cleared_sell_wm_price * qty_red_up/(qty_red_up.sum().T)).sum()
+
+ qty_red_down=self.model.red_obook.cleared_buy_sum_quantity
+ wm_red_down=(self.model.red_obook.cleared_buy_wm_price * qty_red_down/(qty_red_down.sum().T)).sum()
+ #Note: if one of the two values is missing, 0 is assumed for it (fill_value=0).
+ wm_red_spread =wm_red_up.sub(wm_red_down, fill_value=0)
+
+ self.prices['RDM_upwards_weighted_mean'] =wm_red_up
+ self.prices['RDM_downwards_weighted_mean'] =wm_red_down
+ self.prices['RDM_spread_weighted_mean'] =wm_red_spread
+
+ def publish_IDM_wm_prices(self):
+ #make weighted averages over all simulation steps
+ qty_IDM = self.model.IDM_obook.cleared_sell_sum_quantity
+ wm_IDM=(self.model.IDM_obook.cleared_sell_wm_price * qty_IDM/(qty_IDM.sum().T)).sum()
+ self.prices['IDM_weighted_mean'] = wm_IDM
+
+ def get_cleared_prices(self):
+ self.publish_RDM_wm_prices()
+ self.publish_IDM_wm_prices()
+ return(self.prices)
+
+ def update_expected_IBP(self, MTU_of_h_consideration=False):
+ """
+ The expected imbalance price is used by agents to evaluate risks in their price mark-up calculations.
+ The expected imbalance price method uses precalculated expected prices depending on day-ahead prices.
+ When MTU_of_h_consideration==True, the method makes a distinction regarding the MTU of the hour.
+ For this option the input data (opportunity_cost_db) must be delivered accordingly
+ with a column 'PTU_of_an_hour' element {1,2,3,4}
+
+ Note: the expected IBP dataframe is updated every step to ensure similar length
+ with self.model.schedules_horizon
+
+ """
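The MTU-of-the-hour mapping used in the method body can be checked in isolation: MTUs count 1..96, so mtu % 4 yields 1, 2, 3 and 0, where 0 stands for the fourth MTU of an hour.

    def mtu_of_hour(mtu):
        # map MTU 1..96 to its position within the hour (1..4)
        m = mtu % 4
        return 4 if m == 0 else m

    print([mtu_of_hour(m) for m in [1, 2, 3, 4, 5, 95, 96]])
    # -> [1, 2, 3, 4, 1, 3, 4]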
+ For this option the input data (opportunity_cost_db) must be delivered accordingly
+ with a column 'PTU_of_an_hour' element {1,2,3,4}
+
+ Note: the expected IBP dataframe is updated every step to ensure the same length
+ as self.model.schedules_horizon
+
+ """
+ if not self.prices['DAP'].isnull().all():
+ self.eIBP = self.prices['DAP'].loc[self.model.schedules_horizon.index].to_frame()
+ else:
+ #if there is either no DAM run, or a default run without hourly prices
+ self.eIBP = self.model.schedules_horizon
+ self.eIBP['DAP'] = 30
+
+ #MTU of hour list needed to get the specific expected prices
+ MTU = list(self.eIBP.index.get_level_values(1))
+ if MTU_of_h_consideration==True:
+ MTU_of_h = []
+ for mtu in MTU:
+ mtu_of_h = mtu%4
+ if mtu_of_h == 0:
+ mtu_of_h=4
+ MTU_of_h += [mtu_of_h]
+ DAP = list(self.eIBP['DAP'])
+ odf = self.model.exodata.opportunity_costs_db
+ expected_values_short =[]
+ expected_values_long =[]
+ for p in range(len(DAP)):
+ #expected value of IBP given a DA bin. The K-value is irrelevant here, but one existing value (e.g. 30) must be chosen to get a unique return.
+ try:
+ if MTU_of_h_consideration==True:
+ value_short =odf.loc[(odf['price_data']=='IB_price_short') & (odf['PTU_of_an_hour']==MTU_of_h[p]) & (
+ odf['DAP_left_bin']<=DAP[p]) & (odf['DAP_right_bin(excl)'] >DAP[p] ) & (
+ odf['K-value'] == 30),'bin_exp_value'].iloc[0]
+ else:
+ value_short =odf.loc[(odf['price_data']=='IB_price_short') & (
+ odf['DAP_left_bin']<=DAP[p]) & (odf['DAP_right_bin(excl)'] >DAP[p] ) & (
+ odf['K-value'] == 30),'bin_exp_value'].iloc[0]
+ except:
+ import pdb
+ pdb.set_trace()
+
+ expected_values_short += [int(round(value_short,0))]
+
+ if MTU_of_h_consideration==True:
+ value_long =odf.loc[(odf['price_data']=='IB_price_long') & (odf['PTU_of_an_hour']==MTU_of_h[p]) & (
+ odf['DAP_left_bin']<=DAP[p]) & (odf['DAP_right_bin(excl)'] >DAP[p] ) & (
+ odf['K-value'] == 30),'bin_exp_value'].iloc[0]
+ else:
+ value_long =odf.loc[(odf['price_data']=='IB_price_long') & (
+ odf['DAP_left_bin']<=DAP[p]) & (odf['DAP_right_bin(excl)'] >DAP[p] ) & (
+ odf['K-value'] == 30),'bin_exp_value'].iloc[0]
+ expected_values_long += [int(round(value_long,0))]
+
+ self.eIBP['expected_IBP_short'] =expected_values_short
+ self.eIBP['expected_IBP_long'] = expected_values_long
+
+
+ def save_all_orders_per_round(self):
+ """
+ - method must be applied before save_market_stats(),
+ because the per-round order dataframes are reset each round"""
+ day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ key=(day,MTU)
+ for obook in self.all_books.keys():
+ #save orders and add offer time
+ self.all_books[obook].buyorders_full_step['offer_daytime']= [key]*len(self.all_books[obook].buyorders_full_step)
+ self.all_books[obook].buyorders_all_df = pd.concat([self.all_books[obook].buyorders_all_df,
+ self.all_books[obook].buyorders_full_step], ignore_index=True)
+ self.all_books[obook].sellorders_full_step['offer_daytime']= [key]*len(self.all_books[obook].sellorders_full_step)
+ self.all_books[obook].sellorders_all_df = pd.concat([self.all_books[obook].sellorders_all_df,
+ self.all_books[obook].sellorders_full_step], ignore_index=True)
+ self.all_books[obook].cleared_buyorders['offer_daytime']= [key]*len(self.all_books[obook].cleared_buyorders)
+ self.all_books[obook].cleared_buyorders_all_df = pd.concat([self.all_books[obook].cleared_buyorders_all_df,
+ self.all_books[obook].cleared_buyorders], ignore_index=True)
+ self.all_books[obook].cleared_sellorders['offer_daytime']= [key]*len(self.all_books[obook].cleared_sellorders)
+ self.all_books[obook].cleared_sellorders_all_df = pd.concat([self.all_books[obook].cleared_sellorders_all_df,
+ self.all_books[obook].cleared_sellorders], ignore_index=True)
+ if obook == 'RDM':
+ #considering also redispatch demand orders
+ self.all_books[obook].redispatch_demand_orders_upward['offer_daytime']= [key]*len(self.all_books[obook].redispatch_demand_orders_upward)
+ self.all_books[obook].redispatch_demand_upward_all_df=pd.concat([self.all_books[obook].redispatch_demand_upward_all_df,
+ self.all_books[obook].redispatch_demand_orders_upward], ignore_index=True)
+ self.all_books[obook].redispatch_demand_orders_downward['offer_daytime']= [key]*len(self.all_books[obook].redispatch_demand_orders_downward)
+ self.all_books[obook].redispatch_demand_downward_all_df=pd.concat([self.all_books[obook].redispatch_demand_downward_all_df,
+ self.all_books[obook].redispatch_demand_orders_downward], ignore_index=True)
+
+ #reset the per-round order dataframes, so that the orderbook only keeps currently valid orders
+ self.all_books[obook].sellorders_full_step =self.all_books[obook].sellorders_full_step.iloc[0:0]
+ self.all_books[obook].buyorders_full_step =self.all_books[obook].buyorders_full_step.iloc[0:0]
+ self.all_books[obook].cleared_sellorders =self.all_books[obook].cleared_sellorders.iloc[0:0]
+ self.all_books[obook].cleared_buyorders =self.all_books[obook].cleared_buyorders.iloc[0:0]
+ if obook == 'RDM':
+ self.all_books[obook].redispatch_demand_orders_upward=self.all_books[obook].redispatch_demand_orders_upward.iloc[0:0]
+ self.all_books[obook].redispatch_demand_orders_downward=self.all_books[obook].redispatch_demand_orders_downward.iloc[0:0]
+
+ def save_market_stats(self, mode='at_end'):
+ """ This method stores market statistics regarding offered and cleared quantities and prices.
+ It can also store these statistics per simulation step to analyse intermediate simulation stages.
+ The intermediate statistics may also be used by agents.
+
+ In a matrix with delivery time in columns and simulation time (and area in case of redispatch) in index,
+ the following indicators are saved per market:
+ - sum of offered and cleared quantity per direction (buy, sell)
+ - minimum, maximum and quantity-weighted average prices for offered and cleared orders per direction
+
+ Note:
+ - if mode 'at_end' is chosen, the statistics for all markets are calculated only once per simulation.
+ This saves time compared to calculating them per step with mode 'every_step'.
+ - sum quantity indicators are the sum of (cleared) order MW, not MWh
+ - Admittedly, this method could be written more elegantly if the statistics per variable were stored in a multi-index dataframe or nested dictionary.
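+ - Layout example (added for illustration): in sell_sum_quantity, the cell at
+ row (1, 45) and column (1, 48) holds the summed quantity of sell orders
+ offered at simulation MTU 45 of day 1 for delivery at MTU 48 of day 1.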
+ """
+
+ #calculate timestamp of previous round
+ day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+
+ self.save_all_orders_per_round()
+ if (mode =='every_step')|((day==self.model.clock.end_date[0])&(MTU == self.model.clock.end_date[1])):
+ print('save market statistics of previous round')
+
+ if mode =='every_step':
+ offer_daytimes = [(day,MTU)]
+ elif mode =='at_end':
+ offer_daytimes = list(self.model.clock.report_time_matrix.index.values)
+ for obook in self.all_books.keys():
+ #select index type
+ if obook == 'RDM': #has also gridarea in index
+ round_index = (slice(None),slice(day,day),slice(MTU,MTU))
+ else:
+ round_index = (day,MTU)
+
+ #Offered sell orders
+ if self.all_books[obook].sellorders_all_df.loc[
+ self.all_books[obook].sellorders_all_df['offer_daytime'].isin(offer_daytimes)].empty:
+ pass
+ else:
+ if obook =='IDM':
+ #market order prices need to be set to np.nan to avoid distorting the statistics
+ self.all_books[obook].sellorders_all_df.loc[self.all_books[obook].sellorders_all_df
+ ['order_type'] == 'market_order', 'price'] = np.nan
+ #get statistics of the orders
+ res = self.group_statistic(self.all_books[obook].sellorders_all_df.loc[
+ self.all_books[obook].sellorders_all_df['offer_daytime'].isin(offer_daytimes)], 'quantity', obook)
+
+ #add these statistics to respective statistic variables of the orderbook
+ if obook=='RDM':
+ included_areas=list(res.index.unique().get_level_values(level=0))
+ res=res.unstack(level=0)
+ for i in range(len(included_areas)):
+ area =included_areas[i]
+ #store results in report matrices
+ self.all_books[obook].sell_sum_quantity.loc[(area,day,MTU),:] = res.loc[:,('quantity','sum',area)].copy()
+ self.all_books[obook].sell_min_price.loc[(area,day,MTU),:] = res.loc[:,('price','min',area)].copy()
+ self.all_books[obook].sell_wm_price.loc[(area,day,MTU),:] = res.loc[:,('price','weighted_mean',area)].copy()
+ self.all_books[obook].sell_max_price.loc[(area,day,MTU),:] = res.loc[:,('price','max',area)].copy()
+ else:
+ #store results in report matrices
+ self.all_books[obook].sell_sum_quantity.loc[round_index,res.index] = res[('quantity','sum')].copy()
+ self.all_books[obook].sell_min_price.loc[round_index,res.index] = res[('price','min')].copy()
+ self.all_books[obook].sell_wm_price.loc[round_index,res.index] = res[('price','weighted_mean')].copy()
+ self.all_books[obook].sell_max_price.loc[round_index,res.index] = res[('price','max')].copy()
+
+ #Offered buy orders
+ if self.all_books[obook].buyorders_all_df.loc[
+ self.all_books[obook].buyorders_all_df['offer_daytime'].isin(offer_daytimes)].empty:
+ pass
+ else:
+ if obook =='IDM':
+ #market order prices need to be set to np.nan to avoid distorting the statistics
+ self.all_books[obook].buyorders_all_df.loc[self.all_books[obook].buyorders_all_df
+ ['order_type'] == 'market_order', 'price'] = np.nan
+ #get statistics of the orders
+ res = self.group_statistic(self.all_books[obook].buyorders_all_df.loc[
+ self.all_books[obook].buyorders_all_df['offer_daytime'].isin(offer_daytimes)], 'quantity', obook)
+ #add these statistics to respective statistic variables of the orderbook
+ if obook=='RDM':
+ included_areas=list(res.index.unique().get_level_values(level=0))
+ res=res.unstack(level=0)
+ for i in range(len(included_areas)):
+ area =included_areas[i]
+ #store results in report matrices
+ self.all_books[obook].buy_sum_quantity.loc[(area,day,MTU),:] = res.loc[:,('quantity','sum',area)].copy()
+ self.all_books[obook].buy_min_price.loc[(area,day,MTU),:] = res.loc[:,('price','min',area)].copy()
+ self.all_books[obook].buy_wm_price.loc[(area,day,MTU),:] = res.loc[:,('price','weighted_mean',area)].copy()
+ self.all_books[obook].buy_max_price.loc[(area,day,MTU),:] = res.loc[:,('price','max',area)].copy()
+ else:
+ #store results in report matrices
+ self.all_books[obook].buy_sum_quantity.loc[round_index, res.index] = res[('quantity','sum')].copy()
+ self.all_books[obook].buy_min_price.loc[round_index, res.index] = res[('price','min')].copy()
+ self.all_books[obook].buy_wm_price.loc[round_index, res.index] = res[('price','weighted_mean')].copy()
+ self.all_books[obook].buy_max_price.loc[round_index, res.index] = res[('price','max')].copy()
+
+ #Cleared sell orders
+ if self.all_books[obook].cleared_sellorders_all_df.loc[
+ self.all_books[obook].cleared_sellorders_all_df['offer_daytime'].isin(offer_daytimes)].empty:
+ pass
+ else:
+ res = self.group_statistic(self.all_books[obook].cleared_sellorders_all_df.loc[
+ self.all_books[obook].cleared_sellorders_all_df['offer_daytime'].isin(offer_daytimes)], 'cleared_quantity', obook)
+ if obook=='RDM':
+ included_areas=list(res.index.unique().get_level_values(level=0))
+ res=res.unstack(level=0)
+ for i in range(len(included_areas)):
+ area =included_areas[i]
+ #store results in report matrices
+ self.all_books[obook].cleared_sell_sum_quantity.loc[(area,day,MTU),:] = res.loc[:,('cleared_quantity','sum',area)].copy()
+ self.all_books[obook].cleared_sell_min_price.loc[(area,day,MTU),:] = res.loc[:,('cleared_price','min',area)].copy()
+ self.all_books[obook].cleared_sell_wm_price.loc[(area,day,MTU),:] = res.loc[:,('cleared_price','weighted_mean',area)].copy()
+ self.all_books[obook].cleared_sell_max_price.loc[(area,day,MTU),:] = res.loc[:,('cleared_price','max',area)].copy()
+ else:
+ #store results in report matrices
+ self.all_books[obook].cleared_sell_sum_quantity.loc[round_index, res.index] = res[('cleared_quantity','sum')].copy()
+ self.all_books[obook].cleared_sell_min_price.loc[round_index, res.index] = res[('cleared_price','min')].copy()
+ self.all_books[obook].cleared_sell_wm_price.loc[round_index, res.index] = res[('cleared_price','weighted_mean')].copy()
+ self.all_books[obook].cleared_sell_max_price.loc[round_index, res.index] = res[('cleared_price','max')].copy()
+
+ #Cleared buy orders
+ if self.all_books[obook].cleared_buyorders_all_df.loc[
+ self.all_books[obook].cleared_buyorders_all_df['offer_daytime'].isin(offer_daytimes)].empty:
+ pass
+ else:
+ res = self.group_statistic(self.all_books[obook].cleared_buyorders_all_df.loc[
+ self.all_books[obook].cleared_buyorders_all_df['offer_daytime'].isin(offer_daytimes)], 'cleared_quantity', obook)
+ if obook=='RDM':
+ included_areas=list(res.index.unique().get_level_values(level=0))
+ res=res.unstack(level=0)
+ for i in range(len(included_areas)):
+ area =included_areas[i]
+ #store results in report matrices
+ self.all_books[obook].cleared_buy_sum_quantity.loc[(area,day,MTU),:] = res.loc[:,('cleared_quantity','sum',area)].copy()
+ self.all_books[obook].cleared_buy_min_price.loc[(area,day,MTU),:] = res.loc[:,('cleared_price','min',area)].copy()
+ self.all_books[obook].cleared_buy_wm_price.loc[(area,day,MTU),:] = res.loc[:,('cleared_price','weighted_mean',area)].copy()
+ self.all_books[obook].cleared_buy_max_price.loc[(area,day,MTU),:] = res.loc[:,('cleared_price','max',area)].copy()
+ else:
+ #store results in report matrices
+ self.all_books[obook].cleared_buy_sum_quantity.loc[round_index, res.index] = res[('cleared_quantity','sum')].copy()
+ self.all_books[obook].cleared_buy_min_price.loc[round_index, res.index] = res[('cleared_price','min')].copy()
+ self.all_books[obook].cleared_buy_wm_price.loc[round_index, res.index] = res[('cleared_price','weighted_mean')].copy()
+ self.all_books[obook].cleared_buy_max_price.loc[round_index, res.index] = res[('cleared_price','max')].copy()
+ #add also redispatch demand statistics
+ if obook=='RDM':
+ ###round_index was already defined at the beginning of the loop
+ #get redispatch demand order quantity upward
+ if self.all_books[obook].redispatch_demand_upward_all_df.loc[
+ self.all_books[obook].redispatch_demand_upward_all_df['offer_daytime'].isin(offer_daytimes)].empty:
+ pass
+ else:
+ red = self.group_statistic(self.all_books[obook].redispatch_demand_upward_all_df.loc[
+ self.all_books[obook].redispatch_demand_upward_all_df['offer_daytime'].isin(offer_daytimes)].fillna(value=0), 'quantity', obook)
+ included_areas=list(red.index.unique().get_level_values(level=0))
+ red=red.unstack(level=0)
+ for i in range(len(included_areas)):
+ area =included_areas[i]
+ self.all_books[obook].redispatch_demand_upward.loc[(area,day,MTU),:] = red.loc[:,('quantity','sum',area)].copy()
+ #get redispatch demand order quantity downward
+ if self.all_books[obook].redispatch_demand_downward_all_df.loc[
+ self.all_books[obook].redispatch_demand_downward_all_df['offer_daytime'].isin(offer_daytimes)].empty:
+ pass
+ else:
+ red = self.group_statistic(self.all_books[obook].redispatch_demand_downward_all_df.loc[
+ self.all_books[obook].redispatch_demand_downward_all_df['offer_daytime'].isin(offer_daytimes)].fillna(value=0), 'quantity', obook)
+ included_areas=list(red.index.unique().get_level_values(level=0))
+ red=red.unstack(level=0)
+ for i in range(len(included_areas)):
+ area =included_areas[i]
+ self.all_books[obook].redispatch_demand_downward.loc[(area,day,MTU),:] = red.loc[:,('quantity','sum',area)].copy()
+
+
+ def group_statistic(self, data, quantity_type, obook):
+ """quantity_type must be string 'quantity' or string 'cleared_quantity'"""
+ if quantity_type == 'cleared_quantity':
+ price_type = 'cleared_price'
+ else:
+ price_type = 'price'
+ if obook=='RDM':
+ group_lst= ['delivery_location','delivery_day','delivery_time']
+ else:
+ group_lst= ['delivery_day','delivery_time']
+ changed_price=False
+ if pd.isnull(data[price_type]).all():
+ #to avoid errors with agg() function
+ data[price_type]=1
+ changed_price=True
+
+ data=data.reset_index()
+ grouped = data.groupby(group_lst)
+ #weighted-mean helper for agg()
+ wm = lambda x: np.average(x, weights=data.loc[x.index,quantity_type])
+ f = {quantity_type: ['sum'],price_type:{'weighted_mean' : wm,
+ 'mean': np.mean, 'max': np.max, 'min': np.min} }
+ result =grouped.agg(f)
+
+ if changed_price == True:
+ #make price results np.nan again, but keep the dataframe format
+ result[price_type]=np.nan
+
+ result= result.reset_index()
+ result[['delivery_day','delivery_time']]=result[['delivery_day','delivery_time']].astype(int)
+ result.set_index(group_lst, inplace=True)
+ result.sort_index(inplace = True)
+ return(result)
+
+
+ def get_system_trade(self):
+ """sum of cleared orders (buy and sell separated, and as total net balance)
+ for all delivery times.
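+ For the commodity markets this means (illustration):
+ sum_trades = (DAM_buy + IDM_buy) - (DAM_sell + IDM_sell).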
+ Result shows all trades up to the step of method application"""
+ all_trades=DataFrame()
+ buy_keys =[]
+ sell_keys=[]
+ for obook in self.all_books.keys():
+ csell = self.all_books[obook].cleared_sell_sum_quantity.sum()
+ cbuy =self.all_books[obook].cleared_buy_sum_quantity.sum()
+ all_trades[str(obook)+ '_sell' ] = csell
+ all_trades[str(obook)+ '_buy' ] = cbuy
+ if (str(obook) =='DAM')|(str(obook) =='IDM'):
+ #store keys of commodity markets to check consistency
+ buy_keys =buy_keys +[str(obook)+ '_buy']
+ sell_keys =sell_keys +[str(obook)+ '_sell']
+
+ all_trades['sum_trades'] = all_trades[buy_keys].sum(axis=1)-all_trades[sell_keys].sum(axis=1)
+ return (all_trades)
+
+ def get_all_trade_schedules(self):
+ #latest trade schedules per agent in a dataframe
+ all_trade_schedules = DataFrame()
+ for agent in self.model.schedule.agents:
+ df=agent.trade_schedule.copy()
+ df.drop(['commit','total_dispatch'], axis=1, inplace=True)
+ df.columns=pd.MultiIndex.from_product([[agent.unique_id],df.columns])
+ all_trade_schedules = pd.concat([all_trade_schedules,df], axis=1)
+ return (all_trade_schedules)
+
+ def get_all_returns(self):
+ #latest returns per agent in a dataframe
+ all_returns = DataFrame()
+ for agent in self.model.schedule.agents:
+ df=agent.financial_return.copy()
+ df.drop(['commit'], axis=1, inplace=True)
+ df.columns=pd.MultiIndex.from_product([[agent.unique_id],df.columns])
+ all_returns = pd.concat([all_returns,df], axis=1)
+ return (all_returns)
+
+
+ def get_system_dispatch(self):
+ #latest dispatch schedules per agent in a dataframe
+ all_scheds = {}
+ for agent in self.model.schedule.agents:
+ for asset in agent.assets['object']:
+ name = asset.assetID
+ all_scheds[name]=asset.schedule['commit']
+ dispatch_df = DataFrame.from_dict(all_scheds)
+ dispatch_df['sum_dispatch'] = dispatch_df.sum(axis=1)
+ return (dispatch_df)
+
+ def get_system_cost(self):
+ """ caveat: the intraday return is counted twice (once per buyer and once per seller), while redispatch and DA returns are not"""
+ all_return = {}
+ all_dispatch_cost = {}
+ for agent in self.model.schedule.agents:
+ all_return[agent.unique_id] = agent.financial_return['total_return']
+ all_dispatch_cost[agent.unique_id] = agent.financial_return['total_dispatch_costs']
+ return_df = DataFrame.from_dict(all_return)
+ return_df['sum_return'] = return_df.sum(axis = 1)
+ dcost_df = DataFrame.from_dict(all_dispatch_cost)
+ dcost_df['sum_dispatch_cost'] = dcost_df.sum(axis = 1)
+ df = pd.concat([return_df['sum_return'],dcost_df['sum_dispatch_cost']], axis =1)
+ df['producer_surplus'] = df['sum_return'] + df['sum_dispatch_cost']
+ return (df)
+
+
+ def redispatch_PI(self, norm_values_to =None):
+ """
+ Method provides the performance indicators 'over-procurement',
+ 'under-procurement' and 'redispatch imbalance' per delivery timestamp.
+ Note that the values are in MW, not MWh"""
+
+ #get a dataframe with cleared redispatch and imbalances for the past, up to the horizon end.
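+ #Worked example (editor's illustration, not model logic): with 100 MW
+ #redispatch demand and 120 MW cleared RDM buy volume in one MTU,
+ #residual_demand_downwards = 120 - 100 = 20 MW, counted as overproc_downwards;
+ #with only 80 MW cleared it would be -20 MW, i.e. 20 MW underproc_downwards.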
+ r_pi = pd.concat([self.model.aGridAndSystemOperator.imbalances['imbalance_redispatch'],
+ self.model.aGridAndSystemOperator.system_transactions[['RDM_buy', 'RDM_sell']]],axis=1)
+ r_pi['redispatch_demand'] = np.nan
+
+ #Get all identified congestions up to this simulation step
+ cong = self.model.exodata.congestions.set_index(['identification_day','identification_MTU']).loc[
+ :self.model.schedules_horizon.index[0]]
+ #ensure that congestions before starttime are not considered
+ cong=cong.loc[(self.model.exodata.sim_task['start_day'],self.model.exodata.sim_task['start_MTU']):].reset_index()
+
+ if self.model.exodata.sim_task['congestions'] == 'exogenious':
+ if cong.empty:
+ pass
+ else:
+ for i in range(len(cong)):
+ #there can be overlapping congestions
+ a_cong= r_pi.loc[(slice(cong.loc[i,'congestion_start_day'],cong.loc[
+ i,'congestion_end_day']), slice(cong.loc[i,'congestion_start_time'
+ ], cong.loc[i,'congestion_end_time'
+ ])), 'redispatch_demand']
+ a_cong = a_cong.add(cong.loc[i,'redispatch_quantity'], fill_value=0)
+ try:
+ r_pi.loc[a_cong.index,'redispatch_demand'] = r_pi.loc[a_cong.index,'redispatch_demand'].add(a_cong, fill_value =0)
+ except:
+ #a_cong not in the index horizon of r_pi
+ pass
+ elif self.model.exodata.sim_task['congestions'] == 'from_scenario':
+ if cong.empty:
+ pass
+ else:
+ cong.set_index(['delivery_day','delivery_time'], inplace=True)
+ r_pi.loc[cong.index,'redispatch_demand'] = cong['congestion_MW']
+ else:
+ raise Exception('redispatch_PI does not know this sim_task congestion parameter')
+
+ r_pi['residual_demand_downwards'] = r_pi['RDM_buy']- r_pi['redispatch_demand']
+ r_pi['residual_demand_upwards'] = r_pi['RDM_sell']- r_pi['redispatch_demand']
+ r_pi['overproc_downwards'] = r_pi['residual_demand_downwards'].where(r_pi['residual_demand_downwards']>0,np.nan)
+ r_pi['underproc_downwards'] = -r_pi['residual_demand_downwards'].where(r_pi['residual_demand_downwards']<0,np.nan)
+ r_pi['overproc_upwards'] = r_pi['residual_demand_upwards'].where(r_pi['residual_demand_upwards']>0,np.nan)
+ r_pi['underproc_upwards'] = -r_pi['residual_demand_upwards'].where(r_pi['residual_demand_upwards']<0,np.nan)
+ r_pi['redispatch_solved'] =pd.concat([r_pi['redispatch_demand'].fillna(value=0).where(r_pi['RDM_buy']>=r_pi['redispatch_demand'].fillna(value=0), r_pi['RDM_buy']
+ ), r_pi['redispatch_demand'].fillna(value=0).where(r_pi['RDM_sell']>=r_pi['redispatch_demand'].fillna(value=0), r_pi['RDM_sell'])], axis=1).sum(axis=1)/2
+ #to be added when needed:
+ #sum of overprocurement and underprocurement relative to redispatch demand, load, peak load
+ return(r_pi)
+
+ def redispatch_supply_demand_ratio(self):
+ #Please consult the ASAM documentation for the rationale behind the demand-supply ratio
+ if self.model.red_obook.redispatch_demand_upward_all_df.empty:
+ print('redispatch_supply_demand_ratio not determined, as demand df is empty')
+ return (None)
+ else:
+ #quantity mean supply/demand ratio per offer time
+ demand_up =self.model.red_obook.redispatch_demand_upward_all_df.groupby(by=['delivery_day','delivery_time','delivery_location','offer_daytime']).sum()['quantity']
+ demand_down =self.model.red_obook.redispatch_demand_downward_all_df.groupby(by=['delivery_day','delivery_time','delivery_location','offer_daytime']).sum()['quantity']
+ supply_up =self.model.red_obook.sellorders_all_df.groupby(by=['delivery_day','delivery_time','delivery_location','offer_daytime']).sum()['quantity']
+ supply_down =self.model.red_obook.buyorders_all_df.groupby(by=['delivery_day','delivery_time','delivery_location','offer_daytime']).sum()['quantity']
+
+ #remove redispatch demand for past delivery times
+ demand_up= demand_up.where(demand_up.reset_index().set_index(
+ ['delivery_day','delivery_time']
+ ).index.values >=demand_up.reset_index()['offer_daytime'].values,np.nan)
+ demand_up =demand_up.to_frame().join(supply_up.to_frame(), lsuffix='_demand',rsuffix='_supply').copy()
+
+ demand_down= demand_down.where(demand_down.reset_index().set_index(
+ ['delivery_day','delivery_time']
+ ).index.values >=demand_down.reset_index()['offer_daytime'].values,np.nan)
+ demand_down =demand_down.to_frame().join(supply_down.to_frame(), lsuffix='_demand',rsuffix='_supply').copy()
+
+ #calculate ratio
+ demand_up['s_d_ratio'] =demand_up['quantity_supply'].fillna(value=0).values/demand_up['quantity_demand'].values
+ demand_down['s_d_ratio'] =demand_down['quantity_supply'].fillna(value=0).values/demand_down['quantity_demand'].values
+ r_pi=Series()
+ #make mean values
+ r_pi['av_s_d_ratio_up'] = demand_up['s_d_ratio'].mean()
+ r_pi['av_s_d_ratio_down'] = demand_down['s_d_ratio'].mean()
+ return(r_pi)
+
+
+ def interdependence_indicators(self, quantity_indicator='sum'):
+ """ Method calculates statistics across offer time and delivery time on various markets.
+
+ Note:
+ quantity results can be confusing, as e.g. the average cleared quantity might
+ be larger than the average offered quantity.
+
+ Total quantity (sum), on the other hand, does not entail the 'averaging effects',
+ but can also be confusing because of large differences in trading periods
+ and resulting large differences in offered quantity. For both indicators
+ it is important to notice that cleared quantities influence the offered quantities.
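+ Illustration (added for clarity): an order of 50 MW offered unchanged over
+ four rounds contributes 200 MW to the total offered quantity, but only
+ 50 MW per round to the average; once cleared, it no longer appears in the
+ offered statistics of later rounds.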
+ """ + + indicators= DataFrame(index =list(self.all_books.keys()), columns=[ + 'qty_av_av_sell','qty_av_av_buy','qty_av_av_sell_cleared','qty_av_av_buy_cleared', + 'price_med_wav_sell','price_med_wav_buy','price_med_wav_sell_cleared','price_med_wav_buy_cleared', + 'return', 'return [%]']) + + for obook in self.all_books.keys(): + #calculate the average quantity offered per round, normed over all delivery periods + if not self.all_books[obook].buyorders_all_df.empty: + qty_buy= self.all_books[obook].buyorders_all_df.groupby(by=['delivery_day', 'delivery_time','offer_daytime'])['quantity'].sum() + indicators.loc[obook, 'qty_av_av_buy'] = (qty_buy.unstack(level=[0,1])).mean().mean() + indicators.loc[obook, 'qty_total_buy_MWh'] = qty_buy.sum().sum()/4 + if not self.all_books[obook].sellorders_all_df.empty: + qty_sell= self.all_books[obook].sellorders_all_df.groupby(by=['delivery_day', 'delivery_time','offer_daytime'])['quantity'].sum() + indicators.loc[obook, 'qty_av_av_sell'] = (qty_sell.unstack(level=[0,1])).mean().mean() + indicators.loc[obook, 'qty_total_sell_MWh'] =qty_sell.sum().sum()/4 + if not self.all_books[obook].cleared_buyorders_all_df.empty: + qty_buy_cleared= self.all_books[obook].cleared_buyorders_all_df.groupby(by=['delivery_day', 'delivery_time','offer_daytime'])['cleared_quantity'].sum() + indicators.loc[obook, 'qty_av_av_buy_cleared'] = (qty_buy_cleared.unstack(level=[0,1])).mean().mean() + indicators.loc[obook, 'qty_total_buy_cleared_MWh'] =qty_buy_cleared.sum().sum()/4 + if not self.all_books[obook].cleared_sellorders_all_df.empty: + qty_sell_cleared=self.all_books[obook].cleared_sellorders_all_df.groupby(by=['delivery_day', 'delivery_time','offer_daytime'])['cleared_quantity'].sum() + indicators.loc[obook, 'qty_av_av_sell_cleared'] = (qty_sell_cleared.unstack(level=[0,1])).mean().mean() + indicators.loc[obook, 'qty_total_sell_cleared_MWh'] =qty_sell_cleared.sum().sum()/4 + + qty_sell=self.all_books[obook].sell_sum_quantity + qty_buy=self.all_books[obook].buy_sum_quantity + qty_sell_cleared=self.all_books[obook].cleared_sell_sum_quantity + qty_buy_cleared=self.all_books[obook].cleared_buy_sum_quantity + + indicators.loc[obook, 'price_med_wav_sell'] = (self.all_books[obook].sell_wm_price * qty_sell/( + qty_sell.sum().T)).sum().median() + indicators.loc[obook, 'price_med_wav_buy'] = (self.all_books[obook].buy_wm_price * qty_buy/( + qty_buy.sum().T)).sum().median() + indicators.loc[obook, 'price_med_wav_sell_cleared'] = (self.all_books[obook].cleared_sell_wm_price * qty_sell_cleared/( + qty_sell_cleared.sum().T)).sum().median() + indicators.loc[obook, 'price_med_wav_buy_cleared'] = (self.all_books[obook].cleared_buy_wm_price * qty_buy_cleared/( + qty_buy_cleared.sum().T)).sum().median() + + indicators.loc[obook,'price_med_wav_spread'] = indicators.loc[obook, + 'price_med_wav_sell_cleared'] - indicators.loc[obook, 'price_med_wav_buy_cleared'] + + + indicators['total_qty_cleared [%]'] =(indicators[['qty_total_sell_cleared_MWh', + 'qty_total_buy_cleared_MWh']].sum(axis=1))/( + indicators[['qty_total_sell_cleared_MWh','qty_total_buy_cleared_MWh']].sum().sum()) *100 + + all_returns =DataFrame(index=['DA_return','ID_return','RD_return', 'BE_return','IB_return']) + all_profit_loss =DataFrame(index=['total_dispatch_costs', 'profit']) + + for agent in self.model.schedule.agents: + all_returns= pd.concat([all_returns,agent.financial_return[[ + 'DA_return','ID_return','RD_return', 'BE_return','IB_return']].sum()], axis=1) + 
all_profit_loss=pd.concat([all_profit_loss,agent.financial_return[['total_dispatch_costs', 'profit']].sum()],axis=1)
+
+ all_profit_loss=all_profit_loss.sum(axis=1)
+ all_profit_loss['system_operations_cost'] = self.model.aGridAndSystemOperator.financial_return['total_return'].sum()
+ all_profit_loss.rename({'profit': 'market_profit', 'total_dispatch_costs':'total_dispatch_cost'}, inplace=True)
+ all_profit_loss['cost_of_electricity'] =all_profit_loss['total_dispatch_cost'] -all_profit_loss['market_profit']
+
+ all_returns=all_returns.abs().sum(axis=1)
+ #to avoid double counting of IDM returns (sellers and buyers)
+ all_returns['ID_return'] =all_returns['ID_return']/2
+ if 'IDM' in indicators.index:
+ indicators.loc['IDM', 'return'] = all_returns['ID_return']
+ if 'DAM' in indicators.index:
+ indicators.loc['DAM', 'return'] = all_returns['DA_return']
+ if 'RDM' in indicators.index:
+ indicators.loc['RDM', 'return'] = all_returns['RD_return']
+ if 'BEM' in indicators.index:
+ indicators.loc['BEM', 'return'] = all_returns['BE_return']
+ indicators['return [%]'] =indicators['return']/indicators['return'].sum()*100
+ return(indicators, all_profit_loss)
+
+ def final_keyfigures(self):
+ """report overview of key indicators of a full simulation run.
+ This is the place to add new key indicators. However, they should be calculated in separate methods"""
+ indicators, allprofitloss =self.interdependence_indicators()
+ index_lst=[]
+ value_lst=[]
+ unit_lst=[]
+
+ index_lst+=list(self.model.aGridAndSystemOperator.system_transactions.sum().index)
+ value_lst+=list((self.model.aGridAndSystemOperator.system_transactions.sum()/4).values)
+ unit_lst+=['MWh','MWh','MWh','MWh','MWh','MWh','MWh','MWh','MWh']
+
+ index_lst+=list(self.model.aGridAndSystemOperator.imbalances.sum().index)
+ value_lst+=list((self.model.aGridAndSystemOperator.imbalances.sum()/4).values)
+ unit_lst+=['MWh','MWh','MWh','MWh','MWh','MWh']
+
+ index_lst+=list(self.redispatch_PI().sum().index)
+ value_lst+=list((self.redispatch_PI().sum()/4).values)
+ unit_lst+=['MWh','MWh','MWh','MWh','MWh','MWh','MWh','MWh','MWh','MWh','MWh']
+
+ if self.redispatch_supply_demand_ratio() is not None:
+ index_lst+=list(self.redispatch_supply_demand_ratio().index)
+ value_lst+=list(self.redispatch_supply_demand_ratio().values)
+ unit_lst+=['p.u. of redispatch demand upwards','p.u. of redispatch demand downwards']
+
+ index_lst+=list(self.model.aGridAndSystemOperator.financial_return.sum().index)
+ value_lst+=list(self.model.aGridAndSystemOperator.financial_return.sum().values)
+ unit_lst+=['€','€','€','€','€']
+
+ index_lst+=list(allprofitloss.index)
+ value_lst+=list(allprofitloss.values)
+ unit_lst+=['€','€','€','€']
+
+
+ if (self.model.exodata.DA_residual_load is not None):
+ #DA_residual_load may be None if a flat static profile is chosen.
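+ #Unit note (editor's illustration): the sums below are over 15-minute MTUs
+ #in MW, so dividing by 4 converts to MWh, e.g. 100 MW during one MTU = 25 MWh.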
+ index_lst+=list(self.model.exodata.DA_residual_load.set_index(
+ ['delivery_day','delivery_time']).loc[
+ self.model.aGridAndSystemOperator.financial_return.index,['load_DA_cor','residual_load_DA']].sum().index)
+ value_lst+=list((self.model.exodata.DA_residual_load.set_index(
+ ['delivery_day','delivery_time']).loc[
+ self.model.aGridAndSystemOperator.financial_return.index,['load_DA_cor','residual_load_DA']].sum()/4).values)
+ unit_lst+=['MWh','MWh']
+ else:
+ index_lst+=['load_DA_cor','residual_load_DA']
+ value_lst+=[np.nan, np.nan]
+ unit_lst+=['MWh','MWh']
+
+ if (len(index_lst)!=len(value_lst))|(len(index_lst)!=len(unit_lst)):
+ raise Exception('final_keyfigures has issues with units.')
+
+ keyfigures = DataFrame([value_lst,unit_lst], index=['value', 'unit']).T
+ keyfigures.index=index_lst
+ keyfigures.index.name='indicator'
+
+ #remove duplicates
+ keyfigures=keyfigures.groupby(keyfigures.index).first()
+
+ return (keyfigures)
+
+ def mark_ups_analysis(self, mode='cleared'):
+ """
+ Method calculates the total mark-up included in orders and adds it to the orderbook.
+ If the mark-up is positive, the order price is 'more expensive'.
+ If the mark-up is negative, the price is 'less expensive', i.e. it is a mark-down.
+
+ Note:
+ - mode determines if the mark-up is added for 'offered', 'cleared', or 'all' orders.
+ - SRMC are considered as fundamental cost."""
+ print('start mark-up analysis (added to orders dataframe). This can take a few minutes')
+ #get the srmc per offer
+ all_assets=self.model.exodata.get_all_assets()
+ def get_srmc_from_asset(asset_id):
+ if asset_id in all_assets['asset_id'].values:
+ return all_assets.loc[all_assets['asset_id']==asset_id].iloc[0]['srmc']
+ else:
+ return np.nan
+
+ for obook in self.all_books.keys():
+ print(obook)
+ if (mode=='offered')|(mode=='all'):
+
+ self.all_books[obook].sellorders_all_df['mark-up']= self.all_books[
+ obook].sellorders_all_df['price'].sub(self.all_books[
+ obook].sellorders_all_df['associated_asset'].apply(
+ lambda x: get_srmc_from_asset(x)))
+ #mark-ups for buy orders need to be reversed
+ self.all_books[obook].buyorders_all_df['mark-up']= - self.all_books[
+ obook].buyorders_all_df['price'].sub(self.all_books[
+ obook].buyorders_all_df['associated_asset'].apply(
+ lambda x: get_srmc_from_asset(x)))
+ if (mode=='cleared')|(mode=='all'):
+
+ self.all_books[obook].cleared_sellorders_all_df['mark-up']= self.all_books[
+ obook].cleared_sellorders_all_df['price'].sub(self.all_books[
+ obook].cleared_sellorders_all_df['associated_asset'].apply(
+ lambda x: get_srmc_from_asset(x)))
+ #mark-ups for buy orders need to be reversed
+ self.all_books[obook].cleared_buyorders_all_df['mark-up']= - self.all_books[
+ obook].cleared_buyorders_all_df['price'].sub(self.all_books[
+ obook].cleared_buyorders_all_df['associated_asset'].apply(
+ lambda x: get_srmc_from_asset(x)))
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/asam classes/Time.py b/asam classes/Time.py
new file mode 100644
index 0000000..ae4dce9
--- /dev/null
+++ b/asam classes/Time.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Aug 11 12:43:04 2017
+@author: Samuel Glismann
+
+Time class of the ASAM market model.
+
+The time class provides several time related multi_index templates:
+ - schedule template (used as forward horizon for agent actions)
+ - report time matrix (used to store intermediate simulation results)
+ - report time_location matrix (used to store intermediate simulation results, per grid_area)
+
+Methods:
+get_hour()
+get_day()
+get_MTU()
+asset_schedules_horizon ()
+calc_timestamp_by_steps ()
+calc_delivery_period_end ()
+calc_delivery_period_range ()
+
+Note:
+
+ - The MESA time class sets schedule.steps to 0. After the first schedule.step()
+ is called, schedule.steps is increased to 1.
+
+ - The step size of the model is 15 minutes. At this moment no other options are implemented.
+
+ - Time is usually expressed in tuples of (day, MTU).
+
+ - One day has 96 MTU.
+
+ - The first scheduled step (MESA starts counting at 0) is however MTU 1 (e.g. from 00:00 to 00:15).
+
+ - The model allows starting at a specific startday and startMTU.
+"""
+import pandas as pd
+from pandas import DataFrame
+
+class Time():
+ def __init__(self, model, step_size = "15_minutes", startday = 1, startMTU = 1, step_numbers=None, DA_GCT= 'D-1, MTU 45'):
+
+ self.model = model
+ if step_numbers is None:
+ raise Exception('Initiation of Time() instance needs the step_number of the simulation')
+ self.startday = startday
+ self.startMTU = startMTU
+ if (self.startday <= 0)|(self.startMTU <= 0):
+ raise Exception ('startday and startMTU must be > 0')
+
+ if step_size == '15_minutes':
+ #MTU = market time unit = 15 minutes by default
+ self.step_size = step_size
+ else:
+ raise Exception('step_size other than 15-minute MTU not implemented')
+
+ #Note: after day-ahead gate closure time, it usually takes 1 to 2 hours before results are definitive. From this moment
+ #onwards, market parties plan for the next day. Here it is assumed that results are known instantly.
+ if DA_GCT == 'D-1, MTU 45':
+ self.DA_results_known= 45
+ else:
+ raise Exception ('DA GCT not implemented in Time class')
+ #end date of simulation (tuple)
+ self.end_date = self.calc_timestamp_by_steps(0,step_numbers-1)
+ endday = self.end_date[0]
+ day = list(range(self.startday, endday + 1))
+ time = list(range(1,96 + 1))
+ gridarea = self.model.gridareas #list
+
+ #make scheduling template
+ mi2 =pd.MultiIndex.from_product([day, time],
+ names=['delivery_day', 'delivery_time'])
+ self.schedule_template= DataFrame(index =mi2)
+ self.schedule_template.sort_index(level=0,inplace = True)
+
+ #delete all dates before start moment
+ self.schedule_template = self.schedule_template.iloc[self.startMTU-1:]
+ self.schedule_template['commit'] = 0
+
+ #make report matrix from schedule_template by adding step time dimension
+ mi3 =self.schedule_template.index
+ simu_days=[]
+ simu_mtu=[]
+ #limit rows to number of steps
+ for i in range(step_numbers):
+ d,t =self.calc_timestamp_by_steps(0,i)
+ if simu_days.count(d) == 0:
+ simu_days.extend([d])
+ if simu_mtu.count(t) == 0:
+ simu_mtu.extend([t])
+ mi4=pd.MultiIndex.from_product([simu_days, simu_mtu],
+ names=['simulation_day', 'simulation_time'])
+ self.report_time_matrix = DataFrame(index = mi4, columns = mi3)
+
+ #for redispatch reporting a location report_time matrix is needed.
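+ #Sketch of the resulting index (editor's illustration): for grid areas
+ #['A','B'] the rows become the product (delivery_location, simulation_day,
+ #simulation_time) = ('A',1,1), ('A',1,2), ..., ('B',1,1), ..., combined
+ #with the same delivery-time columns as report_time_matrix.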
+ gridarea = self.model.gridareas #list
+ dday_lst=list(self.report_time_matrix.index.get_level_values(level=0))
+ mmtu_lst=list(self.report_time_matrix.index.get_level_values(level=1))
+ llocation=[]
+ for i in range(len(gridarea)):
+ llocation.extend([gridarea[i]]*len(dday_lst))
+ #make temporary dataframe to ensure the right index length
+ df = DataFrame({'delivery_location':llocation,
+ 'simulation_day':dday_lst*len(gridarea),
+ 'simulation_time':mmtu_lst*len(gridarea)})
+ df.set_index(['delivery_location','simulation_day','simulation_time'], inplace=True)
+ self.report_location_time_matrix = DataFrame(index = df.index, columns = mi3)
+ self.report_location_time_matrix.sort_index(inplace=True, level=[0,1,2])
+ del df
+
+
+ def get_hour(self):
+ if self.step_size == "15_minutes":
+ # // floor division returns the quotient in which the digits after the decimal point are removed
+ hour = ((self.model.schedule.steps + self.startMTU -1)//4) % 24 +1
+ return(hour)
+
+ def get_day(self):
+ if self.step_size == "15_minutes":
+ # // floor division returns the quotient in which the digits after the decimal point are removed
+ day = ((self.model.schedule.steps + self.startMTU + self.startday * 24 * 4 -1) // 96)
+ return (day)
+
+ def get_MTU(self):
+ if self.step_size == "15_minutes":
+ #mtu between 1 and 96
+ MTU = (self.model.schedule.steps + self.startMTU -1) % 96 +1
+ return (MTU)
+
+ def asset_schedules_horizon (self):
+ cur_MTU = self.get_MTU()
+
+ #on the last simulation day, the horizon is not extended to the next day.
+ if (cur_MTU >= self.DA_results_known)&(
+ self.schedule_template.index.get_level_values(0)[-1] >= self.get_day()+1):
+ endday = self.get_day()+1
+ else:
+ endday = self.get_day()
+ df = self.schedule_template.loc[(slice(endday),slice(None)),:].copy()
+ df =df.iloc[self.model.schedule.steps:]
+ return (df)
+
+ def calc_timestamp_by_steps (self, paststeps, deltasteps):
+ if self.step_size == "15_minutes":
+ MTU = (self.startMTU + paststeps + deltasteps -1) % 96 +1
+ #day
+ day = ((self.startMTU + paststeps + deltasteps + self.startday * 24 * 4 -1) // 96)
+ return (day, MTU)
+
+ def calc_delivery_period_end (self, starttuple, deliveryperiod_MTUs):
+ """this method is used to calculate the end mtu of a delivery period of block orders.
+ With a delivery period of 1 MTU the start delivery and end delivery MTU are equivalent."""
+ if(starttuple[1]<0)|(starttuple[1]>96)|(starttuple[0]<0):
+ raise Exception ('starttuple given to calc_delivery_period_end contains infeasible values for day or time')
+
+ if self.step_size == "15_minutes":
+ MTU = (starttuple[1] + deliveryperiod_MTUs -1 -1) % 96 +1
+ #day
+ day = ((starttuple[1] + deliveryperiod_MTUs + starttuple[0] * 24 * 4 -1 -1) // 96)
+ return (day, MTU)
+
+ def calc_delivery_period_range (self, startday, startmtu, deliveryperiod_MTUs):
+ """this method is used to calculate a delivery period range of a block order.
+ it returns a list for the days and a list for the mtu.
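+ Worked example (added for illustration): startday=1, startmtu=95 and a
+ delivery period of 4 MTUs yield day_lst=[1, 2, 2] and mtu_lst=[96, 1, 2].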
+ ATTENTION: it returns a list excluding the startday and startmtu
+ """
+ if(startmtu<0)|(startmtu>96)|(startday<0):
+ raise Exception ('starttuple given to calc_delivery_period_range contains infeasible values for day or time')
+
+ if self.step_size == "15_minutes":
+ mtu_lst =[]
+ day_lst=[]
+ for i in range(int(deliveryperiod_MTUs)-1):
+ MTU = (startmtu + i) % 96 +1
+ #day
+ day = ((startmtu + i + startday * 24 * 4) // 96)
+ mtu_lst +=[int(MTU)]
+ day_lst +=[int(day)]
+ return (day_lst, mtu_lst)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/asam classes/Visualization.py b/asam classes/Visualization.py
new file mode 100644
index 0000000..9252849
--- /dev/null
+++ b/asam classes/Visualization.py
@@ -0,0 +1,766 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 16 12:25:08 2018
+@author: Samuel Glismann
+
+Visualization methods for ASAM
+
+Methods:
+ show_asset_schedules(self,header=False)
+ show_trade_per_agent(self, only_agent =None,header=False)
+ show_dispatch_per_agent(self,only_agent =None,header=False)
+ show_return_per_agent(self, only_agent =None,header=False)
+ show_demand_supply_IDM(self, simulation_daytime, delivery_day, delivery_time,header=False)
+ show_system_balance(self,header=False)
+ show_redispatch_PI(self,header=False)
+ show_redispatch_summary(self,header=False)
+ show_cleared_prices(self,header=False)
+ show_cost_distribution(self, header=False)
+
+
+"""
+import pandas as pd
+from pandas import Series, DataFrame
+import numpy as np
+import matplotlib.pyplot as plt
+from datetime import datetime
+from matplotlib.ticker import FuncFormatter, MaxNLocator
+
+
+class Visualizations():
+ def __init__(self, model):
+ self.model = model
+ #directory to store figures
+ self.dir = self.model.exodata.output_path
+ #Use simulation name for figure names
+ self.sname = self.model.exodata.sim_name +'_'
+
+ def show_asset_schedules(self,header=False):
+ """
+ Method: all asset schedules are plotted in one figure
+ """
+ print('----plot asset schedules in percentage of Pmax')
+ #get the right time for the graph
+ day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ all_scheds ={}
+ for agent in self.model.schedule.agents:
+ for asset in agent.assets['object']:
+ name = agent.unique_id + str('_') + asset.assetID
+ all_scheds[name]=asset.schedule['commit']/asset.pmax*100
+
+ fig = plt.figure(figsize=(7,5));
+ ax = fig.add_subplot(1,1,1)
+ i = 1
+ key_lst=sorted(all_scheds.keys())
+ for key in key_lst:
+ #linestyle jumps from '-' to '--' with i jumping from 1 to -1
+ linestyle = ["-",'-',"--"]
+ df = all_scheds[key]
+ titl = 'asset_schedules_at_timestamp Day_{0}_MTU_{1}'.format(day,MTU)
+ df.plot(ax = ax,grid = True, style=linestyle[i], subplots = False,
+ rot=45,legend=False, title =titl,
+ label = key)
+ i =i*(-1)
+
+ ax.set_ylim(bottom=0, top = 102)
+ # Shrink current axis by 30%
+ box = ax.get_position()
+ ax.set_position([box.x0, box.y0, box.width * 0.7, box.height])
+ # Put a legend to the right of the current axis
+ ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
+
+ plt.ylabel('% of Pmax')
+
+ strFile ='results/'+titl+'.png'
+ fig.savefig(strFile) # save the figure to file
+ plt.close(fig)
+
+ def show_trade_per_agent(self, only_agent =None,header=False):
+ """
+ Method: all trade schedules are plotted per market party
+ """
+ print('-----plot show_trade_per_agent')
+ #get the right time for the graph
+ day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+
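+ #Editor's note (illustration): positive and negative positions are split
+ #below via a boolean mask, because stacked area plots cannot mix signs in
+ #one series; e.g. a DA_position of [-5, 10] becomes DAM_position=[NaN, 10]
+ #and DAM_neg=[-5, NaN].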
+ #stacked trade positions as areas and stacked dispatch as lines per agent
+ all_scheds ={}
+ ymin_ag =[]
+ ymax_ag =[]
+ if not only_agent:
+ #all agents are plotted
+ for agent in self.model.schedule.agents:
+ all_scheds[agent.unique_id]=agent.trade_schedule.copy()/1000
+ ymin_ag += [min(agent.trade_schedule.min().min()/1000, 0)]
+ ymax_ag += [max(agent.trade_schedule.max().max()/1000, 0)]
+
+ else:
+ agent = self.model.MP_dict[only_agent]
+ #only one agent is plotted
+ all_scheds[agent.unique_id]=agent.trade_schedule.copy()/1000
+ ymin_ag += [min(agent.trade_schedule.min().min()/1000, 0)]
+ ymax_ag += [max(agent.trade_schedule.max().max()/1000, 0)]
+
+ ymin = min(ymin_ag)
+ ymax = max(ymax_ag)
+ if (ymax != 0):
+ ymax=ymax + 0.1*abs(ymax)
+ else:
+ ymax=20
+ if (ymin !=0 ):
+ ymin=ymin - 0.1*abs(ymin)
+ else:
+ ymin=-20
+
+ key_lst=sorted(all_scheds.keys())
+ if len(key_lst) ==1:
+ fwidth = 7
+ fhight = 6
+ number_rows = 1
+ number_col = 1
+ elif len(key_lst)< 5:
+ fwidth = 8
+ fhight = 8
+ number_rows = 2
+ number_col = 2
+ else:
+ fwidth = 10
+ fhight = 11.5
+ number_rows = round((len(key_lst)+len(key_lst)%4)/4)
+ number_col = 4
+
+ fig = plt.figure(figsize=(fwidth,fhight));
+ suptitl='Trade positions at day {} MTU {}'.format(day,MTU)
+ if header==True:
+ plt.suptitle(suptitl)
+ for i in range(len(key_lst)):
+ if i == 0:
+ choice = True
+ else:
+ choice = False
+ ax = fig.add_subplot(number_rows, number_col, i+1)
+ df = all_scheds[key_lst[i]].copy()
+ #name change needed for the proper mask operation below
+ df.rename(columns={'forecast_error':'FE'}, inplace=True)
+ #keep current axis position
+ box = ax.get_position()
+ ax.set_position([box.x0, box.y0, box.width, box.height])
+
+ mask=df[['DA_position','ID_position','RD_position', 'FE']]>=0
+ df[['DAM_position','IDM_position','RDM_position','forecast_error']]=df[['DA_position','ID_position','RD_position','FE']].where(mask, np.nan)
+ df[['DAM_neg','IDM_neg','RDM_neg','neg_FE']]=df[['DA_position','ID_position','RD_position','FE']].where(~mask, np.nan)
+
+ titl = 'agent {0}'.format(key_lst[i])
+ df[['DAM_position','IDM_position','RDM_position','forecast_error']].plot.area(ax = ax,
+ stacked=True, grid = True, subplots = False,
+ legend=False, title =titl, color=[
+ 'mediumaquamarine','cornflowerblue','mediumpurple','orange'])
+ df[['DAM_neg','IDM_neg','RDM_neg','neg_FE']].plot.area(ax = ax,
+ stacked=True, grid = True, subplots = False,
+ legend=False, title =titl, color=[
+ 'mediumaquamarine','cornflowerblue','mediumpurple','orange'])
+ df[['total_trade','total_dispatch','imbalance_position']].plot(ax = ax,
+ grid = True, subplots = False, style="--",
+ legend=False, title =titl, color=['darkgreen','darkblue','red'])
+ if choice==True:
+ handels, labels = ax.get_legend_handles_labels()
+ ax.set_ylim(bottom=ymin, top = ymax)
+ if (i >=12):
+ plt.xlabel('delivery_day, delivery_MTU')
+ else:
+ plt.xlabel('')
+ if (i %4==0):
+ plt.ylabel('GW')
+
+ if len(key_lst)>= 5:
+ plt.subplots_adjust(top=0.89,
+ bottom=0.04,
+ left=0.09,
+ right=0.97,
+ hspace=0.29,
+ wspace=0.315)
+ fig.legend(handels[:7],labels[:7],loc='center', bbox_to_anchor=(0.5, 0.935), ncol =4)
+
+
+ elif len(key_lst)== 1:
+ plt.subplots_adjust(top=0.8,
+ bottom=0.045,
+ left=0.115,
+ right=0.96,
+ hspace=0.29,
+ wspace=0.315)
+ fig.legend(handels[:7],labels[:7],loc='center', bbox_to_anchor=(0.5, 0.90), ncol =4)
+
+ else:
+ plt.subplots_adjust(
+ top=0.86,
+ bottom=0.04,
+ left=0.09,
+ right=0.97,
+ hspace=0.29,
+ wspace=0.315)
+ fig.legend(handels[:7],labels[:7],loc='center', bbox_to_anchor=(0.5, 0.925), ncol =4)
+
+
+ stamp=str(datetime.now().replace(microsecond=0))
+ stamp=stamp.replace('.','')
+ stamp=stamp.replace(':','_')
+ if only_agent:
+ suptitl =suptitl +'_'+agent.unique_id
+ fig.savefig(self.dir+self.sname+suptitl+' '+stamp+'.png') # save the figure to file
+ plt.close(fig)
+
+ def show_dispatch_per_agent(self,only_agent =None,header=False):
+
+ """
+ Method: all asset dispatch schedules are plotted per market party
+
+ Input: with only_agent (string agent name), a single agent can be plotted
+ """
+
+ #get the right time for the graph
+ day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ print('-----plot show_dispatch_per_agent')
+ def format_fn(tick_val, tick_pos):
+ """local function to set ticks on x axes"""
+ if int(tick_val) in xs:
+ return labels[int(tick_val)]
+ else:
+ return ''
+ asset_owners_set={}
+
+ fig = plt.figure(figsize=(10,10));
+ suptitl='Asset dispatch at day {} MTU {}'.format(day,MTU)
+ if header==True:
+ plt.suptitle(suptitl)
+ for agent in self.model.schedule.agents:
+ all_scheds ={}
+ for asset in agent.assets['object']:
+ name = asset.assetID
+ all_scheds[name]=asset.schedule['commit']/asset.pmax*100
+ asset_owners_set[agent.unique_id]=all_scheds.copy()
+ key_lst=sorted(asset_owners_set.keys())
+ for i in range(len(key_lst)):
+ ax = fig.add_subplot(2,2,i+1)
+ asset_lst=sorted(asset_owners_set[key_lst[i]].keys())
+ for k in range(len(asset_lst)):
+ df = asset_owners_set[key_lst[i]][asset_lst[k]]
+ titl = 'Dispatch of agent {0}'.format(key_lst[i])
+ xs=range(len(df))
+ labels = df.index.values
+ ax.xaxis.set_major_formatter(FuncFormatter(format_fn))
+ ax.plot(df.values, drawstyle ='steps-pre', label=asset_lst[k])
+ plt.title(titl)
+ ax.legend(loc="best")
+ ax.grid(True)
+ ax.set_ylim(bottom=0, top = 105)
+ if (i ==2)|(i==3):
+ plt.xlabel('delivery_day, delivery_MTU')
+ if (i ==0)|(i==2):
+ plt.ylabel('% of Pmax')
+
+ stamp=str(datetime.now().replace(microsecond=0))
+ stamp=stamp.replace('.','')
+ stamp=stamp.replace(':','_')
+ fig.savefig(self.dir+self.sname+suptitl+' '+stamp+'.png') # save the figure to file
+ plt.close(fig)
+
+
+ def show_demand_supply_IDM(self, simulation_daytime, delivery_day, delivery_time,header=False):
+ """
+ Method: plot supply-demand curve of intra-day market
+ Input:
+ simulation_daytime: tuple of simulation day and simulation time (MTU), e.g. (3,1)
+ delivery_day: integer day
+ delivery_time: integer MTU"""
+
+ print('-----plot show_demand_supply_IDM')
+ sellorders = self.model.IDM_obook.sellorders_all_df.set_index('offer_daytime')
+ buyorders= self.model.IDM_obook.buyorders_all_df.set_index('offer_daytime')
+ allsupply= sellorders.loc[sellorders.index.isin([simulation_daytime])].reset_index().set_index(['delivery_day','delivery_time'])
+ supply = allsupply.loc[allsupply.index.isin([(delivery_day, delivery_time)])].sort_values('price').reset_index()[
+ ['price', 'quantity', 'direction']]
+ supply['MW']=supply['quantity'].cumsum()
+ alldemand= buyorders.loc[buyorders.index.isin([simulation_daytime])].reset_index().set_index(['delivery_day','delivery_time'])
+ demand = alldemand.loc[alldemand.index.isin([(delivery_day, delivery_time)])].sort_values('price', ascending=False).reset_index()[
+ ['price', 'quantity', 'direction']]
+ demand['MW']=demand['quantity'].cumsum()
+ #make multicolumn dataframe from supply and demand
+ demand_supply_df =pd.concat([demand,supply], axis=0).set_index('direction', append=True)
+ demand_supply_df= demand_supply_df.unstack(1).swaplevel(0,1,1).sort_index(1)
+ demand_supply_df.sort_index(axis=1, level=[0,1],inplace=True)
+
+ if demand_supply_df.empty:
+ print('demand_supply plot for delivery daytime ({},{}) during simulation daytime {}: neither supply nor demand available'.format(
+ delivery_day, delivery_time,simulation_daytime))
+ return
+ #make dataframe that can be plotted
+ end = demand_supply_df.loc[:,(slice(None),slice('MW','MW'))].max().max()+1
+ x= np.linspace(0,end,int(end)+1, endpoint=True)
+
+ df= DataFrame(columns=['x','NaN','sell_price', 'buy_price'])
+ df['x']=x
+ df.set_index('x', inplace=True)
+
+ try:
+ df['sell_price']=demand_supply_df[[('sell','price'),('sell','MW')]].set_index([('sell','MW')])
+ df['sell_price']=df['sell_price'].bfill()
+ except:
+ df['sell_price']=np.nan
+ try:
+ df['buy_price']=demand_supply_df[[('buy','price'),('buy','MW')]].set_index([('buy','MW')])
+ df['buy_price']=df['buy_price'].bfill()
+ except:
+ df['buy_price']=np.nan
+ fig = plt.figure(figsize=(8,5));
+ plt.step(x=df.index, y=df['sell_price'],where='pre', label=['supply'])
+ plt.step(x=df.index, y=df['buy_price'],where='pre', label=['demand'])
+ if header==True:
+ titl='simulation daytime {0}, delivery daytime ({1}, {2})'.format(simulation_daytime,delivery_day, delivery_time)
+ else:
+ titl=''
+ plt.title(titl)
+ plt.legend(loc="best")
+ plt.grid(True)
+ plt.xlabel('MW')
+ plt.ylabel("Eur/MW")
+ plt.tight_layout()
+ stamp=str(datetime.now().replace(microsecond=0))
+ stamp=stamp.replace('.','')
+ stamp=stamp.replace(':','_')
+ fig.savefig(self.dir+self.sname+'demand_supply_curve'+str(simulation_daytime)+'_('+str(delivery_day)+'_'+str(delivery_time)+')i_'+stamp+'.png') # save the figure to file
+ plt.close(fig)
+
+ def show_system_balance(self,header=False):
+ """
+ Method: plot of system balance and aggregated transactions per market
+ """
+ print('-----plot show_system_balance')
+ def format_fn(tick_val, tick_pos):
+ """local function to set ticks on x axes"""
+ if int(tick_val) in xs:
+ return labels[int(tick_val)]
+ else:
+ return ''
+
+ #get the right time for the graph
+ day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ ymin=0
+ ymax=0
+ sell_labels = []
+ buy_labels = []
+ shown_labels = []
+ #to enable plots with a subset of simulated markets, labels need to be selected
+ if self.model.exodata.sim_task['run_DAM[y/n]'][0] =='y':
+ sell_labels += ['DAM_sell']
+ buy_labels += ['DAM_buy']
+ shown_labels += ['DAM']
+ if self.model.exodata.sim_task['run_IDM[y/n]'][0] =='y':
+ sell_labels += ['IDM_sell']
+ buy_labels += ['IDM_buy']
+ shown_labels += ['IDM']
+ if self.model.exodata.sim_task['run_RDM[y/n]'][0] =='y':
+ sell_labels += ['RDM_sell']
+ buy_labels += ['RDM_buy']
+ shown_labels += ['RDM']
+ if self.model.exodata.sim_task['run_BEM[y/n]'][0] =='y':
+ sell_labels += ['BEM_sell']
+ buy_labels += ['BEM_buy']
+ shown_labels += ['BEM']
+ df = pd.concat([self.model.rpt.get_system_dispatch(),
+ self.model.aGridAndSystemOperator.system_transactions.loc[:self.model.schedules_horizon.index[-1]],
+ self.model.aGridAndSystemOperator.imbalances.loc[:self.model.schedules_horizon.index[-1]]], axis=1)
+
+ #make sell negative according to convention
+ df[sell_labels]=-1*df[sell_labels]
+ #for simple plotting (only one legend sell and buy)
+ df[shown_labels]=df[sell_labels]
+
+ ymin=min(df[sell_labels].sum(axis=1).min().min(),df['sum_dispatch'].min().min(), ymin)
+
+ ymax=max(df[buy_labels].sum(axis=1).max().max(),df['sum_dispatch'].max().max(), ymax)
+
+ if (ymax>0)|(ymax<0):
+ ymax=int(ymax + 0.1*abs(ymax))
+ else:
+ ymax=20
+ if (ymin>0)|(ymin<0):
+ ymin=int(ymin - 0.1*abs(ymin))
+ else:
+ ymin=-20
+ fig = plt.figure(figsize=(8,5));
+ ax = fig.add_subplot(1,1,1)
+
+ # Shrink current axis by 40%
+ box = ax.get_position()
+ ax.set_position([box.x0, box.y0, box.width* 0.6, box.height])
+ titl = 'System trade and dispatch at day {0},MTU {1}'.format(day, MTU)
+ if header==True:
+ plt.title(titl)
+
+ df[shown_labels].plot.area(ax=ax,
+ stacked=True, grid = True, subplots = False,
+ legend=True, color=['mediumaquamarine','cornflowerblue','mediumpurple','orange'])
+
+ df[buy_labels].plot.area(ax=ax, stacked=True,
+ grid = True, subplots = False,
+ legend=False, color=['mediumaquamarine','cornflowerblue','mediumpurple','orange'])
+ df[['sum_dispatch','imbalance_redispatch','imbalance_market(scheduled)','imbalance_market(realized)']].plot(
+ ax=ax,color=['darkblue','darkviolet','orange','red'],stacked=False,
+ grid = True, subplots = False,
+ legend=True, style="--")
+
+ handels, labels = ax.get_legend_handles_labels()
+
+ plt.legend(handels[:8],labels[:8],loc='center left', bbox_to_anchor=(1.00, 0.5))
+
+ ax.set_ylim(bottom=ymin, top = ymax)
+ plt.ylabel('MW')
+ plt.xlabel('delivery day, MTU')
+
+ stamp=str(datetime.now().replace(microsecond=0))
+ stamp=stamp.replace('.','')
+ stamp=stamp.replace(':','_')
+ fig.savefig(self.dir+self.sname+titl+' '+stamp+'.png') # save the figure to file
+ plt.close(fig)
+
+
+
+ def show_return_per_agent(self, only_agent =None,header=False):
+ print('-----plot show_return_per_agent')
+ #get the right time for the graph
+ day, MTU= self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps -1, 0)
+ sell_labels = []
+ buy_labels = []
+ shown_labels = []
+ #to enable plots with a subset of simulated markets, labels need to be selected
+ if self.model.exodata.sim_task['run_DAM[y/n]'][0] =='y':
+ sell_labels += ['DAM_sell']
+ buy_labels += ['DAM_buy']
+ shown_labels += ['DAM_return']
+ if self.model.exodata.sim_task['run_IDM[y/n]'][0] =='y':
+ sell_labels += ['IDM_sell']
+ buy_labels += ['IDM_buy']
+ shown_labels += ['IDM_return']
+ if self.model.exodata.sim_task['run_RDM[y/n]'][0] =='y':
+ sell_labels += ['RDM_sell']
+ buy_labels += ['RDM_buy']
+ shown_labels += ['RDM_return']
+ if self.model.exodata.sim_task['run_BEM[y/n]'][0] =='y':
+    def show_return_per_agent(self, only_agent=None, header=False):
+        print('-----plot show_return_per_agent')
+        #get the right time for the graph
+        day, MTU = self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps - 1, 0)
+        sell_labels = []
+        buy_labels = []
+        shown_labels = []
+        #to enable plots with a subset of simulated markets, labels need to be selected
+        if self.model.exodata.sim_task['run_DAM[y/n]'][0] == 'y':
+            sell_labels += ['DAM_sell']
+            buy_labels += ['DAM_buy']
+            shown_labels += ['DAM_return']
+        if self.model.exodata.sim_task['run_IDM[y/n]'][0] == 'y':
+            sell_labels += ['IDM_sell']
+            buy_labels += ['IDM_buy']
+            shown_labels += ['IDM_return']
+        if self.model.exodata.sim_task['run_RDM[y/n]'][0] == 'y':
+            sell_labels += ['RDM_sell']
+            buy_labels += ['RDM_buy']
+            shown_labels += ['RDM_return']
+        if self.model.exodata.sim_task['run_BEM[y/n]'][0] == 'y':
+            sell_labels += ['BEM_sell']
+            buy_labels += ['BEM_buy']
+            shown_labels += ['BEM_return']
+
+        all_scheds = {}
+        ymin_ag = []
+        ymax_ag = []
+        if not only_agent:
+            #all agents are plotted
+            for agent in self.model.schedule.agents:
+                all_scheds[agent.unique_id] = agent.financial_return.copy()/1000
+                ymin_ag += [min(agent.financial_return.min().min()/1000, 0)]
+                ymax_ag += [max(agent.financial_return.max().max()/1000, 0)]
+        else:
+            agent = self.model.MP_dict[only_agent]
+            all_scheds[agent.unique_id] = agent.financial_return.copy()/1000
+            ymin_ag += [min(agent.financial_return.min().min()/1000, 0)]
+            ymax_ag += [max(agent.financial_return.max().max()/1000, 0)]
+
+        ymin = min(ymin_ag)
+        ymax = max(ymax_ag)
+        if ymax != 0:
+            ymax = ymax + 0.1 * abs(ymax)
+        else:
+            ymax = 20
+        if ymin != 0:
+            ymin = ymin - 0.1 * abs(ymin)
+        else:
+            ymin = -20
+
+        key_lst = sorted(all_scheds.keys())
+        if len(key_lst) == 1:
+            fwidth = 7
+            fheight = 6
+            number_rows = 1
+            number_col = 1
+        elif len(key_lst) < 5:
+            fwidth = 8
+            fheight = 8
+            number_rows = 2
+            number_col = 2
+        else:
+            fwidth = 10
+            fheight = 11.5
+            #ceiling division ensures enough rows for all agents
+            number_rows = -(-len(key_lst) // 4)
+            number_col = 4
+
+        fig = plt.figure(figsize=(fwidth, fheight))
+        suptitl = 'Financial returns at day {} MTU {}'.format(day, MTU)
+        if header:
+            plt.suptitle(suptitl)
+        for i in range(len(key_lst)):
+            #capture the legend handles only once, from the first subplot
+            choice = (i == 0)
+            ax = fig.add_subplot(number_rows, number_col, i+1)
+            df = all_scheds[key_lst[i]]
+            mask = df[['DA_return','ID_return','RD_return','IB_return']] >= 0
+            df[['DAM_return','IDM_return','RDM_return','IBM_return']] = df[['DA_return','ID_return','RD_return','IB_return']].where(mask, np.nan)
+            df[['DAM_neg','IDM_neg','RDM_neg','IBM_neg']] = df[['DA_return','ID_return','RD_return','IB_return']].where(~mask, np.nan)
+
+            titl = 'agent {0}'.format(key_lst[i])
+
+            df[['DAM_return','IDM_return','RDM_return','IBM_return']].plot.area(ax=ax,
+                      stacked=True, grid=True, subplots=False,
+                      legend=False, title=titl, color=[
+                      'mediumaquamarine','cornflowerblue','mediumpurple','orange'])
+            df[['DAM_neg','IDM_neg','RDM_neg','IBM_neg']].plot.area(ax=ax,
+                      stacked=True, grid=True, subplots=False,
+                      legend=False, title=titl, color=[
+                      'mediumaquamarine','cornflowerblue','mediumpurple','orange'])
+            df[['total_return','total_dispatch_costs','profit']].plot(ax=ax,
+                      grid=True, subplots=False, style=["--","--","--"],
+                      legend=False, title=titl, color=['darkgreen','darkblue','red'])
+            ax.set_ylim(bottom=ymin, top=ymax)
+            if choice:
+                handles, labels = ax.get_legend_handles_labels()
+            #x axis label only on the lower subplots
+            if i >= 12:
+                plt.xlabel('delivery_day, delivery_MTU')
+            else:
+                plt.xlabel('')
+            if i % 4 == 0:
+                plt.ylabel('thousand €')
+
+        if len(key_lst) >= 5:
+            plt.subplots_adjust(top=0.89, bottom=0.04, left=0.09,
+                                right=0.97, hspace=0.29, wspace=0.315)
+            fig.legend(handles[:7], labels[:7], loc='center', bbox_to_anchor=(0.5, 0.935), ncol=4)
+        elif len(key_lst) == 1:
+            plt.subplots_adjust(top=0.8, bottom=0.045, left=0.115,
+                                right=0.96, hspace=0.29, wspace=0.315)
+            fig.legend(handles[:7], labels[:7], loc='center', bbox_to_anchor=(0.5, 0.90), ncol=4)
+        else:
+            plt.subplots_adjust(top=0.86, bottom=0.04, left=0.09,
+                                right=0.97, hspace=0.29, wspace=0.315)
+            fig.legend(handles[:7], labels[:7], loc='center', bbox_to_anchor=(0.5, 0.925), ncol=4)
+
+        stamp = str(datetime.now().replace(microsecond=0))
+        stamp = stamp.replace('.','')
+        stamp = stamp.replace(':','_')
+        if only_agent:
+            suptitl = suptitl + '_' + agent.unique_id
+        fig.savefig(self.dir + self.sname + suptitl + ' ' + stamp + '.png') #save the figure to file
+        plt.close(fig)
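+
+    # Usage sketch (illustrative only; 'MP_1' is a hypothetical agent id and
+    # `viz` an assumed Visualization instance): restrict the return plot to a
+    # single market party instead of one subplot per agent:
+    #
+    #     viz.show_return_per_agent(only_agent='MP_1', header=True)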
+    def show_redispatch_PI(self, header=False):
+        """
+        Method: plot of redispatch performance indicators over time
+        (see report method redispatch_PI() for more information on content)"""
+
+        print('-----plot show_redispatch_PI')
+        def format_fn(tick_val, tick_pos):
+            """local function to set ticks on x axes"""
+            if int(tick_val) in xs:
+                return labels[int(tick_val)]
+            else:
+                return ''
+        #get the right time for the graph
+        day, MTU = self.model.clock.calc_timestamp_by_steps(self.model.schedule.steps - 1, 0)
+        fig = plt.figure(figsize=(7,7))
+        ax = fig.add_subplot(1,1,1)
+
+        titl = 'Redispatch Performance Indicators at day {0}, MTU {1}'.format(day, MTU)
+        if header:
+            plt.title(titl)
+
+        #check for None before selecting columns, otherwise the subscript would raise
+        df = self.model.rpt.redispatch_PI()
+        if df is None:
+            print('redispatch_PI() is None. No plot available')
+            return
+        df = df[['residual_demand_downwards','residual_demand_upwards',
+                 'imbalance_redispatch']]
+        xs = range(len(df))
+        labels = df.index.values
+        ax.xaxis.set_major_formatter(FuncFormatter(format_fn))
+
+        lineObjects = ax.plot(df.values, drawstyle='steps-pre', ls='--')
+        colors = ['darkorange','darkred','darkviolet']
+        for i in range(len(lineObjects)):
+            lineObjects[i].set_color(colors[i])
+
+        #move the plot area up to make room for the legend below
+        box = ax.get_position()
+        ax.set_position([box.x0, box.y0*(1.6), box.width, box.height])
+        ax.grid(True)
+        plt.legend(lineObjects, ['residual_demand_downwards','residual_demand_upwards',
+                   'imbalance_redispatch'], loc='center left',
+                   bbox_to_anchor=(0, -0.15), fancybox=True, shadow=False)
+
+        plt.ylabel('MW (positive residual demand means over-procurement)')
+        plt.xlabel('delivery day, MTU')
+        stamp = str(datetime.now().replace(microsecond=0))
+        stamp = stamp.replace('.','')
+        stamp = stamp.replace(':','_')
+        fig.savefig(self.dir + self.sname + titl + ' ' + stamp + '.png') #save the figure to file
+        plt.close(fig)
+
+    def show_redispatch_summary(self, header=False):
+        """
+        Method: bar plot summarizing the redispatch performance indicators
+        (see report method redispatch_PI() for more information on content)"""
+        print('-----plot show_redispatch_summary')
+        fig = plt.figure(figsize=(6,6))
+        if header:
+            plt.suptitle('Redispatch Key Performance Indicators')
+        key_figures_df = self.model.rpt.redispatch_PI().sum()
+        indis = ['redispatch_demand','redispatch_solved','overproc_downwards','underproc_downwards',
+                 'overproc_upwards','underproc_upwards','imbalance_redispatch','imbalance_market']
+        ymax = key_figures_df.loc[indis].max()/1000
+        ymin = key_figures_df.loc[indis].min()/1000
+        if (ymin < 0) & (ymin >= -1):
+            ymin = -1
+
+        ax = fig.add_subplot(1,1,1)
+        (key_figures_df.loc[indis]/1000).plot(
+                ax=ax, kind='bar', legend=False, grid=True, rot=90)
+        ax.set_ylim(bottom=ymin*1.02, top=ymax*1.02)
+
+        plt.ylabel('GWh')
+        plt.subplots_adjust(top=0.94, bottom=0.285, left=0.125,
+                            right=0.79, hspace=0.2, wspace=0.2)
+        stamp = str(datetime.now().replace(microsecond=0))
+        stamp = stamp.replace('.','')
+        stamp = stamp.replace(':','_')
+        fig.savefig(self.dir + 'redispatch kpi summary' + stamp + '.png') #save the figure to file
+        plt.close(fig)
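+
+    # Reading aid (hedged example with made-up numbers): the summary bars show
+    # each indicator summed over the whole simulation and scaled by 1/1000, so a
+    # summed value of 12500 appears as a 12.5 bar on the GWh-labelled axis;
+    # show_redispatch_PI() plots the same indicators over time instead.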
+    def show_cleared_prices(self, header=False):
+        """
+        Method: plot cleared prices of all markets.
+        Note: the column 'RDM_spread_weighted_mean' is plotted under the legend label 'RDM_weighted_mean'"""
+        print('-----plot show_cleared_prices')
+        def format_fn(tick_val, tick_pos):
+            """local function to set ticks on x axes"""
+            if int(tick_val) in xs:
+                return labels[int(tick_val)]
+            else:
+                return ''
+
+        prices = self.model.rpt.get_cleared_prices()
+
+        fig = plt.figure(figsize=(7,5))
+        ax = fig.add_subplot(1,1,1)
+        xs = range(len(prices))
+        labels = prices.index.values
+        ax.xaxis.set_major_formatter(FuncFormatter(format_fn))
+
+        ax.plot(prices['IDM_weighted_mean'].values, drawstyle='steps-pre', label='IDM_weighted_mean', alpha=0.8)
+        ax.plot(prices['RDM_spread_weighted_mean'].values, drawstyle='steps-pre', label='RDM_weighted_mean', alpha=0.8)
+        ax.plot(prices['DAP'].values, drawstyle='steps-pre', label='DAM', alpha=0.8)
+        ax.plot(prices['IBP_short'].values, drawstyle='steps-pre', label='IBP_short', alpha=0.3)
+        ax.plot(prices['IBP_long'].values, drawstyle='steps-pre', label='IBP_long', alpha=0.3)
+
+        #shrink current axis by 30%
+        box = ax.get_position()
+        ax.set_position([box.x0, box.y0, box.width * 0.7, box.height])
+        #put a legend to the right of the current axis
+        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
+        ax.grid(True)
+        plt.ylabel('€/MWh')
+        plt.xlabel('delivery day, MTU')
+        if header:
+            plt.title('Cleared prices')
+
+        stamp = str(datetime.now().replace(microsecond=0))
+        stamp = stamp.replace('.','')
+        stamp = stamp.replace(':','_')
+        fig.savefig(self.dir + self.sname + 'Cleared prices_' + stamp + '.png') #save the figure to file
+        plt.close(fig)
+
+    def show_cost_distribution(self, header=False):
+        """
+        Method: plot various costs and profits
+        (for more information on the parameters see report method final_keyfigures())
+        """
+        print('-----plot show_cost_distribution')
+        ## COSTS overview of the simulation
+        fig = plt.figure(figsize=(6,6))
+        costs = ['total_dispatch_cost','market_profit','system_operations_cost','cost_of_electricity']
+
+        keyfigures = self.model.rpt.final_keyfigures()
+
+        #if possible, the values are 'normalized' with the corrected day-ahead load (+export, -import)
+        if keyfigures.loc['load_DA_cor','value'] > 0:
+            keyfigures.loc[costs,'value'] = keyfigures.loc[costs,'value'].div(keyfigures.loc['load_DA_cor','value'])
+            y_label = '€ / net_DA_load_MWh'
+            suptitl = 'Cost and profit per MWh day-ahead load (+exp. -imp.)'
+        elif keyfigures.loc['residual_load_DA','value'] > 0:
+            keyfigures.loc[costs,'value'] = keyfigures.loc[costs,'value'].div(keyfigures.loc['residual_load_DA','value'])
+            y_label = '€ / net_DA_residual_load_MWh'
+            suptitl = 'Cost and profit per MWh day-ahead residual load (+exp. -imp.)'
+        else:
+            y_label = '€'
+            suptitl = 'Cost and profit'
+        if header:
+            plt.suptitle(suptitl)
+
+        ymax = keyfigures.loc[costs,'value'].abs().max()
+        ymin = 0
+        offset = max(abs(ymin)*0.05, abs(ymax)*0.05)
+        ymin = ymin - offset
+        ymax = ymax + offset
+
+        ax = fig.add_subplot(1,1,1)
+        keyfigures.loc[costs,'value'].abs().plot(ax=ax, kind='bar', legend=False, grid=True, rot=90)
+
+        ax.set_ylim(bottom=ymin, top=ymax)
+        plt.ylabel(y_label)
+        plt.xlabel('')
+        plt.subplots_adjust(top=0.94, bottom=0.305, left=0.125,
+                            right=0.79, hspace=0.2, wspace=0.2)
+        stamp = str(datetime.now().replace(microsecond=0))
+        stamp = stamp.replace('.','')
+        stamp = stamp.replace(':','_')
+        fig.savefig(self.dir + 'profitloss ' + stamp + '.png') #save the figure to file
+        plt.close(fig)
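+
+# Usage sketch (illustrative comment, not executed): assuming a finished model
+# run held in `model` and assuming the Visualization constructor takes the model
+# as argument (constructor details are not shown in this file excerpt), a typical
+# post-simulation reporting sequence could look like:
+#
+#     viz = Visualization(model)
+#     viz.show_system_balance(header=True)
+#     viz.show_return_per_agent()
+#     viz.show_cleared_prices()
+#     viz.show_redispatch_PI()
+#     viz.show_redispatch_summary()
+#     viz.show_cost_distribution()
+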