diff --git a/.github/workflows/tests-coverage.yml b/.github/workflows/tests-coverage.yml index 4cb0b48a7..90ca8bc52 100644 --- a/.github/workflows/tests-coverage.yml +++ b/.github/workflows/tests-coverage.yml @@ -19,6 +19,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | + pip3 install --upgrade pip pip3 install -e $GITHUB_WORKSPACE[full] pip3 install coveralls - name: Run coverage diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 44c14eaba..a2ecd2f20 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,13 +6,13 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/psf/black - rev: 21.12b0 + rev: 22.3.0 hooks: - id: black -#- repo: https://github.com/pycqa/flake8 -# rev: 4.0.1 -# hooks: -# - id: flake8 +- repo: https://github.com/pycqa/flake8 + rev: 4.0.1 + hooks: + - id: flake8 - repo: https://github.com/pycqa/isort rev: 5.10.1 hooks: diff --git a/doc/api/edisgo.flex_opt.rst b/doc/api/edisgo.flex_opt.rst index 758711d23..390f6e191 100644 --- a/doc/api/edisgo.flex_opt.rst +++ b/doc/api/edisgo.flex_opt.rst @@ -20,6 +20,14 @@ edisgo.flex\_opt.costs module edisgo.flex\_opt.exceptions module ---------------------------------- +.. automodule:: edisgo.flex_opt.exceptions + :members: + :undoc-members: + :show-inheritance: + +edisgo.flex\_opt.q\_control module +---------------------------------- + -.. automodule:: edisgo.flex_opt.exceptions +.. automodule:: edisgo.flex_opt.q_control :members: :undoc-members: :show-inheritance: diff --git a/doc/api/edisgo.tools.rst b/doc/api/edisgo.tools.rst index 601b74013..f50a89b42 100644 --- a/doc/api/edisgo.tools.rst +++ b/doc/api/edisgo.tools.rst @@ -20,6 +20,22 @@ edisgo.tools.edisgo\_run module edisgo.tools.geo module ----------------------- +.. automodule:: edisgo.tools.geo + :members: + :undoc-members: + :show-inheritance: + +edisgo.tools.geopandas\_helper module +---------------------------------------- + +.. 
automodule:: edisgo.tools.geopandas_helper + :members: + :undoc-members: + :show-inheritance: + +edisgo.tools.networkx\_helper module +---------------------------------------- + -.. automodule:: edisgo.tools.geo +.. automodule:: edisgo.tools.networkx_helper :members: :undoc-members: :show-inheritance: @@ -57,7 +73,6 @@ edisgo.tools.tools module :undoc-members: :show-inheritance: - Module contents --------------- diff --git a/doc/conf.py b/doc/conf.py index 7c739979b..4d4ff311e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -28,7 +28,6 @@ # serve to show the default. import os -import shlex import sys from unittest.mock import MagicMock @@ -55,6 +54,7 @@ "sphinx.ext.viewcode", "sphinx.ext.napoleon", # enable Napoleon Sphinx v>1.3 "sphinx.ext.extlinks", # enables external links with a key + "sphinx_autodoc_typehints", ] # Napoleon settings @@ -76,8 +76,13 @@ "http://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.%s.html", "pandas.", ), + "geopandas": ( + "https://geopandas.org/en/stable/docs/reference/api/geopandas.%s.html", + "geopandas.", + ), "networkx": ( - "https://networkx.github.io/documentation/stable/reference/classes/graph.html#%s", + "https://networkx.github.io/documentation/stable/reference/classes/" + "graph.html#%s", "networkx.", ), "sqlalchemy": ( @@ -86,7 +91,7 @@ ), "shapely": ("https://shapely.readthedocs.io/en/latest/manual.html#%s", "shapely."), "ding0": ("https://dingo.readthedocs.io/en/dev/api/ding0.html#%s", "Ding0"), - "pypsa": ("https://pypsa.org/doc/components.html#%s", "pypsa"), + "pypsa": ("https://pypsa.readthedocs.io/en/latest/components.html#%s", "pypsa"), "plotly": ("https://plotly.com/python-api-reference/generated/#%s.html", "plotly"), } # Add any paths that contain templates here, relative to this directory. @@ -104,9 +109,9 @@ master_doc = "index" # General information about the project.
-project = u"eDisGo" -copyright = u"2017, open_eGo-Team" -author = u"open_eGo-Team" +project = "eDisGo" +copyright = "2017, open_eGo-Team" +author = "open_eGo-Team" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -162,8 +167,9 @@ todo_include_todos = True -# Fix import error of modules which depend on C modules (mock out the imports for these modules) -# see http://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules +# Fix import error of modules which depend on C modules (mock out the imports for these +# modules) see http://read-the-docs.readthedocs.io/en/latest/ +# faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules if "READTHEDOCS" in os.environ: class Mock(MagicMock): @@ -181,7 +187,7 @@ def __getattr__(cls, name): # a list of builtin themes. # html_theme = 'alabaster' -import sphinx_rtd_theme +import sphinx_rtd_theme # noqa: E402 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_theme = "sphinx_rtd_theme" @@ -282,20 +288,20 @@ def __getattr__(cls, name): latex_elements = { # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', + # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', + # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. - #'preamble': '', + # 'preamble': '', # Latex figure (float) alignment - #'figure_align': 'htbp', + # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ - (master_doc, "edisgo.tex", u"eDisGo Documentation", u"open_eGo-Team", "manual"), + (master_doc, "edisgo.tex", "eDisGo Documentation", "open_eGo-Team", "manual"), ] # The name of an image file (relative to this directory) to place at the top of @@ -323,7 +329,7 @@ def __getattr__(cls, name): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "eDisGo", u"eDisGo Documentation", [author], 1)] +man_pages = [(master_doc, "eDisGo", "eDisGo Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False @@ -338,7 +344,7 @@ def __getattr__(cls, name): ( master_doc, "eDisGo", - u"eDisGo Documentation", + "eDisGo Documentation", author, "eDisGo", "One line description of project.", diff --git a/doc/features_in_detail.rst b/doc/features_in_detail.rst index b494d5df4..adefa2864 100644 --- a/doc/features_in_detail.rst +++ b/doc/features_in_detail.rst @@ -7,8 +7,7 @@ Power flow analysis ------------------- In order to analyse voltages and line loadings a non-linear power flow analysis (PF) using pypsa is conducted. -All loads and generators are modelled as PQ nodes; the slack is modelled as a PV node with a set voltage of 1\,p.u. -and positioned at the substation's secondary side. +All loads and generators are modelled as PQ nodes. The slack is positioned at the substation's secondary side. Multi period optimal power flow --------------------------------- @@ -267,10 +266,14 @@ Storage integration .. warning:: The storage integration methods described below are not yet adapted to the refactored code and therefore currently do not work. -Besides the possibility to connect a storage with a given operation to any node in the grid, eDisGo provides a methodology that takes -a given storage capacity and allocates it to multiple smaller storages such that it reduces line overloading and voltage deviations. 
-The methodology is implemented in :py:func:`~edisgo.flex_opt.storage_positioning.one_storage_per_feeder`. As the above described -curtailment allocation methodologies it is intended to be used in combination with `eTraGo `_ where +Besides the possibility to connect a storage with a given operation to any node in the +grid, eDisGo provides a methodology that takes +a given storage capacity and allocates it to multiple smaller storage units such that it +reduces line overloading and voltage deviations. +The methodology is implemented in :py:func:`~edisgo.flex_opt.storage_positioning.one_storage_per_feeder`. +As the above described +curtailment allocation methodologies it is intended to be used in combination +with `eTraGo `_ where storage capacity and operation is optimized. For each feeder with load or voltage issues it is checked if integrating a diff --git a/doc/quickstart.rst b/doc/quickstart.rst index b0c44b4d9..32c419780 100644 --- a/doc/quickstart.rst +++ b/doc/quickstart.rst @@ -185,13 +185,16 @@ on how to generate grids yourself. A minimum working example ------------------------- -Following you find short examples on how to use eDisGo. Further details are -provided in :ref:`usage-details`. Further examples can be found in the +Following you find short examples on how to use eDisGo to set up a network and time +series information for loads and generators in the network and afterwards conduct a +power flow analysis and determine possible grid expansion needs and costs. Further +details are provided in :ref:`usage-details`. Further examples can be found in the `examples directory `_. All following examples assume you have a ding0 grid topology (directory containing csv files, defining the grid topology) in a directory "ding0_example_grid" in -the directory from where you run your example. +the directory from where you run your example. If you do not have an example grid, you +can download one `here `_. 
Aside from grid topology data you may eventually need a dataset on future installation of power plants. You may therefore use the scenarios developed in @@ -211,23 +214,25 @@ You can run a worst-case scenario as follows: from edisgo import EDisGo - # Set up the EDisGo object that will import the grid topology, set up - # feed-in and load time series (here for a worst case analysis) - # and other relevant data - edisgo = EDisGo(ding0_grid='ding0_example_grid', - worst_case_analysis='worst-case') + # Set up the EDisGo object - the EDisGo object provides the top-level API for + # invocation of data import, power flow analysis, network reinforcement, + # flexibility measures, etc.. + edisgo_obj = EDisGo(ding0_grid="ding0_example_grid") - # Import scenario for future generators from the oedb - edisgo.import_generators(generator_scenario='nep2035') + # Import scenario for future generator park from the oedb + edisgo_obj.import_generators(generator_scenario="nep2035") - # Conduct grid analysis (non-linear power flow using PyPSA) - edisgo.analyze() + # Set up feed-in and load time series (here for a worst case analysis) + edisgo_obj.set_time_series_worst_case_analysis() + + # Conduct power flow analysis (non-linear power flow using PyPSA) + edisgo_obj.analyze() # Do grid reinforcement - edisgo.reinforce() + edisgo_obj.reinforce() # Determine costs for each line/transformer that was reinforced - costs = edisgo.results.grid_expansion_costs + costs = edisgo_obj.results.grid_expansion_costs Instead of conducting a worst-case analysis you can also provide specific @@ -238,47 +243,59 @@ time series: import pandas as pd from edisgo import EDisGo - # Set up the EDisGo object with your own time series - # (these are dummy time series!) 
- # timeindex specifies which time steps to consider in power flow - timeindex = pd.date_range('1/1/2011', periods=4, freq='H') + # Set up the EDisGo object with generator park scenario NEP2035 + edisgo_obj = EDisGo( + ding0_grid="ding0_example_grid", + generator_scenario="nep2035" + ) + + # Set up your own time series by load sector and generator type (these are dummy + # time series!) + timeindex = pd.date_range("1/1/2011", periods=4, freq="H") # load time series (scaled by annual demand) timeseries_load = pd.DataFrame( - {'residential': [0.0001] * len(timeindex), - 'retail': [0.0002] * len(timeindex), - 'industrial': [0.00015] * len(timeindex), - 'agricultural': [0.00005] * len(timeindex) + {"residential": [0.0001] * len(timeindex), + "retail": [0.0002] * len(timeindex), + "industrial": [0.00015] * len(timeindex), + "agricultural": [0.00005] * len(timeindex) }, index=timeindex) # feed-in time series of fluctuating generators (scaled by nominal power) timeseries_generation_fluctuating = pd.DataFrame( - {'solar': [0.2] * len(timeindex), - 'wind': [0.3] * len(timeindex) + {"solar": [0.2] * len(timeindex), + "wind": [0.3] * len(timeindex) }, index=timeindex) # feed-in time series of dispatchable generators (scaled by nominal power) timeseries_generation_dispatchable = pd.DataFrame( - {'biomass': [1] * len(timeindex), - 'coal': [1] * len(timeindex), - 'other': [1] * len(timeindex) + {"biomass": [1] * len(timeindex), + "coal": [1] * len(timeindex), + "other": [1] * len(timeindex) }, index=timeindex) - # Set up the EDisGo object with your own time series and generator scenario - # NEP2035 - edisgo = EDisGo( - ding0_grid='ding0_example_grid', - generator_scenario='nep2035', - timeseries_load=timeseries_load, - timeseries_generation_fluctuating=timeseries_generation_fluctuating, - timeseries_generation_dispatchable=timeseries_generation_dispatchable, - timeindex=timeindex) + # Before you can set the time series to the edisgo_obj you need to set the time + # index (this 
could also be done upon initialisation of the edisgo_obj) - the time + # index specifies which time steps to consider in power flow analysis + edisgo_obj.set_timeindex(timeindex) - # Do grid reinforcement - edisgo.reinforce() + # Now you can set the active power time series of loads and generators in the grid + edisgo_obj.set_time_series_active_power_predefined( + conventional_loads_ts=timeseries_load, + fluctuating_generators_ts=timeseries_generation_fluctuating, + dispatchable_generators_ts=timeseries_generation_dispatchable + ) + + # Before you can now run a power flow analysis and determine grid expansion needs, + # reactive power time series of the loads and generators also need to be set. If you + # simply want to use default configurations, you can do the following. + edisgo_obj.set_time_series_reactive_power_control() + + # Now you are ready to determine grid expansion needs + edisgo_obj.reinforce() # Determine cost for each line/transformer that was reinforced - costs = edisgo.results.grid_expansion_costs + costs = edisgo_obj.results.grid_expansion_costs Time series for loads and fluctuating generators can also be automatically generated using the provided API for the oemof demandlib and the OpenEnergy DataBase: @@ -288,36 +305,43 @@ using the provided API for the oemof demandlib and the OpenEnergy DataBase: import pandas as pd from edisgo import EDisGo - # Set up the EDisGo object using the OpenEnergy DataBase and the oemof - # demandlib to set up time series for loads and fluctuating generators - # (time series for dispatchable generators need to be provided) - timeindex = pd.date_range('1/1/2011', periods=4, freq='H') + # Set up the EDisGo object with generator park scenario NEP2035 and time index + timeindex = pd.date_range("1/1/2011", periods=4, freq="H") + edisgo_obj = EDisGo( + ding0_grid="ding0_example_grid", + generator_scenario="nep2035", + timeindex=timeindex + ) + + # Set up your own time series by load sector and generator type (these are 
dummy + # time series!) + # Set up active power time series of loads and generators in the grid using prede- + # fined profiles per load sector and technology type + # (There are currently no predefined profiles for dispatchable generators, wherefore + # their feed-in profiles need to be provided) timeseries_generation_dispatchable = pd.DataFrame( - {'biomass': [1] * len(timeindex), - 'coal': [1] * len(timeindex), - 'other': [1] * len(timeindex) + {"biomass": [1] * len(timeindex), + "coal": [1] * len(timeindex), + "other": [1] * len(timeindex) }, - index=timeindex) - - edisgo = EDisGo( - ding0_grid='ding0_example_grid', - generator_scenario='ego100', - timeseries_load='demandlib', - timeseries_generation_fluctuating='oedb', - timeseries_generation_dispatchable=timeseries_generation_dispatchable, - timeindex=timeindex) + index=timeindex + ) + edisgo_obj.set_time_series_active_power_predefined( + conventional_loads_ts="demandlib", + fluctuating_generators_ts="oedb", + dispatchable_generators_ts=timeseries_generation_dispatchable + ) + + # Before you can now run a power flow analysis and determine grid expansion needs, + # reactive power time series of the loads and generators also need to be set. Here, + # default configurations are again used. + edisgo_obj.set_time_series_reactive_power_control() # Do grid reinforcement - edisgo.reinforce() + edisgo_obj.reinforce() # Determine cost for each line/transformer that was reinforced - costs = edisgo.results.grid_expansion_costs - -Parallelization ---------------- - -Try :func:`~.edisgo.tools.edisgo_run.run_edisgo_pool_flexible` for -parallelization of your custom function. 
+ costs = edisgo_obj.results.grid_expansion_costs LICENSE ------- diff --git a/doc/usage_details.rst b/doc/usage_details.rst index db11c19d8..1a0fb754f 100644 --- a/doc/usage_details.rst +++ b/doc/usage_details.rst @@ -151,8 +151,140 @@ of the whole topology or each single grid can be retrieved as follows: The returned graph is a :networkx:`networkx.Graph`, where lines are represented by edges in the graph, and buses and transformers are represented by nodes. -Identify grid issues --------------------- +Component time series +------------------------ + +There are various options how to set active and reactive power time series. First, options +for setting active power time series are explained, followed by options for setting +reactive power time series. +You can also check out the :ref:`edisgo-mwe` section to get a quick start. + +Active power time series +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There are various options how to set active time series: + +* "manual": providing your own time series +* "worst-case": using simultaneity factors from config files +* "predefined": using predefined profiles, e.g. standard load profiles +* "optimised": using the LOPF to optimise e.g. vehicle charging +* "heuristic": using heuristics + +.. _active_power_manual: + +Manual +....... + +Use this mode to provide your own time series for specific components. +It can be invoked as follows: + +.. code-block:: python + + edisgo.set_time_series_manual() + +See :attr:`~.edisgo.EDisGo.set_time_series_manual` for more information. + +When using this mode make sure to previously set the time index. This can either be done +upon initialisation of the EDisGo object by providing the input parameter 'timeindex' or +by using the function :attr:`~.edisgo.EDisGo.set_timeindex`. + +Worst-case +........... 
+ +Use this mode to set feed-in and load in heavy load flow case (here called "load_case") +and/or reverse power flow case (here called "feed-in_case") using simultaneity factors +used in conventional grid planning. +It can be invoked as follows: + +.. code-block:: python + + edisgo.set_time_series_worst_case_analysis() + +See :attr:`~.edisgo.EDisGo.set_time_series_worst_case_analysis` for more information. + +When using this mode a fictitious time index starting 1/1/1970 00:00 is automatically set. +This is done because pypsa needs time indices. To find out which time index corresponds +to which case check out: + +.. code-block:: python + + edisgo.timeseries.timeindex_worst_cases + +Predefined +............. + +Use this mode if you want to set time series by component type. +You may either provide your own time series or use ones provided through the +OpenEnergy DataBase or other python tools. +This mode can be invoked as follows: + +.. code-block:: python + + edisgo.set_time_series_active_power_predefined() + +For the following components you can use existing time series: + +* Fluctuating generators: Feed-in time series for solar and wind power plants can be + retrieved from the `OpenEnergy DataBase `_. +* Conventional loads: Standard load profiles for the different sectors residential, + commercial, agricultural and industrial are generated using the oemof + `demandlib `_. + +For all other components you need to provide your own time series. Time series for +heat pumps cannot be set using this mode. +See :attr:`~.edisgo.EDisGo.set_time_series_active_power_predefined` for more information. + +When using this mode make sure to previously set the time index. This can either be done +upon initialisation of the EDisGo object by providing the input parameter 'timeindex' or +by using the function :attr:`~.edisgo.EDisGo.set_timeindex`. + +Optimised +.......... + +Use this mode to optimise flexibilities, e.g. charging of electric vehicles. + +.. 
todo:: Add more details once the optimisation is merged. + +Heuristic +.......... + +Use this mode to use heuristics to set time series. So far, only heuristics for +electric vehicle charging are implemented. + +.. todo:: Add more details once the charging strategies are merged. + +Reactive power time series +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There are so far two options how to set reactive power time series: + +* "manual": providing your own time series +* "fixed :math:`cos\varphi`": using a fixed power factor + +It is perspectively planned to also provide reactive power controls Q(U) and +:math:`cos\varphi(P)`. + +Manual +....... + +See active power :ref:`active_power_manual` mode documentation. + +Fixed :math:`cos\varphi` +................................ + +Use this mode to set reactive power time series using fixed power factors. +It can be invoked as follows: + +.. code-block:: python + + edisgo.set_time_series_reactive_power_control() + +See :attr:`~.edisgo.EDisGo.set_time_series_reactive_power_control` for more information. + +When using this mode make sure to previously set active power time series. + +Identifying grid issues +------------------------- As detailed in :ref:`edisgo-mwe`, once you set up your scenario by instantiating an :class:`~.EDisGo` object, you are ready for a grid analysis and identifying grid @@ -220,50 +352,62 @@ a time series for the storage unit needs to be provided. from edisgo import EDisGo # Set up EDisGo object - edisgo = EDisGo(ding0_grid=dingo_grid_path, - worst_case_analysis='worst-case') + edisgo = EDisGo(ding0_grid=dingo_grid_path) # Get random bus to connect storage to random_bus = edisgo.topology.buses_df.index[3] # Add storage instance edisgo.add_component( - "StorageUnit", + comp_type="storage_unit", + add_ts=False, bus=random_bus, - p_nom=4) + p_nom=4 + ) + + # Set up worst case time series for loads, generators and storage unit + edisgo.set_time_series_worst_case_analysis() + .. 
code-block:: python import pandas as pd from edisgo import EDisGo - # Set up the EDisGo object using the OpenEnergy DataBase and the oemof - # demandlib to set up time series for loads and fluctuating generators - # (time series for dispatchable generators need to be provided) - timeindex = pd.date_range('1/1/2011', periods=4, freq='H') - timeseries_generation_dispatchable = pd.DataFrame( - {'biomass': [1] * len(timeindex), - 'coal': [1] * len(timeindex), - 'other': [1] * len(timeindex) - }, - index=timeindex) + # Set up the EDisGo object + timeindex = pd.date_range("1/1/2011", periods=4, freq="H") edisgo = EDisGo( - ding0_grid='ding0_example_grid', - generator_scenario='ego100', - timeseries_load='demandlib', - timeseries_generation_fluctuating='oedb', - timeseries_generation_dispatchable=timeseries_generation_dispatchable, - timeindex=timeindex) + ding0_grid=dingo_grid_path, + generator_scenario="ego100", + timeindex=timeindex + ) - # Get random bus to connect storage to - random_bus = edisgo.topology.buses_df.index[3] - # Add storage instance + # Add time series for loads and generators + timeseries_generation_dispatchable = pd.DataFrame( + {"biomass": [1] * len(timeindex), + "coal": [1] * len(timeindex), + "other": [1] * len(timeindex) + }, + index=timeindex + ) + edisgo.set_time_series_active_power_predefined( + conventional_loads_ts="demandlib", + fluctuating_generators_ts="oedb", + dispatchable_generators_ts=timeseries_generation_dispatchable + ) + edisgo.set_time_series_reactive_power_control() + + # Add storage unit to random bus with time series edisgo.add_component( - "StorageUnit", - bus=random_bus, + comp_type="storage_unit", + bus=edisgo.topology.buses_df.index[3], p_nom=4, ts_active_power=pd.Series( [-3.4, 2.5, -3.4, 2.5], - index=edisgo.timeseries.timeindex)) + index=edisgo.timeseries.timeindex), + ts_reactive_power=pd.Series( + [0., 0., 0., 0.], + index=edisgo.timeseries.timeindex) + ) Following is an example on how to use the OPF to find the 
optimal storage positions in the grid with regard to grid expansion costs. Storage operation diff --git a/edisgo/__init__.py b/edisgo/__init__.py index 36f1a6c50..d353280c8 100644 --- a/edisgo/__init__.py +++ b/edisgo/__init__.py @@ -1 +1 @@ -from edisgo.edisgo import EDisGo +from edisgo.edisgo import EDisGo # noqa: F401 diff --git a/edisgo/config/config_grid_expansion_default.cfg b/edisgo/config/config_grid_expansion_default.cfg index 572b456dc..4917c2424 100644 --- a/edisgo/config/config_grid_expansion_default.cfg +++ b/edisgo/config/config_grid_expansion_default.cfg @@ -21,7 +21,7 @@ lv_line = NAYY 4x1x150 # allowed voltage deviations # ========================== # relevant for all cases -feedin_case_lower = 0.9 +feed-in_case_lower = 0.9 load_case_upper = 1.1 # COMBINED MV+LV @@ -39,7 +39,7 @@ hv_mv_trafo_control_deviation = 0.0 # mv_lv_max_v_deviation: # max. allowed voltage deviation according to DIN EN 50160 # caution: offset and control deviation at HV-MV station must be considered in calculations! -mv_lv_feedin_case_max_v_deviation = 0.1 +mv_lv_feed-in_case_max_v_deviation = 0.1 mv_lv_load_case_max_v_deviation = 0.1 # MV ONLY @@ -48,25 +48,25 @@ mv_lv_load_case_max_v_deviation = 0.1 # max. allowed voltage deviation in MV grids (load case) mv_load_case_max_v_deviation = 0.015 -# mv_feedin_case_max_v_deviation: -# max. allowed voltage deviation in MV grids (feedin case) +# mv_feed-in_case_max_v_deviation: +# max. allowed voltage deviation in MV grids (feed-in case) # according to BDEW -mv_feedin_case_max_v_deviation = 0.05 +mv_feed-in_case_max_v_deviation = 0.05 # LV ONLY # ------- # max. allowed voltage deviation in LV grids (load case) lv_load_case_max_v_deviation = 0.065 -# max. allowed voltage deviation in LV grids (feedin case) +# max. allowed voltage deviation in LV grids (feed-in case) # according to VDE-AR-N 4105 -lv_feedin_case_max_v_deviation = 0.035 +lv_feed-in_case_max_v_deviation = 0.035 # max. 
allowed voltage deviation in MV/LV stations (load case) mv_lv_station_load_case_max_v_deviation = 0.02 -# max. allowed voltage deviation in MV/LV stations (feedin case) -mv_lv_station_feedin_case_max_v_deviation = 0.015 +# max. allowed voltage deviation in MV/LV stations (feed-in case) +mv_lv_station_feed-in_case_max_v_deviation = 0.015 [grid_expansion_load_factors] @@ -75,13 +75,13 @@ mv_lv_station_feedin_case_max_v_deviation = 0.015 # Source: Rehtanz et. al.: "Verteilnetzstudie für das Land Baden-Württemberg", 2017. mv_load_case_transformer = 0.5 mv_load_case_line = 0.5 -mv_feedin_case_transformer = 1.0 -mv_feedin_case_line = 1.0 +mv_feed-in_case_transformer = 1.0 +mv_feed-in_case_line = 1.0 lv_load_case_transformer = 1.0 lv_load_case_line = 1.0 -lv_feedin_case_transformer = 1.0 -lv_feedin_case_line = 1.0 +lv_feed-in_case_transformer = 1.0 +lv_feed-in_case_line = 1.0 # costs # ============ diff --git a/edisgo/config/config_timeseries_default.cfg b/edisgo/config/config_timeseries_default.cfg index d2055c466..4a7f3516e 100644 --- a/edisgo/config/config_timeseries_default.cfg +++ b/edisgo/config/config_timeseries_default.cfg @@ -18,21 +18,70 @@ # following values provided by "dena-Verteilnetzstudie. Ausbau- und # Innovationsbedarf der Stromverteilnetze in Deutschland bis 2030", .p. 98 -mv_feedin_case_load = 0.15 -lv_feedin_case_load = 0.1 +# conventional load +# factors taken from "dena-Verteilnetzstudie. Ausbau- und +# Innovationsbedarf der Stromverteilnetze in Deutschland bis 2030", p. 98 +mv_feed-in_case_load = 0.15 +lv_feed-in_case_load = 0.1 mv_load_case_load = 1.0 lv_load_case_load = 1.0 -feedin_case_feedin_pv = 0.85 -feedin_case_feedin_wind = 1 -feedin_case_feedin_other = 1 -load_case_feedin_pv = 0 -load_case_feedin_wind = 0 -load_case_feedin_other = 0 +# generators +# factors taken from "dena-Verteilnetzstudie. Ausbau- und +# Innovationsbedarf der Stromverteilnetze in Deutschland bis 2030", p. 
98 +feed-in_case_feed-in_pv = 0.85 +feed-in_case_feed-in_wind = 1.0 +feed-in_case_feed-in_other = 1.0 +load_case_feed-in_pv = 0.0 +load_case_feed-in_wind = 0.0 +load_case_feed-in_other = 0.0 -# temporary own values -feedin_case_storage = 1 -load_case_storage = -1 +# storage units (own values) +feed-in_case_storage = 1.0 +load_case_storage = -1.0 + +# charging points (temporary own values) + +# simultaneity of 0.15 follows assumptions from "dena-Verteilnetzstudie" for conventional loads +mv_feed-in_case_cp_home = 0.15 +mv_feed-in_case_cp_work = 0.15 +mv_feed-in_case_cp_public = 0.15 +mv_feed-in_case_cp_hpc = 0.15 + +# simultaneity in feed-in case is in dena study "Integrierte Energiewende" (p. 90) as well assumed to be zero +lv_feed-in_case_cp_home = 0.0 +lv_feed-in_case_cp_work = 0.0 +lv_feed-in_case_cp_public = 0.0 +lv_feed-in_case_cp_hpc = 0.0 + +# simultaneity in load case should be dependent on number of charging points in the grid +# as well as charging power +# assumed factors for home and work charging higher for LV, as simultaneity of charging +# decreases with the number of charging points + +# simultaneity of 0.2 follows assumptions from dena study "Integrierte Energiewende" (p. 90) where +# simultaneity for 70-500 charging points lies around 20% +mv_load_case_cp_home = 0.2 +mv_load_case_cp_work = 0.2 +mv_load_case_cp_public = 1.0 +mv_load_case_cp_hpc = 1.0 + +lv_load_case_cp_home = 1.0 +lv_load_case_cp_work = 1.0 +lv_load_case_cp_public = 1.0 +lv_load_case_cp_hpc = 1.0 + +# heat pumps (temporary own values) + +# simultaneity in feed-in case is in dena study "Integrierte Energiewende" (p. 90) as well assumed to be zero +mv_feed-in_case_hp = 0.0 +lv_feed-in_case_hp = 0.0 + +# simultaneity in load case should be dependent on number of heat pumps in the grid +# simultaneity of 0.8 follows assumptions from dena study "Integrierte Energiewende" (p. 
90) where +# simultaneity for 70-500 heat pumps lies around 80% +mv_load_case_hp = 0.8 +lv_load_case_hp = 1.0 [reactive_power_factor] @@ -43,9 +92,13 @@ load_case_storage = -1 mv_gen = 0.9 mv_load = 0.9 mv_storage = 0.9 +mv_cp = 1.0 +mv_hp = 1.0 lv_gen = 0.95 lv_load = 0.95 lv_storage = 0.95 +lv_cp = 1.0 +lv_hp = 1.0 [reactive_power_mode] @@ -56,9 +109,13 @@ lv_storage = 0.95 mv_gen = inductive mv_load = inductive mv_storage = inductive +mv_cp = inductive +mv_hp = inductive lv_gen = inductive lv_load = inductive lv_storage = inductive +lv_cp = inductive +lv_hp = inductive [demandlib] diff --git a/edisgo/edisgo.py b/edisgo/edisgo.py index 282bb99dc..40d94df42 100755 --- a/edisgo/edisgo.py +++ b/edisgo/edisgo.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import logging import os import pickle @@ -20,7 +22,7 @@ if "READTHEDOCS" not in os.environ: from shapely.geometry import Point -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) class EDisGo: @@ -45,160 +47,14 @@ class EDisGo: See :attr:`~.EDisGo.import_generators` for further information on how generators are integrated and what further options there are. Default: None. - - worst_case_analysis : None or :obj:`str`, optional - If not None time series for feed-in and load will be generated - according to the chosen worst case analysis. - Possible options are: - - * 'worst-case' - - Feed-in and load for the two worst-case scenarios feed-in case and - load case are generated. - - * 'worst-case-feedin' - - Feed-in and load for the worst-case scenario feed-in case is - generated. - - * 'worst-case-load' - - Feed-in and load for the worst-case scenario load case is generated. - - Worst case scaling factors for loads and generators are specified in - the config section `worst_case_scale_factor`. - - Be aware that if you choose to conduct a worst-case analysis your - input for all other time series parameters (e.g. 
- `timeseries_generation_fluctuating`, - `timeseries_generation_dispatchable`, - `timeseries_load`) will not be used. - As eDisGo is designed to work with time series but worst cases - are not time specific, a random time index 1/1/1970 is used. - - Default: None. - - timeseries_generation_fluctuating : :obj:`str` or \ - :pandas:`pandas.DataFrame` or None, optional - Parameter used to obtain time series for active power feed-in of - fluctuating renewables wind and solar. - Possible options are: - - * 'oedb' - - Hourly time series for the year 2011 are obtained from the OpenEnergy - DataBase. See - :func:`edisgo.io.timeseries_import.import_feedin_timeseries` for more - information. - - * :pandas:`pandas.DataFrame` - - DataFrame with time series for active power feed-in, normalized to - a capacity of 1 MW. - - Time series can either be aggregated by technology type or by type - and weather cell ID. In the first case columns of the DataFrame are - 'solar' and 'wind'; in the second case columns need to be a - :pandas:`pandas.MultiIndex` with the first level - containing the type and the second level the weather cell ID. - - Index needs to be a :pandas:`pandas.DatetimeIndex`. - - When importing a ding0 grid and/or using predefined scenarios - of the future generator park (see parameter `generator_scenario`), - each generator has an assigned weather cell ID that identifies the - weather data cell from the weather data set used in the research - project `open_eGo `_ to - determine feed-in profiles. The weather cell ID can be retrieved - from column `weather_cell_id` in - :attr:`~.network.topology.Topology.generators_df` and could be - overwritten to use own weather cells. - - Default: None. - - timeseries_generation_dispatchable : :pandas:`pandas.DataFrame`\ - or None, optional - DataFrame with time series for active power of each - type of dispatchable generator, normalized to a capacity of 1 MW. - - Index needs to be a :pandas:`pandas.DatetimeIndex`. 
- - Columns represent generator type (e.g. 'gas', 'coal', 'biomass'). - All in the current grid existing generator types can be retrieved - from column `type` in - :attr:`~.network.topology.Topology.generators_df`. - Use 'other' if you don't want to explicitly provide every possible - type. - - Default: None. - timeseries_generation_reactive_power : \ - :pandas:`pandas.DataFrame` or None, optional - Dataframe with time series of normalized reactive power (normalized by - the rated nominal active power) per technology and weather cell. Index - needs to be a :pandas:`pandas.DatetimeIndex`. - Columns represent generator type and can be a MultiIndex containing - the weather cell ID in the second level. If the technology doesn't - contain weather cell information, i.e. if it is not a solar - or wind generator, this second level can be left as a numpy Nan or a - None. - - If no time series for the technology or technology and weather cell ID - is given, reactive power will be calculated from power factor and - power factor mode in the config sections `reactive_power_factor` and - `reactive_power_mode` and a warning will be raised. - - Default: None. - timeseries_load : :obj:`str` or :pandas:`pandas.DataFrame` or \ - None, optional - Parameter used to obtain time series of active power of loads. - Possible options are: - - * 'demandlib' - - Time series for the year specified in input parameter `timeindex` are - generated using standard electric load profiles from the oemof - `demandlib `_. - - * :pandas:`pandas.DataFrame` - - DataFrame with load time series of each type of load - normalized with corresponding annual energy demand. Index needs to - be a :pandas:`pandas.DatetimeIndex`. - Columns represent load type. The in the current grid existing load - types can be retrieved from column `sector` in - :attr:`~.network.topology.Topology.loads_df`. In ding0 grids the - differentiated sectors are 'residential', 'retail', 'industrial', and - 'agricultural'. - - Default: None. 
- - timeseries_load_reactive_power : :pandas:`pandas.DataFrame` \ - or None, optional - Dataframe with time series of normalized reactive power (normalized by - annual energy demand) per load sector. - - Index needs to be a - :pandas:`pandas.DatetimeIndex`. - - Columns represent load type. The in the current grid existing load - types can be retrieved from column `sector` in - :attr:`~.network.topology.Topology.loads_df`. In ding0 grids the - differentiated sectors are 'residential', 'retail', 'industrial', and - 'agricultural'. - - If no time series for the load sector is given, reactive power will be - calculated from power factor and power factor mode in the config - sections `reactive_power_factor` and `reactive_power_mode` and a - warning will be raised. - - Default: None. - timeindex : None or :pandas:`pandas.DatetimeIndex`, optional - Can be used to select time ranges of the feed-in and load time series - that will be used in the power flow analysis. Also defines the year - load time series are obtained for when choosing the 'demandlib' option - to generate load time series. - + Defines the time steps feed-in and demand time series of all generators, loads + and storage units need to be set. + The time index is for example used as default for time steps considered in + the power flow analysis and when checking the integrity of the network. + Providing a time index is only optional in case a worst case analysis is set + up using :func:`~set_time_series_worst_case_analysis`. + In all other cases a time index needs to be set manually. config_path : None or :obj:`str` or :obj:`dict` Path to the config directory. Options are: @@ -237,7 +93,7 @@ class EDisGo: ---------- topology : :class:`~.network.topology.Topology` The topology is a container object holding the topology of the grids. - timeseries: :class:`~.network.timeseries.TimeSeries` + timeseries : :class:`~.network.timeseries.TimeSeries` Container for component time series. 
results : :class:`~.network.results.Results` This is a container holding all calculation results from power flow @@ -257,7 +113,9 @@ def __init__(self, **kwargs): # set up results and time series container self.results = Results(self) self.opf_results = OPFResults() - self.timeseries = timeseries.TimeSeries() + self.timeseries = timeseries.TimeSeries( + timeindex=kwargs.get("timeindex", pd.DatetimeIndex([])) + ) # import new generators if kwargs.get("generator_scenario", None) is not None: @@ -265,17 +123,6 @@ def __init__(self, **kwargs): generator_scenario=kwargs.pop("generator_scenario"), **kwargs ) - # set up time series for feed-in and load - # worst-case time series - if kwargs.get("import_timeseries", True): - if kwargs.get("worst_case_analysis", None): - timeseries.get_component_timeseries( - edisgo_obj=self, - mode=kwargs.get("worst_case_analysis", None), - ) - else: - timeseries.get_component_timeseries(edisgo_obj=self, **kwargs) - @property def config(self): """ @@ -307,56 +154,431 @@ def import_ding0_grid(self, path): if path is not None: import_ding0_grid(path, self) - def to_pypsa(self, **kwargs): + def set_timeindex(self, timeindex): """ - Convert to PyPSA network representation. + Sets :py:attr:`~.network.timeseries.TimeSeries.timeindex` all time-dependent + attributes are indexed by. - A network topology representation based on - :pandas:`pandas.DataFrame`. The overall container object of - this data model, the :pypsa:`pypsa.Network`, - is set up. + The time index is for example used as default for time steps considered in + the power flow analysis and when checking the integrity of the network. Parameters - ---------- - kwargs : - See :func:`~.io.pypsa_io.to_pypsa` for further information. + ----------- + timeindex : :pandas:`pandas.DatetimeIndex` + Time index to set. - Returns - ------- - :pypsa:`pypsa.Network` - PyPSA network representation. 
+ """ + self.timeseries.timeindex = timeindex + def set_time_series_manual( + self, + generators_p=None, + loads_p=None, + storage_units_p=None, + generators_q=None, + loads_q=None, + storage_units_q=None, + ): """ - timesteps = kwargs.pop("timesteps", None) - mode = kwargs.get("mode", None) + Sets given component time series. - if timesteps is None: - timesteps = self.timeseries.timeindex - # check if timesteps is array-like, otherwise convert to list - if not hasattr(timesteps, "__len__"): - timesteps = [timesteps] - # export grid - if not mode: - return pypsa_io.to_pypsa(self, timesteps, **kwargs) - elif "mv" in mode: - return pypsa_io.to_pypsa(self.topology.mv_grid, timesteps, **kwargs) - elif mode == "lv": - lv_grid_name = kwargs.get("lv_grid_name", None) - if not lv_grid_name: - raise ValueError( - "For exporting lv grids, name of lv_grid has to be provided." - ) - return pypsa_io.to_pypsa( - self.topology._grids[lv_grid_name], - mode=mode, - timesteps=timesteps, + If time series for a component were already set before, they are overwritten. + + Parameters + ----------- + generators_p : :pandas:`pandas.DataFrame` + Active power time series in MW of generators. Index of the data frame is + a datetime index. Columns contain generators names of generators to set + time series for. Default: None. + loads_p : :pandas:`pandas.DataFrame` + Active power time series in MW of loads. Index of the data frame is + a datetime index. Columns contain load names of loads to set + time series for. Default: None. + storage_units_p : :pandas:`pandas.DataFrame` + Active power time series in MW of storage units. Index of the data frame is + a datetime index. Columns contain storage unit names of storage units to set + time series for. Default: None. + generators_q : :pandas:`pandas.DataFrame` + Reactive power time series in MVA of generators. Index of the data frame is + a datetime index. Columns contain generators names of generators to set + time series for. Default: None. 
+ loads_q : :pandas:`pandas.DataFrame` + Reactive power time series in MVA of loads. Index of the data frame is + a datetime index. Columns contain load names of loads to set + time series for. Default: None. + storage_units_q : :pandas:`pandas.DataFrame` + Reactive power time series in MVA of storage units. Index of the data frame + is a datetime index. Columns contain storage unit names of storage units to + set time series for. Default: None. + + Notes + ------ + This function raises a warning in case a time index was not previously set. + You can set the time index upon initialisation of the EDisGo object by + providing the input parameter 'timeindex' or using the function + :attr:`~.edisgo.EDisGo.set_timeindex`. + Also make sure that the time steps for which time series are provided include + the set time index. + + """ + # check if time index is already set, otherwise raise warning + if self.timeseries.timeindex.empty: + logger.warning( + "When setting time series manually a time index is not automatically " + "set but needs to be set by the user. You can set the time index " + "upon initialisation of the EDisGo object by providing the input " + "parameter 'timeindex' or using the function EDisGo.set_timeindex()." + ) + self.timeseries.set_active_power_manual( + self, + ts_generators=generators_p, + ts_loads=loads_p, + ts_storage_units=storage_units_p, + ) + self.timeseries.set_reactive_power_manual( + self, + ts_generators=generators_q, + ts_loads=loads_q, + ts_storage_units=storage_units_q, + ) + + def set_time_series_worst_case_analysis( + self, + cases=None, + generators_names=None, + loads_names=None, + storage_units_names=None, + ): + """ + Sets demand and feed-in of all loads, generators and storage units for the + specified worst cases. + + See :func:`~.network.timeseries.TimeSeries.set_worst_case` for more information. + + Parameters + ----------- + cases : str or list(str) + List with worst-cases to generate time series for. 
Can be + 'feed-in_case', 'load_case' or both. Defaults to None in which case both + 'feed-in_case' and 'load_case' are set up. + generators_names : list(str) + Defines for which generators to set worst case time series. If None, + time series are set for all generators. Default: None. + loads_names : list(str) + Defines for which loads to set worst case time series. If None, + time series are set for all loads. Default: None. + storage_units_names : list(str) + Defines for which storage units to set worst case time series. If None, + time series are set for all storage units. Default: None. + + """ + if cases is None: + cases = ["load_case", "feed-in_case"] + if isinstance(cases, str): + cases = [cases] + + self.timeseries.set_worst_case( + self, cases, generators_names, loads_names, storage_units_names + ) + + def set_time_series_active_power_predefined( + self, + fluctuating_generators_ts=None, + fluctuating_generators_names=None, + dispatchable_generators_ts=None, + dispatchable_generators_names=None, + conventional_loads_ts=None, + conventional_loads_names=None, + charging_points_ts=None, + charging_points_names=None, + ): + """ + Uses predefined feed-in or demand profiles. + + Predefined profiles comprise i.e. standard electric conventional load profiles + for different sectors generated using the oemof + `demandlib `_ or feed-in time series of + fluctuating solar and wind generators provided on the OpenEnergy DataBase for + the weather year 2011. + + This function can also be used to provide your own profiles per technology or + load sector. + + Parameters + ----------- + fluctuating_generators_ts : str or :pandas:`pandas.DataFrame` + Defines which technology-specific (or technology and weather cell specific) + time series to use to set active power time series of fluctuating + generators. See parameter `ts_generators` in + :func:`~.network.timeseries.TimeSeries.predefined_fluctuating_generators_by_technology` + for more information. 
If None, no time series of fluctuating generators + are set. Default: None. + fluctuating_generators_names : list(str) + Defines for which fluctuating generators to apply technology-specific time + series. See parameter `generator_names` in + :func:`~.network.timeseries.TimeSeries.predefined_fluctuating_generators_by_technology` + for more information. Default: None. + dispatchable_generators_ts : :pandas:`pandas.DataFrame` + Defines which technology-specific time series to use to set active power + time series of dispatchable generators. + See parameter `ts_generators` in + :func:`~.network.timeseries.TimeSeries.predefined_dispatchable_generators_by_technology` + for more information. If None, no time series of dispatchable generators + are set. Default: None. + dispatchable_generators_names : list(str) + Defines for which dispatchable generators to apply technology-specific time + series. See parameter `generator_names` in + :func:`~.network.timeseries.TimeSeries.predefined_dispatchable_generators_by_technology` + for more information. Default: None. + conventional_loads_ts : :pandas:`pandas.DataFrame` + Defines which sector-specific time series to use to set active power + time series of conventional loads. + See parameter `ts_loads` in + :func:`~.network.timeseries.TimeSeries.predefined_conventional_loads_by_sector` + for more information. If None, no time series of conventional loads + are set. Default: None. + conventional_loads_names : list(str) + Defines for which conventional loads to apply technology-specific time + series. See parameter `load_names` in + :func:`~.network.timeseries.TimeSeries.predefined_conventional_loads_by_sector` + for more information. Default: None. + charging_points_ts : :pandas:`pandas.DataFrame` + Defines which use-case-specific time series to use to set active power + time series of charging points.
+ See parameter `ts_loads` in + :func:`~.network.timeseries.TimeSeries.predefined_charging_points_by_use_case` + for more information. If None, no time series of charging points + are set. Default: None. + charging_points_names : list(str) + Defines for which charging points to apply use-case-specific time + series. See parameter `load_names` in + :func:`~.network.timeseries.TimeSeries.predefined_charging_points_by_use_case` + for more information. Default: None. + + Notes + ------ + This function raises a warning in case a time index was not previously set. + You can set the time index upon initialisation of the EDisGo object by + providing the input parameter 'timeindex' or using the function + :attr:`~.edisgo.EDisGo.set_timeindex`. + Also make sure that the time steps for which time series are provided include + the set time index. + + """ + if self.timeseries.timeindex.empty: + logger.warning( + "When setting time series using predefined profiles a time index is " + "not automatically set but needs to be set by the user. In some cases " + "not setting a time index prior to calling this function may lead " + "to errors. You can set the time index upon initialisation of the " + "EDisGo object by providing the input parameter 'timeindex' or using " + "the function EDisGo.set_timeindex()." 
+ ) + return + if fluctuating_generators_ts is not None: + self.timeseries.predefined_fluctuating_generators_by_technology( + self, fluctuating_generators_ts, fluctuating_generators_names + ) + if dispatchable_generators_ts is not None: + self.timeseries.predefined_dispatchable_generators_by_technology( + self, dispatchable_generators_ts, dispatchable_generators_names + ) + if conventional_loads_ts is not None: + self.timeseries.predefined_conventional_loads_by_sector( + self, conventional_loads_ts, conventional_loads_names + ) + if charging_points_ts is not None: + self.timeseries.predefined_charging_points_by_use_case( + self, charging_points_ts, charging_points_names + ) + + def set_time_series_reactive_power_control( + self, + control="fixed_cosphi", + generators_parametrisation="default", + loads_parametrisation="default", + storage_units_parametrisation="default", + ): + """ + Set reactive power time series of components. + + Parameters + ----------- + control : str + Type of reactive power control to apply. Currently the only option is + 'fixed_cosphi'. See :func:`~.network.timeseries.TimeSeries.fixed_cosphi` + for further information. + generators_parametrisation : str or :pandas:`pandas.DataFrame` + See parameter `generators_parametrisation` in + :func:`~.network.timeseries.TimeSeries.fixed_cosphi` for further + information. Here, per default, the option 'default' is used. + loads_parametrisation : str or :pandas:`pandas.DataFrame` + See parameter `loads_parametrisation` in + :func:`~.network.timeseries.TimeSeries.fixed_cosphi` for further + information. Here, per default, the option 'default' is used. + storage_units_parametrisation : str or :pandas:`pandas.DataFrame` + See parameter `storage_units_parametrisation` in + :func:`~.network.timeseries.TimeSeries.fixed_cosphi` for further + information. Here, per default, the option 'default' is used.
+ + Notes + ------ + Be careful to set parametrisation of other component types to None if you only + want to set reactive power of certain components. See example below for further + information. + + Examples + -------- + To only set reactive power time series of one generator using default + configurations you can do the following: + + >>> self.set_time_series_reactive_power_control( + >>> generators_parametrisation=pd.DataFrame( + >>> { + >>> "components": [["Generator_1"]], + >>> "mode": ["default"], + >>> "power_factor": ["default"], + >>> }, + >>> index=[1], + >>> ), + >>> loads_parametrisation=None, + >>> storage_units_parametrisation=None + >>> ) + + In the example above, `loads_parametrisation` and + `storage_units_parametrisation` need to be set to None, otherwise already + existing time series would be overwritten. + + To only change configuration of one load and for all other components use + default configurations you can do the following: + + >>> self.set_time_series_reactive_power_control( + >>> loads_parametrisation=pd.DataFrame( + >>> { + >>> "components": [["Load_1"], + >>> self.topology.loads_df.index.drop(["Load_1"])], + >>> "mode": ["capacitive", "default"], + >>> "power_factor": [0.98, "default"], + >>> }, + >>> index=[1, 2], + >>> ) + >>> ) + + In the example above, `generators_parametrisation` and + `storage_units_parametrisation` do not need to be set as default configurations + are per default used for all generators and storage units anyways. 
+ + """ + if control == "fixed_cosphi": + self.timeseries.fixed_cosphi( + self, + generators_parametrisation=generators_parametrisation, + loads_parametrisation=loads_parametrisation, + storage_units_parametrisation=storage_units_parametrisation, ) else: - raise ValueError("The entered mode is not a valid option.") + raise ValueError("'control' must be 'fixed_cosphi'.") + + def to_pypsa( + self, mode=None, timesteps=None, check_edisgo_integrity=False, **kwargs + ): + """ + Convert grid to :pypsa:`PyPSA.Network` representation. + + You can choose between translation of the MV and all underlying LV grids + (mode=None (default)), the MV network only (mode='mv' or mode='mvlv') or a + single LV network (mode='lv'). + + Parameters + ----------- + mode : str + Determines network levels that are translated to + :pypsa:`PyPSA.Network`. + Possible options are: + + * None + + MV and underlying LV networks are exported. This is the default. + + * 'mv' + + Only MV network is exported. MV/LV transformers are not exported in + this mode. Loads, generators and storage units in underlying LV grids + are connected to the respective MV/LV station's primary side. Per + default, they are all connected separately, but you can also choose to + aggregate them. See parameters `aggregate_loads`, `aggregate_generators` + and `aggregate_storages` for more information. + + * 'mvlv' + + This mode works similar as mode 'mv', with the difference that MV/LV + transformers are as well exported and LV components connected to the + respective MV/LV station's secondary side. Per default, all components + are connected separately, but you can also choose to aggregate them. + See parameters `aggregate_loads`, `aggregate_generators` + and `aggregate_storages` for more information. + + * 'lv' + + Single LV network topology including the MV/LV transformer is exported. + The LV grid to export is specified through the parameter `lv_grid_name`. 
+ The slack is positioned at the secondary side of the MV/LV station. + + timesteps : :pandas:`pandas.DatetimeIndex` or \ + :pandas:`pandas.Timestamp` + Specifies which time steps to export to pypsa representation to e.g. + later on use in power flow analysis. It defaults to None in which case + all time steps in :attr:`~.network.timeseries.TimeSeries.timeindex` + are used. + Default: None. + check_edisgo_integrity : bool + Check integrity of edisgo object before translating to pypsa. This option is + meant to help the identification of possible sources of errors if the power + flow calculations fail. See :attr:`~.edisgo.EDisGo.check_integrity` for + more information. + + Other Parameters + ------------------- + use_seed : bool + Use a seed for the initial guess for the Newton-Raphson algorithm. + Only available when MV level is included in the power flow analysis. + If True, uses voltage magnitude results of previous power flow + analyses as initial guess in case of PQ buses. PV buses currently do + not occur and are therefore currently not supported. + Default: False. + lv_grid_name : str + String representative of LV grid to export in case mode is 'lv'. + aggregate_loads : str + Mode for load aggregation in LV grids in case mode is 'mv' or 'mvlv'. + Can be 'sectoral' aggregating the loads sector-wise, 'all' aggregating all + loads into one or None, not aggregating loads but appending them to the + station one by one. Default: None. + aggregate_generators : str + Mode for generator aggregation in LV grids in case mode is 'mv' or 'mvlv'. + Can be 'type' aggregating generators per generator type, 'curtailable' + aggregating 'solar' and 'wind' generators into one and all other generators + into another one, or None, where no aggregation is undertaken + and generators are added to the station one by one. Default: None. + aggregate_storages : str + Mode for storage unit aggregation in LV grids in case mode is 'mv' or + 'mvlv'. 
Can be 'all' where all storage units in an LV grid are aggregated to + one storage unit or None, in which case no aggregation is conducted and + storage units are added to the station. Default: None. + + Returns + ------- + :pypsa:`PyPSA.Network` + :pypsa:`PyPSA.Network` representation. + + """ + # possibly execute consistency check + if check_edisgo_integrity or logger.level == logging.DEBUG: + self.check_integrity() + return pypsa_io.to_pypsa(self, mode, timesteps, **kwargs) def to_graph(self): """ - Returns graph representation of the grid. + Returns networkx graph representation of the grid. Returns ------- @@ -369,21 +591,6 @@ def to_graph(self): return self.topology.to_graph() - # def curtail(self, methodology, curtailment_timeseries, **kwargs): - # """ - # Sets up curtailment time series. - # - # Curtailment time series are written into - # :class:`~.network.network.TimeSeries`. See - # :class:`~.network.network.CurtailmentControl` for more information on - # parameters and methodologies. - # - # # """ - # raise NotImplementedError - # # CurtailmentControl(edisgo=self, methodology=methodology, - # # curtailment_timeseries=curtailment_timeseries, - # # mode=kwargs.pop('mode', None), **kwargs) - def import_generators(self, generator_scenario=None, **kwargs): """ Gets generator park for specified scenario and integrates them into @@ -442,30 +649,74 @@ def import_generators(self, generator_scenario=None, **kwargs): edisgo_object=self, generator_scenario=generator_scenario, **kwargs ) - def analyze(self, mode=None, timesteps=None, **kwargs): - """Conducts a static, non-linear power flow analysis + def analyze(self, mode=None, timesteps=None, raise_not_converged=True, **kwargs): + """ + Conducts a static, non-linear power flow analysis. 
Conducts a static, non-linear power flow analysis using - `PyPSA `_ + `PyPSA `_ and writes results (active, reactive and apparent power as well as - current on lines and voltages at buses) to - :class:`~.network.results.Results` + current on lines and voltages at buses) to :class:`~.network.results.Results` (e.g. :attr:`~.network.results.Results.v_res` for voltages). - See :func:`~.io.pypsa_io.to_pypsa` for more information. Parameters ---------- - mode : str - Allows to toggle between power flow analysis (PFA) on the whole - network topology (default: None), only MV ('mv' or 'mvlv') or only - LV ('lv'). Defaults to None which equals power flow analysis for - MV + LV. + mode : str or None + Allows to toggle between power flow analysis for the whole network or just + the MV or one LV grid. Possible options are: + + * None (default) + + Power flow analysis is conducted for the whole network including MV grid + and underlying LV grids. + + * 'mv' + + Power flow analysis is conducted for the MV level only. LV loads, + generators and storage units are aggregated at the respective MV/LV + stations' primary side. Per default, they are all connected separately, + but you can also choose to aggregate them. See parameters + `aggregate_loads`, `aggregate_generators` and `aggregate_storages` + in :attr:`~.edisgo.EDisGo.to_pypsa` for more information. + + * 'mvlv' + + Power flow analysis is conducted for the MV level only. In contrast to + mode 'mv' LV loads, generators and storage units are in this case + aggregated at the respective MV/LV stations' secondary side. Per + default, they are all connected separately, but you can also choose to + aggregate them. See parameters `aggregate_loads`, `aggregate_generators` + and `aggregate_storages` in :attr:`~.edisgo.EDisGo.to_pypsa` for more + information. + + * 'lv' + + Power flow analysis is conducted for one LV grid only. 
Name of the LV + grid to conduct power flow analysis for needs to be provided through + keyword argument 'lv_grid_name' as string. + The slack is positioned at the secondary side of the MV/LV station. + timesteps : :pandas:`pandas.DatetimeIndex` or \ :pandas:`pandas.Timestamp` Timesteps specifies for which time steps to conduct the power flow - analysis. It defaults to None in which case the time steps in - :attr:`~.network.timeseries.TimeSeries.timeindex` are - used. + analysis. It defaults to None in which case all time steps in + :attr:`~.network.timeseries.TimeSeries.timeindex` are used. + raise_not_converged : bool + If True, an error is raised in case power flow analysis did not converge + for all time steps. + Default: True. + + Other Parameters + ----------------- + kwargs : dict + Possible other parameters comprise all other parameters that can be set in + :func:`edisgo.io.pypsa_io.to_pypsa`. + + Returns + -------- + :pandas:`pandas.DatetimeIndex` + Returns the time steps for which power flow analysis did not converge. 
""" if timesteps is None: @@ -479,40 +730,108 @@ def analyze(self, mode=None, timesteps=None, **kwargs): # run power flow analysis pf_results = pypsa_network.pf(timesteps, use_seed=kwargs.get("use_seed", False)) - if all(pf_results["converged"]["0"].tolist()): - pypsa_io.process_pfa_results(self, pypsa_network, timesteps) - else: + # get converged and not converged time steps + timesteps_converged = pf_results["converged"][ + pf_results["converged"]["0"] + ].index + timesteps_not_converged = pf_results["converged"][ + ~pf_results["converged"]["0"] + ].index + + if raise_not_converged and len(timesteps_not_converged) > 0: raise ValueError( "Power flow analysis did not converge for the " - "following time steps: {}.".format( - timesteps[~pf_results["converged"]["0"]].tolist() + "following {} time steps: {}.".format( + len(timesteps_not_converged), timesteps_not_converged + ) + ) + elif len(timesteps_not_converged) > 0: + logger.warning( + "Power flow analysis did not converge for the " + "following {} time steps: {}.".format( + len(timesteps_not_converged), timesteps_not_converged ) ) - def reinforce(self, **kwargs): + # handle converged time steps + pypsa_io.process_pfa_results(self, pypsa_network, timesteps_converged) + + return timesteps_not_converged + + def reinforce( + self, + timesteps_pfa: str | pd.DatetimeIndex | pd.Timestamp | None = None, + copy_grid: bool = False, + max_while_iterations: int = 20, + combined_analysis: bool = False, + mode: str | None = None, + **kwargs, + ) -> Results: """ Reinforces the network and calculates network expansion costs. + If the :attr:`edisgo.network.timeseries.TimeSeries.is_worst_case` is + True input for `timesteps_pfa` and `mode` are overwritten and therefore + ignored. + See :func:`edisgo.flex_opt.reinforce_grid.reinforce_grid` for more - information. + information on input parameters and methodology. 
+ + Other Parameters + ----------------- + is_worst_case : bool + Is used to overwrite the return value from + :attr:`edisgo.network.timeseries.TimeSeries.is_worst_case`. If True + reinforcement is calculated for worst-case MV and LV cases separately. """ + if kwargs.get("is_worst_case", self.timeseries.is_worst_case): + + logger.info( + "Running reinforcement in worst-case mode by differentiating between mv" + " and lv load and feed-in cases." + ) + + timeindex_worst_cases = self.timeseries.timeindex_worst_cases + timesteps_pfa = pd.DatetimeIndex( + timeindex_worst_cases.loc[ + timeindex_worst_cases.index.str.contains("mv") + ] + ) + mode = "mv" + + reinforce_grid( + self, + max_while_iterations=max_while_iterations, + copy_grid=copy_grid, + timesteps_pfa=timesteps_pfa, + combined_analysis=combined_analysis, + mode=mode, + ) + + timesteps_pfa = pd.DatetimeIndex( + timeindex_worst_cases.loc[ + timeindex_worst_cases.index.str.contains("lv") + ] + ) + mode = "lv" + results = reinforce_grid( self, - max_while_iterations=kwargs.get("max_while_iterations", 10), - copy_grid=kwargs.get("copy_grid", False), - timesteps_pfa=kwargs.get("timesteps_pfa", None), - combined_analysis=kwargs.get("combined_analysis", False), - mode=kwargs.get("mode", None), + max_while_iterations=max_while_iterations, + copy_grid=copy_grid, + timesteps_pfa=timesteps_pfa, + combined_analysis=combined_analysis, + mode=mode, ) # add measure to Results object - if not kwargs.get("copy_grid", False): + if not copy_grid: self.results.measures = "grid_expansion" return results - def perform_mp_opf(self, timesteps, storage_series=[], **kwargs): + def perform_mp_opf(self, timesteps, storage_series=None, **kwargs): """ Run optimal power flow with julia. @@ -530,433 +849,445 @@ def perform_mp_opf(self, timesteps, storage_series=[], **kwargs): Status of optimization. 
""" - status = run_mp_opf(self, timesteps, storage_series=storage_series, **kwargs) - return status + if storage_series is None: + storage_series = [] + return run_mp_opf(self, timesteps, storage_series=storage_series, **kwargs) - def aggregate_components( + def add_component( self, - mode="by_component_type", - aggregate_generators_by_cols=["bus"], - aggregate_loads_by_cols=["bus"], - aggregate_charging_points_by_cols=["bus"], + comp_type, + ts_active_power=None, + ts_reactive_power=None, + **kwargs, ): """ - Aggregates generators, loads and charging points at the same bus. + Adds single component to network. - There are several options how to aggregate. By default all components - of the same type are aggregated separately. You can specify further - columns to consider in the aggregation, such as the generator type - or the load sector. - - Be aware that by aggregating components you lose some information - e.g. on load sector or charging point use case. + Components can be lines or buses as well as generators, loads, or storage units. + If add_ts is set to True, time series of elements are set as well. Currently, + time series need to be provided. Parameters - ----------- - mode : str - Valid options are 'by_component_type' and 'by_load_and_generation'. - In case of aggregation 'by_component_type' generators, loads and - charging points are aggregated separately, by the respectively - specified columns, given in `aggregate_generators_by_cols`, - `aggregate_loads_by_cols`, and `aggregate_charging_points_by_cols`. - In case of aggregation 'by_load_and_generation', all loads and - charging points at the same bus are aggregated. Input in - `aggregate_loads_by_cols` and `aggregate_charging_points_by_cols` - is ignored. Generators are aggregated by the columns specified in - `aggregate_generators_by_cols`. - aggregate_generators_by_cols : list(str) - List of columns to aggregate generators at the same bus by. 
Valid - columns are all columns in - :attr:`~.network.topology.Topology.generators_df`. - aggregate_loads_by_cols : list(str) - List of columns to aggregate loads at the same bus by. Valid - columns are all columns in - :attr:`~.network.topology.Topology.loads_df`. - aggregate_charging_points_by_cols : list(str) - List of columns to aggregate charging points at the same bus by. - Valid columns are all columns in - :attr:`~.network.topology.Topology.charging_points_df`. + ---------- + comp_type : str + Type of added component. Can be 'bus', 'line', 'load', 'generator', or + 'storage_unit'. + ts_active_power : :pandas:`pandas.Series` or None + Active power time series of added component. + Index of the series must contain all time steps in + :attr:`~.network.timeseries.TimeSeries.timeindex`. + Values are active power per time step in MW. + Defaults to None in which case no time series is set. + ts_reactive_power : :pandas:`pandas.Series` or str or None + Possible options are: + + * :pandas:`pandas.Series` + + Reactive power time series of added component. Index of the series must + contain all time steps in + :attr:`~.network.timeseries.TimeSeries.timeindex`. Values are reactive + power per time step in MVA. + + * "default" + + Reactive power time series is determined based on assumptions on fixed + power factor of the component. To this end, the power factors set in the + config section `reactive_power_factor` and the power factor mode, + defining whether components behave inductive or capacitive, given in the + config section `reactive_power_mode`, are used. + This option requires you to provide an active power time series. In case + it was not provided, reactive power cannot be set and a warning is + raised. + + * None + + No reactive power time series is set. + + Default: None + **kwargs: dict + Attributes of added component. See respective functions for required + entries. 
+ + * 'bus' : :attr:`~.network.topology.Topology.add_bus` + + * 'line' : :attr:`~.network.topology.Topology.add_line` + + * 'load' : :attr:`~.network.topology.Topology.add_load` + + * 'generator' : :attr:`~.network.topology.Topology.add_generator` + + * 'storage_unit' : :attr:`~.network.topology.Topology.add_storage_unit` """ - # aggregate generators at the same bus - if mode == "by_component_type" or mode == "by_load_and_generation": - if not self.topology.generators_df.empty: - gens_groupby = self.topology.generators_df.groupby( - aggregate_generators_by_cols - ) - naming = "Generators_{}" - # set up new generators_df - gens_df_grouped = gens_groupby.sum().reset_index() - gens_df_grouped["name"] = gens_df_grouped.apply( - lambda _: naming.format( - "_".join(_.loc[aggregate_generators_by_cols]) - ), - axis=1, + # ToDo: Add option to add transformer. + # Todo: change into add_components to allow adding of several components + # at a time, change topology.add_load etc. to add_loads, where + # lists of parameters can be inserted + + def _get_q_default_df(comp_name): + return pd.DataFrame( + { + "components": [[comp_name]], + "mode": ["default"], + "power_factor": ["default"], + }, + index=["comp"], + ) + + def _set_timeseries(): + if ts_active_power is not None: + self.set_time_series_manual( + **{f"{comp_type}s_p": pd.DataFrame({comp_name: ts_active_power})} ) - gens_df_grouped["control"] = "PQ" - gens_df_grouped["control"] = "misc" - if "weather_cell_id" in gens_df_grouped.columns: - gens_df_grouped.drop(columns=["weather_cell_id"], inplace=True) - self.topology.generators_df = gens_df_grouped.set_index("name") - # set up new generator time series - groups = gens_groupby.groups - if isinstance(list(groups.keys())[0], tuple): - self.timeseries.generators_active_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - "_".join(k) - ): self.timeseries.generators_active_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) 
- self.timeseries.generators_reactive_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - "_".join(k) - ): self.timeseries.generators_reactive_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) - else: - self.timeseries.generators_active_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - k - ): self.timeseries.generators_active_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) - self.timeseries.generators_reactive_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - k - ): self.timeseries.generators_reactive_power.loc[ - :, v - ].sum( - axis=1 - ) - } + if ts_reactive_power is not None: + if isinstance(ts_reactive_power, pd.Series): + self.set_time_series_manual( + **{ + f"{comp_type}s_q": pd.DataFrame( + {comp_name: ts_reactive_power} ) - for k, v in groups.items() - ], - axis=1, + } ) + elif ts_reactive_power == "default": + if ts_active_power is None: + logging.warning( + f"Default reactive power time series of {comp_name} cannot " + "be set as active power time series was not provided." 
+ ) + else: + other_comps = [ + _ + for _ in ["generator", "load", "storage_unit"] + if _ != comp_type + ] + parameter_dict = { + f"{t}s_parametrisation": None for t in other_comps + } + parameter_dict.update( + { + f"{comp_type}s_parametrisation": _get_q_default_df( + comp_name + ) + } + ) + self.set_time_series_reactive_power_control(**parameter_dict) - # aggregate conventional loads at the same bus and charging points - # at the same bus separately - if mode == "by_component_type": + if comp_type == "bus": + comp_name = self.topology.add_bus(**kwargs) - # conventional loads - if not self.topology.loads_df.empty: - loads_df = self.topology.loads_df.loc[ - self.topology.loads_df.type.isin(["load", ""]) - ] - loads_groupby = loads_df.groupby(aggregate_loads_by_cols) - naming = "Loads_{}" - - # set up new loads_df - loads_df_grouped = loads_groupby.sum().reset_index() - loads_df_grouped["name"] = loads_df_grouped.apply( - lambda _: naming.format("_".join(_.loc[aggregate_loads_by_cols])), - axis=1, + elif comp_type == "line": + comp_name = self.topology.add_line(**kwargs) + + elif comp_type == "generator": + comp_name = self.topology.add_generator(**kwargs) + _set_timeseries() + + elif comp_type == "storage_unit": + comp_name = self.topology.add_storage_unit(**kwargs) + _set_timeseries() + + elif comp_type == "load": + comp_name = self.topology.add_load(**kwargs) + _set_timeseries() + + else: + raise ValueError( + "Invalid input for parameter 'comp_type'. Must either be " + "'line', 'bus', 'generator', 'load' or 'storage_unit'." + ) + return comp_name + + def integrate_component_based_on_geolocation( + self, + comp_type, + geolocation, + voltage_level=None, + add_ts=True, + ts_active_power=None, + ts_reactive_power=None, + **kwargs, + ): + """ + Adds single component to topology based on geolocation. + + Currently components can be generators or charging points. + + Parameters + ---------- + comp_type : str + Type of added component. 
Can be 'generator' or 'charging_point'. + geolocation : :shapely:`shapely.Point` or tuple + Geolocation of the new component. In case of tuple, the geolocation + must be given in the form (longitude, latitude). + voltage_level : int, optional + Specifies the voltage level the new component is integrated in. + Possible options are 4 (MV busbar), 5 (MV grid), 6 (LV busbar) or + 7 (LV grid). If no voltage level is provided the voltage level + is determined based on the nominal power `p_nom` (given as kwarg) + as follows: + + * voltage level 4 (MV busbar): nominal power between 4.5 MW and + 17.5 MW + * voltage level 5 (MV grid) : nominal power between 0.3 MW and + 4.5 MW + * voltage level 6 (LV busbar): nominal power between 0.1 MW and + 0.3 MW + * voltage level 7 (LV grid): nominal power below 0.1 MW + + add_ts : bool, optional + Indicator if time series for component are added as well. + Default: True. + ts_active_power : :pandas:`pandas.Series`, optional + Active power time series of added component. Index of the series + must contain all time steps in + :attr:`~.network.timeseries.TimeSeries.timeindex`. + Values are active power per time step in MW. If you want + to add time series (if `add_ts` is True), you must provide a + time series. It is not automatically retrieved. + ts_reactive_power : :pandas:`pandas.Series`, optional + Reactive power time series of added component. Index of the series + must contain all time steps in + :attr:`~.network.timeseries.TimeSeries.timeindex`. + Values are reactive power per time step in MVA. If you + want to add time series (if `add_ts` is True), you must provide a + time series. It is not automatically retrieved. + + Other Parameters + ------------------ + kwargs : + Attributes of added component. + See :attr:`~.network.topology.Topology.add_generator` respectively + :attr:`~.network.topology.Topology.add_charging_point` methods + for more information on required and optional parameters of + generators and charging points. 
+ + """ + supported_voltage_levels = {4, 5, 6, 7} + p_nom = kwargs.get("p_nom", None) + p_set = kwargs.get("p_set", None) + + p = p_nom if p_set is None else p_set + + kwargs["p"] = p + + if voltage_level not in supported_voltage_levels: + if p is None: + raise ValueError( + "Neither appropriate voltage level nor nominal power " + "were supplied." ) + # Determine voltage level manually from nominal power + if 4.5 < p <= 17.5: + voltage_level = 4 + elif 0.3 < p <= 4.5: + voltage_level = 5 + elif 0.1 < p <= 0.3: + voltage_level = 6 + elif 0 < p <= 0.1: + voltage_level = 7 + else: + raise ValueError("Unsupported voltage level") - loads_df_grouped = loads_df_grouped.assign(type="load") + # check if geolocation is given as shapely Point, otherwise transform + # to shapely Point + if type(geolocation) is not Point: + geolocation = Point(geolocation) - self.topology.loads_df.drop(index=loads_df.index, inplace=True) + # Connect in MV + if voltage_level in [4, 5]: + kwargs["voltage_level"] = voltage_level + kwargs["geom"] = geolocation + comp_name = self.topology.connect_to_mv(self, kwargs, comp_type) - self.topology.loads_df = self.topology.loads_df.append( - loads_df_grouped.set_index("name") + # Connect in LV + else: + substations = self.topology.buses_df.loc[ + self.topology.transformers_df.bus1.unique() + ] + nearest_substation, _ = find_nearest_bus(geolocation, substations) + kwargs["mvlv_subst_id"] = int(nearest_substation.split("_")[-2]) + kwargs["geom"] = geolocation + kwargs["voltage_level"] = voltage_level + comp_name = self.topology.connect_to_lv(self, kwargs, comp_type) + + if add_ts: + if comp_type == "generator": + self.set_time_series_manual( + generators_p=pd.DataFrame({comp_name: ts_active_power}), + generators_q=pd.DataFrame({comp_name: ts_reactive_power}), + ) + else: + self.set_time_series_manual( + loads_p=pd.DataFrame({comp_name: ts_active_power}), + loads_q=pd.DataFrame({comp_name: ts_reactive_power}), ) - # set up new loads time series - groups = 
loads_groupby.groups - - if isinstance(list(groups.keys())[0], tuple): - self.timeseries.loads_active_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - "_".join(k) - ): self.timeseries.loads_active_power.loc[:, v].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) - self.timeseries.loads_reactive_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - "_".join(k) - ): self.timeseries.loads_reactive_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, + return comp_name + + def remove_component(self, comp_type, comp_name, drop_ts=True): + """ + Removes single component from network. + + Components can be lines or buses as well as generators, loads, or storage units. + If drop_ts is set to True, time series of elements are deleted as well. + + Parameters + ---------- + comp_type : str + Type of removed component. Can be 'bus', 'line', 'load', 'generator', or + 'storage_unit'. + comp_name : str + Name of component to be removed. + drop_ts : bool + Indicator if time series for component are removed as well. Defaults + to True. 
+ + """ + # Todo: change into remove_components, when add_component is changed into + # add_components, to allow removal of several components at a time + + if comp_type == "bus": + self.topology.remove_bus(comp_name) + + elif comp_type == "line": + self.topology.remove_line(comp_name) + + elif comp_type == "load": + self.topology.remove_load(comp_name) + if drop_ts: + for ts in ["active_power", "reactive_power"]: + timeseries.drop_component_time_series( + obj=self.timeseries, df_name=f"loads_{ts}", comp_names=comp_name ) - else: - self.timeseries.loads_active_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - k - ): self.timeseries.loads_active_power.loc[:, v].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, + + elif comp_type == "generator": + self.topology.remove_generator(comp_name) + if drop_ts: + for ts in ["active_power", "reactive_power"]: + timeseries.drop_component_time_series( + obj=self.timeseries, + df_name=f"generators_{ts}", + comp_names=comp_name, ) - self.timeseries.loads_reactive_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - k - ): self.timeseries.loads_reactive_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, + + elif comp_type == "storage_unit": + self.topology.remove_storage_unit(comp_name) + if drop_ts: + for ts in ["active_power", "reactive_power"]: + timeseries.drop_component_time_series( + obj=self.timeseries, + df_name=f"storage_units_{ts}", + comp_names=comp_name, ) - # charging points - if not self.topology.charging_points_df.empty: - loads_groupby = self.topology.charging_points_df.groupby( - aggregate_charging_points_by_cols - ) - naming = "ChargingPoints_{}" - - # set up new charging_points_df - loads_df_grouped = loads_groupby.sum().reset_index() - loads_df_grouped["name"] = loads_df_grouped.apply( - lambda _: naming.format( - "_".join(_.loc[aggregate_charging_points_by_cols]) - ), - axis=1, - ) + else: + raise ValueError("Component type is not 
correct.") + + def aggregate_components( + self, + aggregate_generators_by_cols=None, + aggregate_loads_by_cols=None, + ): + """ + Aggregates generators and loads at the same bus. + + By default all generators respectively loads at the same bus are aggregated. + You can specify further columns to consider in the aggregation, such as the + generator type or the load sector. Make sure to always include the bus in the + list of columns to aggregate by, as otherwise the topology would change. + + Be aware that by aggregating components you loose some information + e.g. on load sector or charging point use case. + + Parameters + ----------- + aggregate_generators_by_cols : list(str) or None + List of columns to aggregate generators at the same bus by. Valid + columns are all columns in + :attr:`~.network.topology.Topology.generators_df`. If an empty list is + given, generators are not aggregated. Defaults to None, in + which case all generators at the same bus are aggregated. + aggregate_loads_by_cols : list(str) + List of columns to aggregate loads at the same bus by. Valid + columns are all columns in + :attr:`~.network.topology.Topology.loads_df`. If an empty list is + given, generators are not aggregated. Defaults to None, in + which case all loads at the same bus are aggregated. 
+ + """ - loads_df_grouped = loads_df_grouped.assign(type="charging_point") + def _aggregate_time_series(attribute, groups, naming): + return pd.concat( + [ + pd.DataFrame( + { + naming.format("_".join(k)) + if isinstance(k, tuple) + else naming.format(k): getattr(self.timeseries, attribute) + .loc[:, v] + .sum(axis=1) + } + ) + for k, v in groups.items() + ], + axis=1, + ) - self.topology.loads_df.drop( - index=self.topology.charging_points_df.index, inplace=True - ) + if aggregate_generators_by_cols is None: + aggregate_generators_by_cols = ["bus"] + if aggregate_loads_by_cols is None: + aggregate_loads_by_cols = ["bus"] - self.topology.loads_df = self.topology.loads_df.append( - loads_df_grouped.set_index("name") - ) + # aggregate generators + if ( + len(aggregate_generators_by_cols) > 0 + and not self.topology.generators_df.empty + ): - # set up new charging points time series - groups = loads_groupby.groups - - if isinstance(list(groups.keys())[0], tuple): - self.timeseries.charging_points_active_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - "_".join(k) - ): self.timeseries.charging_points_active_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) - self.timeseries.charging_points_reactive_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - "_".join(k) - ): self.timeseries.charging_points_reactive_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) - else: - self.timeseries.charging_points_active_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - k - ): self.timeseries.charging_points_active_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) - self.timeseries.charging_points_reactive_power = pd.concat( - [ - pd.DataFrame( - { - naming.format( - k - ): self.timeseries.charging_points_reactive_power.loc[ - :, v - ].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) + 
gens_groupby = self.topology.generators_df.groupby( + aggregate_generators_by_cols + ) + naming = "Generators_{}" - # aggregate all loads (conventional loads and charging points) at the - # same bus - elif mode == "by_load_and_generation": - aggregate_loads_by_cols = ["bus"] - loads_groupby = self.topology.loads_df.loc[:, ["bus", "p_nom"]].groupby( - aggregate_loads_by_cols + # set up new generators_df + gens_df_grouped = gens_groupby.sum().reset_index() + gens_df_grouped["name"] = gens_df_grouped.apply( + lambda _: naming.format("_".join(_.loc[aggregate_generators_by_cols])), + axis=1, + ) + gens_df_grouped["control"] = "PQ" + if "weather_cell_id" in gens_df_grouped.columns: + gens_df_grouped.drop(columns=["weather_cell_id"], inplace=True) + self.topology.generators_df = gens_df_grouped.set_index("name") + + # set up new generator time series + self.timeseries.generators_active_power = _aggregate_time_series( + "generators_active_power", gens_groupby.groups, naming + ) + self.timeseries.generators_reactive_power = _aggregate_time_series( + "generators_reactive_power", gens_groupby.groups, naming ) + # aggregate loads + if len(aggregate_loads_by_cols) > 0 and not self.topology.loads_df.empty: + + loads_groupby = self.topology.loads_df.groupby(aggregate_loads_by_cols) naming = "Loads_{}" + # set up new loads_df loads_df_grouped = loads_groupby.sum().reset_index() loads_df_grouped["name"] = loads_df_grouped.apply( lambda _: naming.format("_".join(_.loc[aggregate_loads_by_cols])), axis=1, ) - - loads_df_grouped = loads_df_grouped.assign(type="load") - self.topology.loads_df = loads_df_grouped.set_index("name") # set up new loads time series - groups = loads_groupby.groups - ts_active = pd.concat( - [ - self.timeseries.loads_active_power, - self.timeseries.charging_points_active_power, - ], - axis=1, - ) - ts_reactive = pd.concat( - [ - self.timeseries.loads_reactive_power, - self.timeseries.charging_points_reactive_power, - ], - axis=1, - ) - if 
isinstance(list(groups.keys())[0], tuple): - - self.timeseries.loads_active_power = pd.concat( - [ - pd.DataFrame( - { - naming.format("_".join(k)): ts_active.loc[:, v].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) - self.timeseries.loads_reactive_power = pd.concat( - [ - pd.DataFrame( - { - naming.format("_".join(k)): ts_reactive.loc[:, v].sum( - axis=1 - ) - } - ) - for k, v in groups.items() - ], - axis=1, - ) - else: - self.timeseries.loads_active_power = pd.concat( - [ - pd.DataFrame( - {naming.format(k): ts_active.loc[:, v].sum(axis=1)} - ) - for k, v in groups.items() - ], - axis=1, - ) - self.timeseries.loads_reactive_power = pd.concat( - [ - pd.DataFrame( - {naming.format(k): ts_reactive.loc[:, v].sum(axis=1)} - ) - for k, v in groups.items() - ], - axis=1, - ) - # overwrite charging points - self.timeseries.charging_points_active_power = pd.DataFrame( - index=self.timeseries.timeindex + self.timeseries.loads_active_power = _aggregate_time_series( + "loads_active_power", loads_groupby.groups, naming ) - self.timeseries.charging_points_reactive_power = pd.DataFrame( - index=self.timeseries.timeindex + self.timeseries.loads_reactive_power = _aggregate_time_series( + "loads_reactive_power", loads_groupby.groups, naming ) def plot_mv_grid_topology(self, technologies=False, **kwargs): @@ -968,7 +1299,7 @@ def plot_mv_grid_topology(self, technologies=False, **kwargs): Parameters ---------- - technologies : :obj:`Boolean` + technologies : bool If True plots stations, generators, etc. in the topology in different colors. If False does not plot any nodes. Default: False. @@ -1127,7 +1458,8 @@ def histogram_voltage(self, timestep=None, title=True, **kwargs): Parameters ---------- - timestep : :pandas:`pandas.Timestamp` or list(:pandas:`pandas.Timestamp`) or None, optional + timestep : :pandas:`pandas.Timestamp` or \ + list(:pandas:`pandas.Timestamp`) or None, optional Specifies time steps histogram is plotted for. 
If timestep is None all time steps voltages are calculated for are used. Default: None. title : :obj:`str` or :obj:`bool`, optional @@ -1157,10 +1489,11 @@ def histogram_voltage(self, timestep=None, title=True, **kwargs): if title is True: if len(timestep) == 1: - title = "Voltage histogram for time step {}".format(timestep[0]) + title = f"Voltage histogram for time step {timestep[0]}" else: - title = "Voltage histogram \nfor time steps {} to {}".format( - timestep[0], timestep[-1] + title = ( + f"Voltage histogram \nfor time steps {timestep[0]} to " + f"{timestep[-1]}" ) elif title is False: title = None @@ -1179,7 +1512,8 @@ def histogram_relative_line_load( Parameters ---------- - timestep : :pandas:`pandas.Timestamp` or list(:pandas:`pandas.Timestamp`) or None, optional + timestep : :pandas:`pandas.Timestamp` or \ + list(:pandas:`pandas.Timestamp`) or None, optional Specifies time step(s) histogram is plotted for. If `timestep` is None all time steps currents are calculated for are used. Default: None. @@ -1228,13 +1562,11 @@ def histogram_relative_line_load( if title is True: if len(timestep) == 1: - title = "Relative line load histogram for time step {}".format( - timestep[0] - ) + title = f"Relative line load histogram for time step {timestep[0]}" else: title = ( "Relative line load histogram \nfor time steps " - "{} to {}".format(timestep[0], timestep[-1]) + f"{timestep[0]} to {timestep[-1]}" ) elif title is False: title = None @@ -1246,7 +1578,7 @@ def save( save_results=True, save_topology=True, save_timeseries=True, - **kwargs + **kwargs, ): """ Saves EDisGo object to csv. @@ -1306,299 +1638,10 @@ def save( to_type=kwargs.get("to_type", "float32"), ) - def add_component( - self, - comp_type, - add_ts=True, - ts_active_power=None, - ts_reactive_power=None, - **kwargs - ): - """ - Adds single component to network topology. - - Components can be lines or buses as well as generators, loads, - charging points or storage units. 
- - Parameters - ---------- - comp_type : str - Type of added component. Can be 'Bus', 'Line', 'Load', 'Generator', - 'StorageUnit', 'Transformer' or 'ChargingPoint'. Everything else is added as - load. - add_ts : bool - Indicator if time series for component are added as well. - ts_active_power : :pandas:`pandas.Series` - Active power time series of added component. Index of the series - must contain all time steps in - :attr:`~.network.timeseries.TimeSeries.timeindex`. - Values are active power per time step in MW. - ts_reactive_power : :pandas:`pandas.Series` - Reactive power time series of added component. Index of the series - must contain all time steps in - :attr:`~.network.timeseries.TimeSeries.timeindex`. - Values are reactive power per time step in MVA. - **kwargs: dict - Attributes of added component. See respective functions for required - entries. For 'Load', 'Generator' and 'StorageUnit' the boolean - add_ts determines whether a time series is created for the new - component or not. - - Todo: change into add_components to allow adding of several components - at a time, change topology.add_load etc. 
to add_loads, where - lists of parameters can be inserted - """ - if comp_type == "Bus": - comp_name = self.topology.add_bus(**kwargs) - - elif comp_type == "Line": - comp_name = self.topology.add_line(**kwargs) - - elif comp_type == "Generator": - comp_name = self.topology.add_generator(**kwargs) - if add_ts: - timeseries.add_generators_timeseries( - edisgo_obj=self, generator_names=comp_name, **kwargs - ) - - elif comp_type == "StorageUnit": - comp_name = self.topology.add_storage_unit( - **kwargs, - ) - if add_ts: - if isinstance(ts_active_power, pd.Series): - ts_active_power = pd.DataFrame({comp_name: ts_active_power}) - if isinstance(ts_reactive_power, pd.Series): - ts_reactive_power = pd.DataFrame({comp_name: ts_reactive_power}) - timeseries.add_storage_units_timeseries( - edisgo_obj=self, - storage_unit_names=comp_name, - timeseries_storage_units=ts_active_power, - timeseries_storage_units_reactive_power=ts_reactive_power, - **kwargs, - ) - - else: - if "charging" in comp_type.lower(): - type = "charging_point" - elif "heat" in comp_type.lower(): - type = "heat_pump" - else: - type = "load" - - kwargs["type"] = type - - comp_name = self.topology.add_load(**kwargs) - - if add_ts: - if "charging" in comp_type.lower(): - if ts_active_power is not None and ts_reactive_power is not None: - timeseries.add_charging_points_timeseries( - self, - [comp_name], - ts_active_power=pd.DataFrame({comp_name: ts_active_power}), - ts_reactive_power=pd.DataFrame( - {comp_name: ts_reactive_power} - ), - ) - - else: - raise ValueError( - "Time series for charging points need to be provided." - ) - - else: - timeseries.add_loads_timeseries( - edisgo_obj=self, load_names=comp_name, **kwargs - ) - - return comp_name - - def integrate_component( - self, - comp_type, - geolocation, - voltage_level=None, - add_ts=True, - ts_active_power=None, - ts_reactive_power=None, - **kwargs - ): - """ - Adds single component to topology based on geolocation. 
- - Currently components can be generators or charging points. - - Parameters - ---------- - comp_type : str - Type of added component. Can be 'Generator' or 'ChargingPoint'. - geolocation : :shapely:`shapely.Point` or tuple - Geolocation of the new component. In case of tuple, the geolocation - must be given in the form (longitude, latitude). - voltage_level : int, optional - Specifies the voltage level the new component is integrated in. - Possible options are 4 (MV busbar), 5 (MV grid), 6 (LV busbar) or - 7 (LV grid). If no voltage level is provided the voltage level - is determined based on the nominal power `p_nom` (given as kwarg) - as follows: - - * voltage level 4 (MV busbar): nominal power between 4.5 MW and - 17.5 MW - * voltage level 5 (MV grid) : nominal power between 0.3 MW and - 4.5 MW - * voltage level 6 (LV busbar): nominal power between 0.1 MW and - 0.3 MW - * voltage level 7 (LV grid): nominal power below 0.1 MW - - add_ts : bool, optional - Indicator if time series for component are added as well. - Default: True. - ts_active_power : :pandas:`pandas.Series`, optional - Active power time series of added component. Index of the series - must contain all time steps in - :attr:`~.network.timeseries.TimeSeries.timeindex`. - Values are active power per time step in MW. Currently, if you want - to add time series (if `add_ts` is True), you must provide a - time series. It is not automatically retrieved. - ts_reactive_power : :pandas:`pandas.Series`, optional - Reactive power time series of added component. Index of the series - must contain all time steps in - :attr:`~.network.timeseries.TimeSeries.timeindex`. - Values are reactive power per time step in MVA. Currently, if you - want to add time series (if `add_ts` is True), you must provide a - time series. It is not automatically retrieved. - - Other Parameters - ------------------ - kwargs : - Attributes of added component. 
- See :attr:`~.network.topology.Topology.add_generator` respectively - :attr:`~.network.topology.Topology.add_charging_point` methods - for more information on required and optional parameters of - generators and charging points. - - """ - supported_voltage_levels = {4, 5, 6, 7} - p_nom = kwargs.get("p_nom", None) - if voltage_level not in supported_voltage_levels: - if p_nom is None: - raise ValueError( - "Neither appropriate voltage level nor nominal power " - "were supplied." - ) - # Determine voltage level manually from nominal power - if 4.5 < p_nom <= 17.5: - voltage_level = 4 - elif 0.3 < p_nom <= 4.5: - voltage_level = 5 - elif 0.1 < p_nom <= 0.3: - voltage_level = 6 - elif 0 < p_nom <= 0.1: - voltage_level = 7 - else: - raise ValueError("Unsupported voltage level") - - # check if geolocation is given as shapely Point, otherwise transform - # to shapely Point - if not type(geolocation) is Point: - geolocation = Point(geolocation) - - # Connect in MV - if voltage_level in [4, 5]: - kwargs["voltage_level"] = voltage_level - kwargs["geom"] = geolocation - comp_name = self.topology.connect_to_mv(self, kwargs, comp_type) - - # Connect in LV - else: - substations = self.topology.buses_df.loc[ - self.topology.transformers_df.bus1.unique() - ] - nearest_substation, _ = find_nearest_bus(geolocation, substations) - kwargs["mvlv_subst_id"] = int(nearest_substation.split("_")[-2]) - kwargs["geom"] = geolocation - kwargs["voltage_level"] = voltage_level - comp_name = self.topology.connect_to_lv(self, kwargs, comp_type) - - if add_ts: - if comp_type == "Generator": - # ToDo: Adding time series for generators manually does - # currently not work - func = timeseries.add_generators_timeseries - else: - func = timeseries.add_charging_points_timeseries - func( - self, - [comp_name], - ts_active_power=pd.DataFrame({comp_name: ts_active_power}), - ts_reactive_power=pd.DataFrame({comp_name: ts_reactive_power}), - ) - - return comp_name - - def remove_component(self, comp_type, 
comp_name, drop_ts=True): - """ - Removes single component from respective DataFrame. If drop_ts is set - to True, timeseries of elements are deleted as well. - - Parameters - ---------- - comp_type: str - Type of removed component. Can be 'Bus', 'Line', 'Load', - 'Generator', 'StorageUnit', 'Transformer'. - comp_name: str - Name of component to be removed. - drop_ts: Boolean - Indicator if timeseries for component are removed as well. Defaults - to True. - - Todo: change into remove_components, when add_component is changed into - add_components, to allow removal of several components at a time - - """ - if comp_type == "Bus": - self.topology.remove_bus(comp_name) - elif comp_type == "Line": - self.topology.remove_line(comp_name) - elif comp_type == "Load": - self.topology.remove_load(comp_name) - if drop_ts: - timeseries._drop_existing_component_timeseries( - edisgo_obj=self, comp_type="loads", comp_names=comp_name - ) - - elif comp_type == "Generator": - self.topology.remove_generator(comp_name) - if drop_ts: - timeseries._drop_existing_component_timeseries( - edisgo_obj=self, - comp_type="generators", - comp_names=comp_name, - ) - elif comp_type == "StorageUnit": - self.topology.remove_storage_unit(comp_name) - if drop_ts: - timeseries._drop_existing_component_timeseries( - edisgo_obj=self, - comp_type="storage_units", - comp_names=comp_name, - ) - elif comp_type == "ChargingPoint": - self.topology.remove_load(comp_name) - if drop_ts: - timeseries._drop_existing_component_timeseries( - edisgo_obj=self, - comp_type="charging_points", - comp_names=comp_name, - ) - else: - raise ValueError("Component type is not correct.") - def save_edisgo_to_pickle(self, path="", filename=None): abs_path = os.path.abspath(path) if filename is None: - filename = "edisgo_object_{ext}.pkl".format(ext=self.topology.mv_grid.id) + filename = f"edisgo_object_{self.topology.mv_grid.id}.pkl" pickle.dump(self, open(os.path.join(abs_path, filename), "wb")) def reduce_memory(self, 
**kwargs): @@ -1635,6 +1678,66 @@ def reduce_memory(self, **kwargs): attr_to_reduce=kwargs.get("results_attr_to_reduce", None), ) + def check_integrity(self): + """ + Method to check the integrity of the EDisGo object. + + Checks for consistency of topology (see + :func:`edisgo.topology.check_integrity`), timeseries (see + :func:`edisgo.timeseries.check_integrity`) and the interplay of both. + + """ + self.topology.check_integrity() + self.timeseries.check_integrity() + + # check consistency of topology and timeseries + comp_types = ["generators", "loads", "storage_units"] + + for comp_type in comp_types: + comps = getattr(self.topology, comp_type + "_df") + + for ts in ["active_power", "reactive_power"]: + comp_ts_name = f"{comp_type}_{ts}" + comp_ts = getattr(self.timeseries, comp_ts_name) + + # check whether all components in topology have an entry in the + # respective active and reactive power timeseries + missing = comps.index[~comps.index.isin(comp_ts.columns)] + if len(missing) > 0: + logger.warning( + f"The following {comp_type} are missing in {comp_ts_name}: " + f"{missing.values}" + ) + + # check whether all elements in timeseries have an entry in the topology + missing_ts = comp_ts.columns[~comp_ts.columns.isin(comps.index)] + if len(missing_ts) > 0: + logger.warning( + f"The following {comp_type} have entries in {comp_ts_name}, but" + f" not in {comp_type}_df: {missing_ts.values}" + ) + + # check if the active powers inside the timeseries exceed the given nominal + # or peak power of the component + if comp_type in ["generators", "storage_units"]: + attr = "p_nom" + else: + attr = "p_set" + + active_power = getattr(self.timeseries, f"{comp_type}_active_power") + comps_complete = comps.index[comps.index.isin(active_power.columns)] + exceeding = comps_complete[ + (active_power[comps_complete].max() > comps.loc[comps_complete, attr]) + ] + + if len(exceeding) > 0: + logger.warning( + f"Values of active power in the timeseries object exceed {attr} for" + 
f" the following {comp_type}: {exceeding.values}" + ) + + logging.info("Integrity check finished. Please pay attention to warnings.") + def import_edisgo_from_pickle(filename, path=""): abs_path = os.path.abspath(path) @@ -1646,7 +1749,7 @@ def import_edisgo_from_files( import_topology=True, import_timeseries=False, import_results=False, - **kwargs + **kwargs, ): edisgo_obj = EDisGo(import_timeseries=False) if import_topology: @@ -1668,13 +1771,14 @@ def import_edisgo_from_files( edisgo_obj.results.from_csv(os.path.join(directory, "results"), parameters) else: logging.warning("No results directory found. Results not imported.") - if kwargs.get("import_residual_load", False): - if os.path.exists(os.path.join(directory, "time_series_sums.csv")): - residual_load = ( - pd.read_csv(os.path.join(directory, "time_series_sums.csv")) - .rename(columns={"Unnamed: 0": "timeindex"}) - .set_index("timeindex")["residual_load"] - ) - residual_load.index = pd.to_datetime(residual_load.index) - edisgo_obj.timeseries._residual_load = residual_load + if kwargs.get("import_residual_load", False) and os.path.exists( + os.path.join(directory, "time_series_sums.csv") + ): + residual_load = ( + pd.read_csv(os.path.join(directory, "time_series_sums.csv")) + .rename(columns={"Unnamed: 0": "timeindex"}) + .set_index("timeindex")["residual_load"] + ) + residual_load.index = pd.to_datetime(residual_load.index) + edisgo_obj.timeseries._residual_load = residual_load return edisgo_obj diff --git a/edisgo/flex_opt/check_tech_constraints.py b/edisgo/flex_opt/check_tech_constraints.py index d9d7d9321..2d5e12c71 100644 --- a/edisgo/flex_opt/check_tech_constraints.py +++ b/edisgo/flex_opt/check_tech_constraints.py @@ -8,7 +8,7 @@ from edisgo.network.grids import LVGrid, MVGrid -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) def mv_line_load(edisgo_obj): @@ -138,12 +138,12 @@ def lines_allowed_load(edisgo_obj, voltage_level): ) i_lines_allowed_per_case = {} - 
i_lines_allowed_per_case["feedin_case"] = ( + i_lines_allowed_per_case["feed-in_case"] = ( lines_df.s_nom / sqrt(3) / nominal_voltage * edisgo_obj.config["grid_expansion_load_factors"][ - "{}_feedin_case_line".format(voltage_level) + "{}_feed-in_case_line".format(voltage_level) ] ) @@ -173,9 +173,12 @@ def lines_allowed_load(edisgo_obj, voltage_level): ) # lines in radial feeders are not n-1 secure anyways - i_lines_allowed_per_case["load_case"] = i_lines_allowed_per_case[ - "load_case" - ].append(lines_df.loc[lines_radial_feeders].s_nom / sqrt(3) / nominal_voltage) + i_lines_allowed_per_case["load_case"] = pd.concat( + [ + i_lines_allowed_per_case["load_case"], + lines_df.loc[lines_radial_feeders].s_nom / sqrt(3) / nominal_voltage, + ] + ) i_lines_allowed = edisgo_obj.timeseries.timesteps_load_feedin_case.loc[ edisgo_obj.results.i_res.index @@ -261,7 +264,7 @@ def _line_load(edisgo_obj, voltage_level): ) crit_lines.loc[:, "voltage_level"] = voltage_level else: - crit_lines = pd.DataFrame() + crit_lines = pd.DataFrame(dtype=float) return crit_lines @@ -328,9 +331,14 @@ def mv_lv_station_load(edisgo_obj): """ - crit_stations = pd.DataFrame() + crit_stations = pd.DataFrame(dtype=float) for lv_grid in edisgo_obj.topology.mv_grid.lv_grids: - crit_stations = crit_stations.append(_station_load(edisgo_obj, lv_grid)) + crit_stations = pd.concat( + [ + crit_stations, + _station_load(edisgo_obj, lv_grid), + ] + ) if not crit_stations.empty: logger.debug( "==> {} MV/LV station(s) has/have load issues.".format( @@ -392,9 +400,10 @@ def _station_load(edisgo_obj, grid): s_station = sum(transformers_df.s_nom) load_factor = edisgo_obj.timeseries.timesteps_load_feedin_case.apply( lambda _: edisgo_obj.config["grid_expansion_load_factors"][ - "{}_{}_transformer".format(voltage_level, _) + f"{voltage_level}_{_}_transformer" ] ) + s_station_allowed = s_station * load_factor # calculate residual apparent power (if negative, station is over-loaded) @@ -406,17 +415,16 @@ def 
_station_load(edisgo_obj, grid): # devided by the load factor to account for load factors smaller than # one, which lead to a higher needed additional capacity) s_missing = (s_res / load_factor).dropna() - crit_stations = pd.DataFrame( + return pd.DataFrame( { "s_missing": abs(s_missing.min()), "time_index": s_missing.idxmin(), }, index=[repr(grid)], ) - else: - crit_stations = pd.DataFrame() - return crit_stations + else: + return pd.DataFrame(dtype=float) def mv_voltage_deviation(edisgo_obj, voltage_levels="mv_lv"): @@ -624,9 +632,9 @@ def _mv_allowed_voltage_limits(edisgo_obj, voltage_levels): # get config values for lower voltage limit in feed-in case and upper # voltage limit in load case - v_allowed_per_case["feedin_case_lower"] = edisgo_obj.config[ + v_allowed_per_case["feed-in_case_lower"] = edisgo_obj.config[ "grid_expansion_allowed_voltage_deviations" - ]["feedin_case_lower"] + ]["feed-in_case_lower"] v_allowed_per_case["load_case_upper"] = edisgo_obj.config[ "grid_expansion_allowed_voltage_deviations" ]["load_case_upper"] @@ -641,12 +649,12 @@ def _mv_allowed_voltage_limits(edisgo_obj, voltage_levels): ] if voltage_levels == "mv_lv" or voltage_levels == "mv": - v_allowed_per_case["feedin_case_upper"] = ( + v_allowed_per_case["feed-in_case_upper"] = ( 1 + offset + control_deviation + edisgo_obj.config["grid_expansion_allowed_voltage_deviations"][ - "{}_feedin_case_max_v_deviation".format(voltage_levels) + "{}_feed-in_case_max_v_deviation".format(voltage_levels) ] ) v_allowed_per_case["load_case_lower"] = ( @@ -717,10 +725,10 @@ def _lv_allowed_voltage_limits(edisgo_obj, lv_grid, mode): # calculate upper voltage limit in feed-in case and lower voltage limit in # load case - v_allowed_per_case["feedin_case_upper"] = ( + v_allowed_per_case["feed-in_case_upper"] = ( voltage_base + edisgo_obj.config["grid_expansion_allowed_voltage_deviations"][ - "{}_feedin_case_max_v_deviation".format(config_string) + "{}_feed-in_case_max_v_deviation".format(config_string) ] 
) v_allowed_per_case["load_case_lower"] = ( @@ -731,9 +739,9 @@ def _lv_allowed_voltage_limits(edisgo_obj, lv_grid, mode): ) timeindex = voltage_base.index - v_allowed_per_case["feedin_case_lower"] = pd.Series( + v_allowed_per_case["feed-in_case_lower"] = pd.Series( edisgo_obj.config["grid_expansion_allowed_voltage_deviations"][ - "feedin_case_lower" + "feed-in_case_lower" ], index=timeindex, ) @@ -834,16 +842,22 @@ def voltage_diff(edisgo_obj, buses, v_dev_allowed_upper, v_dev_allowed_lower): buses_ov = v_mag_pu_pfa[ overvoltage[~overvoltage.index.isin(buses_both.columns)].index ] - voltage_diff_ov = voltage_diff_ov.append( - buses_ov.T - v_dev_allowed_upper.loc[v_mag_pu_pfa.index].values + voltage_diff_ov = pd.concat( + [ + voltage_diff_ov, + buses_ov.T - v_dev_allowed_upper.loc[v_mag_pu_pfa.index].values, + ] ) # handle buses with undervoltage issues and append to voltage_diff_uv buses_uv = v_mag_pu_pfa[ undervoltage[~undervoltage.index.isin(buses_both.columns)].index ] - voltage_diff_uv = voltage_diff_uv.append( - -buses_uv.T + v_dev_allowed_lower.loc[v_mag_pu_pfa.index].values + voltage_diff_uv = pd.concat( + [ + voltage_diff_uv, + -buses_uv.T + v_dev_allowed_lower.loc[v_mag_pu_pfa.index].values, + ] ) return voltage_diff_uv, voltage_diff_ov @@ -893,7 +907,7 @@ def _append_crit_buses(df): index=df.index, ) - crit_buses_grid = pd.DataFrame() + crit_buses_grid = pd.DataFrame(dtype=float) voltage_diff_uv, voltage_diff_ov = voltage_diff( edisgo_obj, buses, v_limits_upper, v_limits_lower @@ -901,9 +915,19 @@ def _append_crit_buses(df): # append to crit buses dataframe if not voltage_diff_ov.empty: - crit_buses_grid = crit_buses_grid.append(_append_crit_buses(voltage_diff_ov)) + crit_buses_grid = pd.concat( + [ + crit_buses_grid, + _append_crit_buses(voltage_diff_ov), + ] + ) if not voltage_diff_uv.empty: - crit_buses_grid = crit_buses_grid.append(_append_crit_buses(voltage_diff_uv)) + crit_buses_grid = pd.concat( + [ + crit_buses_grid, + 
_append_crit_buses(voltage_diff_uv), + ] + ) if not crit_buses_grid.empty: crit_buses_grid.sort_values(by=["v_diff_max"], ascending=False, inplace=True) diff --git a/edisgo/flex_opt/costs.py b/edisgo/flex_opt/costs.py index b1512b4aa..24dc0a354 100644 --- a/edisgo/flex_opt/costs.py +++ b/edisgo/flex_opt/costs.py @@ -1,6 +1,5 @@ import os -import numpy as np import pandas as pd if "READTHEDOCS" not in os.environ: @@ -79,14 +78,17 @@ def _get_transformer_costs(trafos): }, index=hvmv_trafos, ) - costs_trafos = costs_trafos.append( - pd.DataFrame( - { - "costs_transformers": len(mvlv_trafos) - * [float(edisgo_obj.config["costs_transformers"]["lv"])] - }, - index=mvlv_trafos, - ) + costs_trafos = pd.concat( + [ + costs_trafos, + pd.DataFrame( + { + "costs_transformers": len(mvlv_trafos) + * [float(edisgo_obj.config["costs_transformers"]["lv"])] + }, + index=mvlv_trafos, + ), + ] ) return costs_trafos.loc[trafos.index, "costs_transformers"].values @@ -100,7 +102,7 @@ def _get_line_costs(lines_added): return costs_lines[["costs", "voltage_level"]] - costs = pd.DataFrame() + costs = pd.DataFrame(dtype=float) if without_generator_import: equipment_changes = edisgo_obj.results.equipment_changes.loc[ @@ -124,21 +126,27 @@ def _get_line_costs(lines_added): ~added_transformers["equipment"].isin(added_removed_transformers.equipment) ] # calculate costs for transformers - all_trafos = edisgo_obj.topology.transformers_hvmv_df.append( - edisgo_obj.topology.transformers_df + all_trafos = pd.concat( + [ + edisgo_obj.topology.transformers_hvmv_df, + edisgo_obj.topology.transformers_df, + ] ) trafos = all_trafos.loc[added_transformers["equipment"]] # calculate costs for each transformer - costs = costs.append( - pd.DataFrame( - { - "type": trafos.type_info.values, - "total_costs": _get_transformer_costs(trafos), - "quantity": len(trafos) * [1], - "voltage_level": len(trafos) * ["mv/lv"], - }, - index=trafos.index, - ) + costs = pd.concat( + [ + costs, + pd.DataFrame( + { + "type": 
trafos.type_info.values, + "total_costs": _get_transformer_costs(trafos), + "quantity": len(trafos) * [1], + "voltage_level": len(trafos) * ["mv/lv"], + }, + index=trafos.index, + ), + ] ) # costs for lines @@ -163,35 +171,43 @@ def _get_line_costs(lines_added): ] if not lines_added.empty: line_costs = _get_line_costs(lines_added) - costs = costs.append( - pd.DataFrame( - { - "type": edisgo_obj.topology.lines_df.loc[ - lines_added.index, "type_info" - ].values, - "total_costs": line_costs.costs.values, - "length": (lines_added.quantity * lines_added.length).values, - "quantity": lines_added.quantity.values, - "voltage_level": line_costs.voltage_level.values, - }, - index=lines_added.index, - ) + costs = pd.concat( + [ + costs, + pd.DataFrame( + { + "type": edisgo_obj.topology.lines_df.loc[ + lines_added.index, "type_info" + ].values, + "total_costs": line_costs.costs.values, + "length": ( + lines_added.quantity * lines_added.length + ).values, + "quantity": lines_added.quantity.values, + "voltage_level": line_costs.voltage_level.values, + }, + index=lines_added.index, + ), + ] ) # if no costs incurred write zero costs to DataFrame if costs.empty: - costs = costs.append( - pd.DataFrame( - { - "type": ["N/A"], - "total_costs": [0], - "length": [0], - "quantity": [0], - "voltage_level": "", - "mv_feeder": "", - }, - index=["No reinforced equipment."], - ) + costs = pd.concat( + [ + costs, + pd.DataFrame( + { + "type": ["N/A"], + "total_costs": [0], + "length": [0], + "quantity": [0], + "voltage_level": "", + "mv_feeder": "", + }, + index=["No reinforced equipment."], + ), + ] ) return costs @@ -257,15 +273,18 @@ def line_expansion_costs(edisgo_obj, lines_names): index=mv_lines, ) - costs_lines = costs_lines.append( - pd.DataFrame( - { - "costs_earthworks": (costs_cable_earthwork_lv - costs_cable_lv) - * lines_df.loc[lv_lines].length, - "costs_cable": costs_cable_lv * lines_df.loc[lv_lines].length, - "voltage_level": ["lv"] * len(lv_lines), - }, - index=lv_lines, - ) + 
costs_lines = pd.concat( + [ + costs_lines, + pd.DataFrame( + { + "costs_earthworks": (costs_cable_earthwork_lv - costs_cable_lv) + * lines_df.loc[lv_lines].length, + "costs_cable": costs_cable_lv * lines_df.loc[lv_lines].length, + "voltage_level": ["lv"] * len(lv_lines), + }, + index=lv_lines, + ), + ] ) return costs_lines.loc[lines_df.index] diff --git a/edisgo/flex_opt/curtailment.py b/edisgo/flex_opt/curtailment.py index e116a4d4e..d0240c827 100644 --- a/edisgo/flex_opt/curtailment.py +++ b/edisgo/flex_opt/curtailment.py @@ -120,17 +120,17 @@ def voltage_based( if not combined_analysis: allowed_voltage_dev_mv = edisgo.network.config[ "grid_expansion_allowed_voltage_deviations" - ]["mv_feedin_case_max_v_deviation"] + ]["mv_feed-in_case_max_v_deviation"] allowed_voltage_diff_lv = edisgo.network.config[ "grid_expansion_allowed_voltage_deviations" - ]["lv_feedin_case_max_v_deviation"] + ]["lv_feed-in_case_max_v_deviation"] else: allowed_voltage_dev_mv = edisgo.network.config[ "grid_expansion_allowed_voltage_deviations" - ]["mv_lv_feedin_case_max_v_deviation"] + ]["mv_lv_feed-in_case_max_v_deviation"] allowed_voltage_diff_lv = edisgo.network.config[ "grid_expansion_allowed_voltage_deviations" - ]["mv_lv_feedin_case_max_v_deviation"] + ]["mv_lv_feed-in_case_max_v_deviation"] # assign allowed voltage deviation to each generator if not edisgo.network.pypsa.edisgo_mode: @@ -217,12 +217,15 @@ def voltage_based( curtailment = pd.DataFrame() # set curtailment for other time steps to zero - curtailment = curtailment.append( - pd.DataFrame( - 0, - columns=feedin.columns, - index=curtailment_timeseries[curtailment_timeseries <= 0].index, - ) + curtailment = pd.concat( + [ + curtailment, + pd.DataFrame( + 0, + columns=feedin.columns, + index=curtailment_timeseries[curtailment_timeseries <= 0].index, + ), + ] ) # check if curtailment target was met @@ -596,7 +599,10 @@ def __init__( # get all fluctuating generators and their attributes (weather ID, # type, etc.) 
- generators = get_gen_info(edisgo.topology, "mvlv", fluctuating=True) + # TODO: Function get_gen_info does not exist + generators = get_gen_info( # noqa: F821 + edisgo.topology, "mvlv", fluctuating=True + ) # do analyze to get all voltages at generators and feed-in dataframe edisgo.analyze(mode=mode) @@ -696,7 +702,7 @@ def _check_timeindex(self, curtailment_timeseries, network): raise KeyError(message) try: curtailment_timeseries.loc[network.timeseries.timeindex] - except: + except Exception: message = ( "Time index of curtailment time series does not match " "with load and feed-in time series." diff --git a/edisgo/flex_opt/exceptions.py b/edisgo/flex_opt/exceptions.py index 85116e74b..6b2bee53d 100644 --- a/edisgo/flex_opt/exceptions.py +++ b/edisgo/flex_opt/exceptions.py @@ -9,8 +9,11 @@ class MaximumIterationError(Error): Exception raised when maximum number of iterations in network reinforcement is exceeded. - Attributes: - message -- explanation of the error + Attributes + ----------- + message : str + Explanation of the error + """ def __init__(self, message): @@ -21,8 +24,11 @@ class ImpossibleVoltageReduction(Error): """ Exception raised when voltage issue cannot be solved. - Attributes: - message -- explanation of the error + Attributes + ----------- + message : str + Explanation of the error + """ def __init__(self, message): diff --git a/edisgo/flex_opt/q_control.py b/edisgo/flex_opt/q_control.py new file mode 100644 index 000000000..c36a325f1 --- /dev/null +++ b/edisgo/flex_opt/q_control.py @@ -0,0 +1,212 @@ +import numpy as np +import pandas as pd + + +def get_q_sign_generator(reactive_power_mode): + """ + Get the sign of reactive power in generator sign convention. + + In the generator sign convention the reactive power is negative in + inductive operation (`reactive_power_mode` is 'inductive') and positive + in capacitive operation (`reactive_power_mode` is 'capacitive'). 
+ + Parameters + ---------- + reactive_power_mode : str + Possible options are 'inductive' and 'capacitive'. + + Returns + -------- + int + Sign of reactive power in generator sign convention. + + """ + if reactive_power_mode.lower() == "inductive": + return -1 + elif reactive_power_mode.lower() == "capacitive": + return 1 + else: + raise ValueError( + "reactive_power_mode must either be 'capacitive' " + "or 'inductive' but is {}.".format(reactive_power_mode) + ) + + +def get_q_sign_load(reactive_power_mode): + """ + Get the sign of reactive power in load sign convention. + + In the load sign convention the reactive power is positive in + inductive operation (`reactive_power_mode` is 'inductive') and negative + in capacitive operation (`reactive_power_mode` is 'capacitive'). + + Parameters + ---------- + reactive_power_mode : str + Possible options are 'inductive' and 'capacitive'. + + Returns + -------- + int + Sign of reactive power in load sign convention. + + """ + if reactive_power_mode.lower() == "inductive": + return 1 + elif reactive_power_mode.lower() == "capacitive": + return -1 + else: + raise ValueError( + "reactive_power_mode must either be 'capacitive' " + "or 'inductive' but is {}.".format(reactive_power_mode) + ) + + +def fixed_cosphi(active_power, q_sign, power_factor): + """ + Calculates reactive power for a fixed cosphi operation. + + Parameters + ---------- + active_power : :pandas:`pandas.DataFrame` + Dataframe with active power time series. Columns of the dataframe are + names of the components and index of the dataframe are the time steps + reactive power is calculated for. + q_sign : :pandas:`pandas.Series` or int + `q_sign` defines whether the reactive power is positive or + negative and must either be -1 or +1. In case `q_sign` is given as a + series, the index must contain the same component names as given in + columns of parameter `active_power`. + power_factor : :pandas:`pandas.Series` or float + Ratio of real to apparent power. 
+ In case `power_factor` is given as a series, the index must contain the + same component names as given in columns of parameter `active_power`. + + Returns + ------- + :pandas:`pandas.DataFrame` + Dataframe with the same format as the `active_power` dataframe, + containing the reactive power. + + """ + return active_power * q_sign * np.tan(np.arccos(power_factor)) + + +def _get_component_dict(): + """ + Helper function to translate from component type term used in function to the one + used in the config files. + + """ + comp_dict = { + "generators": "gen", + "storage_units": "storage", + "loads": "load", + "charging_points": "cp", + "heat_pumps": "hp", + } + return comp_dict + + +def _fixed_cosphi_default_power_factor(comp_df, component_type, configs): + """ + Gets fixed cosphi default reactive power factor for each given component. + + Parameters + ----------- + comp_df : :pandas:`pandas.DataFrame` + Dataframe with component names (in the index) of all components + reactive power factor needs to be set. Only required column is + column 'voltage_level', giving the voltage level the component is in (the + voltage level can be set using the function + :func:`~.tools.tools.assign_voltage_level_to_component`). + All components must have the same `component_type`. + component_type : str + The component type determines the reactive power factor and mode used. + Possible options are 'generators', 'storage_units', 'loads', 'charging_points', + and 'heat_pumps'. + configs : :class:`~.tools.config.Config` + eDisGo configuration data. + + Returns + -------- + :pandas:`pandas.Series` + Series with default reactive power factor in case of fixed coshpi for each + component in index of `comp_df`. 
+ + """ + reactive_power_factor = configs["reactive_power_factor"] + comp_dict = _get_component_dict() + + if component_type in comp_dict.keys(): + comp = comp_dict[component_type] + # write series with power factor for each component + power_factor = pd.Series(index=comp_df.index, dtype=float) + for voltage_level in comp_df.voltage_level.unique(): + cols = comp_df.index[comp_df.voltage_level == voltage_level] + if len(cols) > 0: + power_factor[cols] = reactive_power_factor[f"{voltage_level}_{comp}"] + return power_factor + else: + raise ValueError( + "Given 'component_type' is not valid. Valid options are " + "'generators','storage_units', 'loads', 'charging_points', and " + "'heat_pumps'." + ) + + +def _fixed_cosphi_default_reactive_power_sign(comp_df, component_type, configs): + """ + Gets fixed cosphi default value for sign of reactive power for each given component. + + Parameters + ----------- + comp_df : :pandas:`pandas.DataFrame` + Dataframe with component names (in the index) of all components sign of + reactive power needs to be set. Only required column is + column 'voltage_level', giving the voltage level the component is in (the + voltage level can be set using the function + :func:`~.tools.tools.assign_voltage_level_to_component`). + All components must have the same `component_type`. + component_type : str + The component type determines the reactive power factor and mode used. + Possible options are 'generators', 'storage_units', 'loads', 'charging_points', + and 'heat_pumps'. + configs : :class:`~.tools.config.Config` + eDisGo configuration data. + + Returns + -------- + :pandas:`pandas.Series` + Series with default sign of reactive power in case of fixed cosphi for each + component in index of `comp_df`. 
+ + """ + reactive_power_mode = configs["reactive_power_mode"] + comp_dict = _get_component_dict() + q_sign_dict = { + "generators": get_q_sign_generator, + "storage_units": get_q_sign_generator, + "loads": get_q_sign_load, + "charging_points": get_q_sign_load, + "heat_pumps": get_q_sign_load, + } + + if component_type in comp_dict.keys(): + comp = comp_dict[component_type] + get_q_sign = q_sign_dict[component_type] + # write series with power factor for each component + q_sign = pd.Series(index=comp_df.index, dtype=float) + for voltage_level in comp_df.voltage_level.unique(): + cols = comp_df.index[comp_df.voltage_level == voltage_level] + if len(cols) > 0: + q_sign[cols] = get_q_sign( + reactive_power_mode[f"{voltage_level}_{comp}"] + ) + return q_sign + else: + raise ValueError( + "Given 'component_type' is not valid. Valid options are " + "'generators','storage_units', 'loads', 'charging_points', and " + "'heat_pumps'." + ) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index da3408428..d7eef850c 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -1,7 +1,11 @@ +from __future__ import annotations + import copy import datetime import logging +from typing import TYPE_CHECKING + import pandas as pd from edisgo.flex_opt import check_tech_constraints as checks @@ -9,17 +13,21 @@ from edisgo.flex_opt.costs import grid_expansion_costs from edisgo.tools import tools -logger = logging.getLogger("edisgo") +if TYPE_CHECKING: + from edisgo import EDisGo + from edisgo.network.results import Results + +logger = logging.getLogger(__name__) def reinforce_grid( - edisgo, - timesteps_pfa=None, - copy_grid=False, - max_while_iterations=10, - combined_analysis=False, - mode=None, -): + edisgo: EDisGo, + timesteps_pfa: str | pd.DatetimeIndex | pd.Timestamp | None = None, + copy_grid: bool = False, + max_while_iterations: int = 20, + combined_analysis: bool = False, + mode: str | None = None, +) -> Results: 
""" Evaluates network reinforcement needs and performs measures. @@ -79,6 +87,7 @@ def reinforce_grid( and neglecting LV network topology. LV load and generation is aggregated per LV network and directly connected to the secondary side of the respective MV/LV station. + * 'lv' to reinforce LV networks including MV/LV stations only. Returns ------- @@ -94,8 +103,9 @@ def reinforce_grid( """ def _add_lines_changes_to_equipment_changes(): - edisgo_reinforce.results.equipment_changes = ( - edisgo_reinforce.results.equipment_changes.append( + edisgo_reinforce.results.equipment_changes = pd.concat( + [ + edisgo_reinforce.results.equipment_changes, pd.DataFrame( { "iteration_step": [iteration_step] * len(lines_changes), @@ -106,29 +116,30 @@ def _add_lines_changes_to_equipment_changes(): "quantity": [_ for _ in lines_changes.values()], }, index=lines_changes.keys(), - ) - ) + ), + ], ) - def _add_transformer_changes_to_equipment_changes(mode): - for station, transformer_list in transformer_changes[mode].items(): - edisgo_reinforce.results.equipment_changes = ( - edisgo_reinforce.results.equipment_changes.append( - pd.DataFrame( - { - "iteration_step": [iteration_step] * len(transformer_list), - "change": [mode] * len(transformer_list), - "equipment": transformer_list, - "quantity": [1] * len(transformer_list), - }, - index=[station] * len(transformer_list), - ) - ) + def _add_transformer_changes_to_equipment_changes(mode: str | None): + df_list = [edisgo_reinforce.results.equipment_changes] + df_list.extend( + pd.DataFrame( + { + "iteration_step": [iteration_step] * len(transformer_list), + "change": [mode] * len(transformer_list), + "equipment": transformer_list, + "quantity": [1] * len(transformer_list), + }, + index=[station] * len(transformer_list), ) + for station, transformer_list in transformer_changes[mode].items() + ) + + edisgo_reinforce.results.equipment_changes = pd.concat(df_list) # check if provided mode is valid - if mode and mode not in ["mv", "mvlv"]: - 
raise ValueError("Provided mode {} is not a valid mode.".format(mode)) + if mode and mode not in ["mv", "mvlv", "lv"]: + raise ValueError(f"Provided mode {mode} is not a valid mode.") # in case reinforcement needs to be conducted on a copied graph the # edisgo object is deep copied @@ -138,7 +149,6 @@ def _add_transformer_changes_to_equipment_changes(mode): edisgo_reinforce = edisgo if timesteps_pfa is not None: - # if timesteps_pfa = 'snapshot_analysis' get snapshots if isinstance(timesteps_pfa, str) and timesteps_pfa == "snapshot_analysis": snapshots = tools.select_worstcase_snapshots(edisgo_reinforce) # drop None values in case any of the two snapshots does not exist @@ -154,28 +164,42 @@ def _add_transformer_changes_to_equipment_changes(mode): if hasattr(timesteps_pfa, "__iter__"): if not all(isinstance(_, datetime.datetime) for _ in timesteps_pfa): raise ValueError( - "Input {} for timesteps_pfa is not valid.".format(timesteps_pfa) + f"Input {timesteps_pfa} for timesteps_pfa is not valid." ) else: raise ValueError( - "Input {} for timesteps_pfa is not valid.".format(timesteps_pfa) + f"Input {timesteps_pfa} for timesteps_pfa is not valid." 
) iteration_step = 1 - edisgo_reinforce.analyze(mode=mode, timesteps=timesteps_pfa) + analyze_mode = None if mode == "lv" else mode - # REINFORCE OVERLOADED TRANSFORMERS AND LINES + edisgo_reinforce.analyze(mode=analyze_mode, timesteps=timesteps_pfa) + # REINFORCE OVERLOADED TRANSFORMERS AND LINES logger.debug("==> Check station load.") - overloaded_mv_station = checks.hv_mv_station_load(edisgo_reinforce) - if mode == "mv": - overloaded_lv_stations = pd.DataFrame() - else: - overloaded_lv_stations = checks.mv_lv_station_load(edisgo_reinforce) + + overloaded_mv_station = ( + pd.DataFrame(dtype=float) if mode == "lv" else checks.hv_mv_station_load(edisgo_reinforce) + ) + + overloaded_lv_stations = ( + pd.DataFrame(dtype=float) if mode == "mv" else checks.mv_lv_station_load(edisgo_reinforce) + ) + logger.debug("==> Check line load.") - crit_lines = checks.mv_line_load(edisgo_reinforce) - if not mode: - crit_lines = crit_lines.append(checks.lv_line_load(edisgo_reinforce)) + + crit_lines = ( + pd.DataFrame(dtype=float) if mode == "lv" else checks.mv_line_load(edisgo_reinforce) + ) + + if not mode or mode == "lv": + crit_lines = pd.concat( + [ + crit_lines, + checks.lv_line_load(edisgo_reinforce), + ] + ) while_counter = 0 while ( @@ -217,15 +241,31 @@ def _add_transformer_changes_to_equipment_changes(mode): # run power flow analysis again (after updating pypsa object) and check # if all over-loading problems were solved logger.debug("==> Run power flow analysis.") - edisgo_reinforce.analyze(mode=mode, timesteps=timesteps_pfa) + edisgo_reinforce.analyze(mode=analyze_mode, timesteps=timesteps_pfa) + logger.debug("==> Recheck station load.") - overloaded_mv_station = checks.hv_mv_station_load(edisgo_reinforce) + overloaded_mv_station = ( + pd.DataFrame(dtype=float) + if mode == "lv" + else checks.hv_mv_station_load(edisgo_reinforce) + ) + if mode != "mv": overloaded_lv_stations = checks.mv_lv_station_load(edisgo_reinforce) + logger.debug("==> Recheck line load.") - 
crit_lines = checks.mv_line_load(edisgo_reinforce) - if not mode: - crit_lines = crit_lines.append(checks.lv_line_load(edisgo_reinforce)) + + crit_lines = ( + pd.DataFrame(dtype=float) if mode == "lv" else checks.mv_line_load(edisgo_reinforce) + ) + + if not mode or mode == "lv": + crit_lines = pd.concat( + [ + crit_lines, + checks.lv_line_load(edisgo_reinforce), + ] + ) iteration_step += 1 while_counter += 1 @@ -251,8 +291,7 @@ def _add_transformer_changes_to_equipment_changes(mode): ) else: logger.info( - "==> Load issues were solved in {} iteration " - "step(s).".format(while_counter) + f"==> Load issues were solved in {while_counter} iteration step(s)." ) # REINFORCE BRANCHES DUE TO VOLTAGE ISSUES @@ -260,13 +299,14 @@ def _add_transformer_changes_to_equipment_changes(mode): # solve voltage problems in MV topology logger.debug("==> Check voltage in MV topology.") - if combined_analysis: - voltage_levels = "mv_lv" - else: - voltage_levels = "mv" + voltage_levels = "mv_lv" if combined_analysis else "mv" - crit_nodes = checks.mv_voltage_deviation( - edisgo_reinforce, voltage_levels=voltage_levels + crit_nodes = ( + False + if mode == "lv" + else checks.mv_voltage_deviation( + edisgo_reinforce, voltage_levels=voltage_levels + ) ) while_counter = 0 @@ -284,11 +324,13 @@ def _add_transformer_changes_to_equipment_changes(mode): # run power flow analysis again (after updating pypsa object) and check # if all over-voltage problems were solved logger.debug("==> Run power flow analysis.") - edisgo_reinforce.analyze(mode=mode, timesteps=timesteps_pfa) + edisgo_reinforce.analyze(mode=analyze_mode, timesteps=timesteps_pfa) + logger.debug("==> Recheck voltage in MV topology.") crit_nodes = checks.mv_voltage_deviation( edisgo_reinforce, voltage_levels=voltage_levels ) + iteration_step += 1 while_counter += 1 @@ -303,21 +345,20 @@ def _add_transformer_changes_to_equipment_changes(mode): ) raise exceptions.MaximumIterationError( "Over-voltage issues for the following nodes in MV 
topology could " - "not be solved: {}".format(crit_nodes) + f"not be solved: {crit_nodes}" ) else: logger.info( - "==> Voltage issues in MV topology were solved in {} " - "iteration step(s).".format(while_counter) + f"==> Voltage issues in MV topology were solved in {while_counter} " + "iteration step(s)." ) # solve voltage problems at secondary side of LV stations if mode != "mv": logger.debug("==> Check voltage at secondary side of LV stations.") - if combined_analysis: - voltage_levels = "mv_lv" - else: - voltage_levels = "lv" + + voltage_levels = "mv_lv" if combined_analysis else "lv" + crit_stations = checks.lv_voltage_deviation( edisgo_reinforce, mode="stations", voltage_levels=voltage_levels ) @@ -336,7 +377,8 @@ def _add_transformer_changes_to_equipment_changes(mode): # run power flow analysis again (after updating pypsa object) and # check if all over-voltage problems were solved logger.debug("==> Run power flow analysis.") - edisgo_reinforce.analyze(mode=mode, timesteps=timesteps_pfa) + edisgo_reinforce.analyze(mode=analyze_mode, timesteps=timesteps_pfa) + logger.debug("==> Recheck voltage at secondary side of LV stations.") crit_stations = checks.lv_voltage_deviation( edisgo_reinforce, @@ -358,16 +400,16 @@ def _add_transformer_changes_to_equipment_changes(mode): ) raise exceptions.MaximumIterationError( "Over-voltage issues at busbar could not be solved for the " - "following LV grids: {}".format(crit_stations) + f"following LV grids: {crit_stations}" ) else: logger.info( "==> Voltage issues at busbars in LV grids were " - "solved in {} iteration step(s).".format(while_counter) + f"solved in {while_counter} iteration step(s)." 
) # solve voltage problems in LV grids - if not mode: + if not mode or mode == "lv": logger.debug("==> Check voltage in LV grids.") crit_nodes = checks.lv_voltage_deviation( edisgo_reinforce, voltage_levels=voltage_levels @@ -389,7 +431,8 @@ def _add_transformer_changes_to_equipment_changes(mode): # run power flow analysis again (after updating pypsa object) # and check if all over-voltage problems were solved logger.debug("==> Run power flow analysis.") - edisgo_reinforce.analyze(mode=mode, timesteps=timesteps_pfa) + edisgo_reinforce.analyze(mode=analyze_mode, timesteps=timesteps_pfa) + logger.debug("==> Recheck voltage in LV grids.") crit_nodes = checks.lv_voltage_deviation( edisgo_reinforce, voltage_levels=voltage_levels @@ -409,25 +452,37 @@ def _add_transformer_changes_to_equipment_changes(mode): ) raise exceptions.MaximumIterationError( "Over-voltage issues for the following nodes in LV grids " - "could not be solved: {}".format(crit_nodes) + f"could not be solved: {crit_nodes}" ) else: logger.info( "==> Voltage issues in LV grids were solved " - "in {} iteration step(s).".format(while_counter) + f"in {while_counter} iteration step(s)." 
) # RECHECK FOR OVERLOADED TRANSFORMERS AND LINES logger.debug("==> Recheck station load.") - overloaded_mv_station = checks.hv_mv_station_load(edisgo_reinforce) - if mode == "mv": - overloaded_lv_stations = pd.DataFrame() - else: + + overloaded_mv_station = ( + pd.DataFrame(dtype=float) if mode == "lv" else checks.hv_mv_station_load(edisgo_reinforce) + ) + + if mode != "mv": overloaded_lv_stations = checks.mv_lv_station_load(edisgo_reinforce) + logger.debug("==> Recheck line load.") - crit_lines = checks.mv_line_load(edisgo_reinforce) - if not mode: - crit_lines = crit_lines.append(checks.lv_line_load(edisgo_reinforce)) + + crit_lines = ( + pd.DataFrame(dtype=float) if mode == "lv" else checks.mv_line_load(edisgo_reinforce) + ) + + if not mode or mode == "lv": + crit_lines = pd.concat( + [ + crit_lines, + checks.lv_line_load(edisgo_reinforce), + ] + ) while_counter = 0 while ( @@ -469,15 +524,31 @@ def _add_transformer_changes_to_equipment_changes(mode): # run power flow analysis again (after updating pypsa object) and check # if all over-loading problems were solved logger.debug("==> Run power flow analysis.") - edisgo_reinforce.analyze(mode=mode, timesteps=timesteps_pfa) + edisgo_reinforce.analyze(mode=analyze_mode, timesteps=timesteps_pfa) + logger.debug("==> Recheck station load.") - overloaded_mv_station = checks.hv_mv_station_load(edisgo_reinforce) + overloaded_mv_station = ( + pd.DataFrame(dtype=float) + if mode == "lv" + else checks.hv_mv_station_load(edisgo_reinforce) + ) + if mode != "mv": overloaded_lv_stations = checks.mv_lv_station_load(edisgo_reinforce) + logger.debug("==> Recheck line load.") - crit_lines = checks.mv_line_load(edisgo_reinforce) - if not mode: - crit_lines = crit_lines.append(checks.lv_line_load(edisgo_reinforce)) + + crit_lines = ( + pd.DataFrame(dtype=float) if mode == "lv" else checks.mv_line_load(edisgo_reinforce) + ) + + if not mode or mode == "lv": + crit_lines = pd.concat( + [ + crit_lines, + 
checks.lv_line_load(edisgo_reinforce), + ] + ) iteration_step += 1 while_counter += 1 @@ -499,12 +570,12 @@ def _add_transformer_changes_to_equipment_changes(mode): ) raise exceptions.MaximumIterationError( "Overloading issues (after solving over-voltage issues) for the" - "following lines could not be solved: {}".format(crit_lines) + f"following lines could not be solved: {crit_lines}" ) else: logger.info( "==> Load issues were rechecked and solved " - "in {} iteration step(s).".format(while_counter) + f"in {while_counter} iteration step(s)." ) # final check 10% criteria diff --git a/edisgo/flex_opt/reinforce_measures.py b/edisgo/flex_opt/reinforce_measures.py index 4c791f1c1..6daa43574 100644 --- a/edisgo/flex_opt/reinforce_measures.py +++ b/edisgo/flex_opt/reinforce_measures.py @@ -11,7 +11,7 @@ from edisgo.network.grids import LVGrid, MVGrid -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) def reinforce_mv_lv_station_overloading(edisgo_obj, critical_stations): @@ -187,21 +187,23 @@ def _station_overloading(edisgo_obj, critical_stations, voltage_level): # transformer of the same kind as the transformer that best # meets the missing power demand new_transformers = grid.transformers_df.loc[ - grid.transformers_df[s_max_per_trafo >= s_trafo_missing][ - "s_nom" - ].idxmin() + [ + grid.transformers_df[s_max_per_trafo >= s_trafo_missing][ + "s_nom" + ].idxmin() + ] ] - name = new_transformers.name.split("_") + name = new_transformers.index[0].split("_") name.insert(-1, "reinforced") name[-1] = len(grid.transformers_df) + 1 - new_transformers.name = "_".join([str(_) for _ in name]) + new_transformers.index = ["_".join([str(_) for _ in name])] # add new transformer to list of added transformers - transformers_changes["added"][grid_name] = [new_transformers.name] + transformers_changes["added"][grid_name] = [new_transformers.index[0]] else: # get any transformer to get attributes for new transformer from - duplicated_transformer = 
grid.transformers_df.iloc[0] - name = duplicated_transformer.name.split("_") + duplicated_transformer = grid.transformers_df.iloc[[0]] + name = duplicated_transformer.index[0].split("_") name.insert(-1, "reinforced") duplicated_transformer.s_nom = standard_transformer.S_nom duplicated_transformer.type_info = standard_transformer.name @@ -213,11 +215,21 @@ def _station_overloading(edisgo_obj, critical_stations, voltage_level): number_transformers = math.ceil( (s_trafo_missing + s_max_per_trafo.sum()) / standard_transformer.S_nom ) - new_transformers = pd.DataFrame() + + index = [] + for i in range(number_transformers): name[-1] = i + 1 - duplicated_transformer.name = "_".join([str(_) for _ in name]) - new_transformers = new_transformers.append(duplicated_transformer) + index.append("_".join([str(_) for _ in name])) + + if number_transformers > 1: + new_transformers = duplicated_transformer.iloc[ + np.arange(len(duplicated_transformer)).repeat(number_transformers) + ] + else: + new_transformers = duplicated_transformer.copy() + + new_transformers.index = index # add new transformer to list of added transformers transformers_changes["added"][grid_name] = new_transformers.index.values @@ -237,12 +249,18 @@ def _station_overloading(edisgo_obj, critical_stations, voltage_level): # add new transformers to topology if voltage_level == "lv": - edisgo_obj.topology.transformers_df = ( - edisgo_obj.topology.transformers_df.append(new_transformers) + edisgo_obj.topology.transformers_df = pd.concat( + [ + edisgo_obj.topology.transformers_df, + new_transformers, + ] ) else: - edisgo_obj.topology.transformers_hvmv_df = ( - edisgo_obj.topology.transformers_hvmv_df.append(new_transformers) + edisgo_obj.topology.transformers_hvmv_df = pd.concat( + [ + edisgo_obj.topology.transformers_hvmv_df, + new_transformers, + ] ) return transformers_changes @@ -293,21 +311,24 @@ def reinforce_mv_lv_station_voltage_issues(edisgo_obj, critical_stations): for grid_repr in critical_stations.keys(): 
grid = edisgo_obj.topology._grids[grid_repr] # get any transformer to get attributes for new transformer from - duplicated_transformer = grid.transformers_df.iloc[0] + duplicated_transformer = grid.transformers_df.iloc[[0]] # change transformer parameters - name = duplicated_transformer.name.split("_") + name = duplicated_transformer.index[0].split("_") name.insert(-1, "reinforced") name[-1] = len(grid.transformers_df) + 1 - duplicated_transformer.name = "_".join([str(_) for _ in name]) + duplicated_transformer.index = ["_".join([str(_) for _ in name])] duplicated_transformer.s_nom = standard_transformer.S_nom duplicated_transformer.r_pu = standard_transformer.r_pu duplicated_transformer.x_pu = standard_transformer.x_pu duplicated_transformer.type_info = standard_transformer.name # add new transformer to topology - edisgo_obj.topology.transformers_df = ( - edisgo_obj.topology.transformers_df.append(duplicated_transformer) + edisgo_obj.topology.transformers_df = pd.concat( + [ + edisgo_obj.topology.transformers_df, + duplicated_transformer, + ] ) - transformers_changes["added"][grid_repr] = [duplicated_transformer.name] + transformers_changes["added"][grid_repr] = duplicated_transformer.index.tolist() if transformers_changes["added"]: logger.debug( @@ -399,7 +420,7 @@ def reinforce_lines_voltage_issues(edisgo_obj, grid, crit_nodes): for repr_node in nodes_feeder.keys(): # find node farthest away - get_weight = lambda u, v, data: data["length"] + get_weight = lambda u, v, data: data["length"] # noqa: E731 path_length = 0 for n in nodes_feeder[repr_node]: path_length_dict_tmp = dijkstra_shortest_path_length( @@ -494,6 +515,7 @@ def reinforce_lines_voltage_issues(edisgo_obj, grid, crit_nodes): ] = path_length_dict[node_2_3] edisgo_obj.topology.change_line_type([crit_line_name], standard_line) lines_changes[crit_line_name] = 1 + # ToDo: Include switch disconnector if not lines_changes: logger.debug( diff --git a/edisgo/flex_opt/storage_positioning.py 
b/edisgo/flex_opt/storage_positioning.py index d3e801618..2d9a5edb4 100644 --- a/edisgo/flex_opt/storage_positioning.py +++ b/edisgo/flex_opt/storage_positioning.py @@ -13,7 +13,7 @@ from edisgo.flex_opt import check_tech_constraints, costs from edisgo.tools import plots, tools -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) def one_storage_per_feeder( @@ -100,7 +100,9 @@ def _feeder_ranking(grid_expansion_costs): ) def _shortest_path(node): - if isinstance(node, LVStation): + # TODO: LVStation class is not used anymore + # resolve this when storage positioning is refactored + if isinstance(node, LVStation): # noqa: F821 return len(nx.shortest_path(node.mv_grid.graph, node.mv_grid.station, node)) else: return len(nx.shortest_path(node.grid.graph, node.grid.station, node)) @@ -135,14 +137,14 @@ def _find_battery_node(edisgo, critical_lines_feeder, critical_nodes_feeder): # dictionary with nodes and their corresponding path length to # MV station path_length_dict = {} - for l in critical_lines_feeder.index: - nodes = l.grid.graph.nodes_from_line(l) + for line in critical_lines_feeder.index: + nodes = line.grid.graph.nodes_from_line(line) for node in nodes: path_length_dict[node] = _shortest_path(node) # return node farthest away return [ _ - for _ in path_length_dict.keys() + for _ in path_length_dict if path_length_dict[_] == max(path_length_dict.values()) ][0] @@ -154,7 +156,7 @@ def _find_battery_node(edisgo, critical_lines_feeder, critical_nodes_feeder): node = critical_nodes_feeder[0] # get path length from station to critical node - get_weight = lambda u, v, data: data["line"].length + get_weight = lambda u, v, data: data["line"].length # noqa: E731 path_length = dijkstra_shortest_path_length( edisgo.network.mv_grid.graph, edisgo.network.mv_grid.station, @@ -200,10 +202,10 @@ def _calc_storage_size(edisgo, feeder, max_storage_size): p_slack = edisgo.network.pypsa.generators_t.p.loc[:, "Generator_slack"] * 1e3 # get sign of p and q - 
l = edisgo.network.pypsa.lines.loc[repr(feeder), :] + lines = edisgo.network.pypsa.lines.loc[repr(feeder), :] mv_station_bus = ( "bus0" - if l.loc["bus0"] == "Bus_".format(repr(edisgo.network.mv_grid.station)) + if lines.loc["bus0"] == f"Bus_{repr(edisgo.network.mv_grid.station)}" else "bus1" ) if mv_station_bus == "bus0": @@ -229,8 +231,8 @@ def _calc_storage_size(edisgo, feeder, max_storage_size): # get allowed load factors per case lf = { - "feedin_case": edisgo.network.config["grid_expansion_load_factors"][ - "mv_feedin_case_line" + "feed-in_case": edisgo.network.config["grid_expansion_load_factors"][ + "mv_feed-in_case_line" ], "load_case": network.config["grid_expansion_load_factors"][ "mv_load_case_line" @@ -250,9 +252,9 @@ def _calc_storage_size(edisgo, feeder, max_storage_size): q_total = q_feeder + q_storage p_hv_mv_station = p_slack - p_storage lf_ts = p_hv_mv_station.apply( - lambda _: lf["feedin_case"] if _ < 0 else lf["load_case"] + lambda _: lf["feed-in_case"] if _ < 0 else lf["load_case"] ) - s_max_ts = (p_total ** 2 + q_total ** 2).apply(sqrt).divide(lf_ts) + s_max_ts = (p_total**2 + q_total**2).apply(sqrt).divide(lf_ts) s_max.append(max(s_max_ts)) return sizes[pd.Series(s_max).idxmin()] @@ -283,12 +285,8 @@ def _critical_nodes_feeder(edisgo, feeder): critical_nodes = critical_nodes[edisgo.network.mv_grid] else: return [] - # filter nodes with voltage issues in feeder - critical_nodes_feeder = [] - for n in critical_nodes.index: - if repr(n.mv_feeder) == repr(feeder): - critical_nodes_feeder.append(n) - return critical_nodes_feeder + + return [n for n in critical_nodes.index if repr(n.mv_feeder) == repr(feeder)] def _critical_lines_feeder(edisgo, feeder): """ @@ -317,23 +315,25 @@ def _critical_lines_feeder(edisgo, feeder): # get all overloaded MV lines critical_lines = check_tech_constraints.mv_line_load(edisgo.network) # filter overloaded lines in feeder - critical_lines_feeder = [] - for l in critical_lines.index: - if 
repr(tools.get_mv_feeder_from_line(l)) == repr(feeder): - critical_lines_feeder.append(l) + critical_lines_feeder = [ + line + for line in critical_lines.index + if repr(tools.get_mv_feeder_from_line(line)) == repr(feeder) + ] + return critical_lines.loc[critical_lines_feeder, :] def _estimate_new_number_of_lines(critical_lines_feeder): - number_parallel_lines = 0 - for crit_line in critical_lines_feeder.index: - number_parallel_lines += ( + return sum( + ( ceil( critical_lines_feeder.loc[crit_line, "max_rel_overload"] * crit_line.quantity ) - crit_line.quantity ) - return number_parallel_lines + for crit_line in critical_lines_feeder.index + ) raise NotImplementedError @@ -479,9 +479,12 @@ def _estimate_new_number_of_lines(critical_lines_feeder): copy_graph=True, timesteps_pfa="snapshot_analysis" ) + # fmt: off total_grid_expansion_costs_new = ( - grid_expansion_results_new.grid_expansion_costs.total_costs.sum() + grid_expansion_results_new.grid_expansion_costs.total_costs.sum( + ) ) + # fmt: on costs_diff = ( total_grid_expansion_costs - total_grid_expansion_costs_new diff --git a/edisgo/io/ding0_import.py b/edisgo/io/ding0_import.py index f3c59d819..6e42f02eb 100644 --- a/edisgo/io/ding0_import.py +++ b/edisgo/io/ding0_import.py @@ -1,6 +1,5 @@ import os -import numpy as np import pandas as pd from pypsa import Network as PyPSANetwork @@ -12,7 +11,7 @@ import logging -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) def import_ding0_grid(path, edisgo_obj): @@ -71,9 +70,18 @@ def sort_hvmv_transformer_buses(transformers_df): edisgo_obj.topology.buses_df = grid.buses[edisgo_obj.topology.buses_df.columns] edisgo_obj.topology.lines_df = grid.lines[edisgo_obj.topology.lines_df.columns] - grid.loads = grid.loads.rename(columns={"peak_load": "p_nom"}) + grid.loads = grid.loads.drop(columns="p_set").rename(columns={"peak_load": "p_set"}) edisgo_obj.topology.loads_df = grid.loads[edisgo_obj.topology.loads_df.columns] + # set loads without type 
information to be conventional loads + # this is done, as ding0 currently does not provide information on the type of load + # but ding0 grids currently also only contain conventional loads + # ToDo: Change, once information is provided by ding0 + loads_without_type = edisgo_obj.topology.loads_df[ + (edisgo_obj.topology.loads_df.type.isnull()) + | (edisgo_obj.topology.loads_df.type == "") + ].index + edisgo_obj.topology.loads_df.loc[loads_without_type, "type"] = "conventional_load" # drop slack generator from generators slack = grid.generators.loc[grid.generators.control == "Slack"].index grid.generators.drop(slack, inplace=True) @@ -120,103 +128,4 @@ def sort_hvmv_transformer_buses(transformers_df): edisgo_obj.topology._grids[str(lv_grid)] = lv_grid # Check data integrity - _validate_ding0_grid_import(edisgo_obj.topology) - - -def _validate_ding0_grid_import(topology): - """ - Check imported data integrity. Checks for duplicated labels and not - connected components. - Todo: Check with meth:`_check_integrity_of_pypsa` in pypsa_io - - Parameters - ---------- - topology: class:`~.network.topology.Topology` - topology class containing mv and lv grids - - """ - # check for duplicate labels (of components) - duplicated_labels = [] - if any(topology.buses_df.index.duplicated()): - duplicated_labels.append( - topology.buses_df.index[topology.buses_df.index.duplicated()].values - ) - if any(topology.generators_df.index.duplicated()): - duplicated_labels.append( - topology.generators_df.index[ - topology.generators_df.index.duplicated() - ].values - ) - if any(topology.loads_df.index.duplicated()): - duplicated_labels.append( - topology.loads_df.index[topology.loads_df.index.duplicated()].values - ) - if any(topology.transformers_df.index.duplicated()): - duplicated_labels.append( - topology.transformers_df.index[ - topology.transformers_df.index.duplicated() - ].values - ) - if any(topology.lines_df.index.duplicated()): - duplicated_labels.append( - 
topology.lines_df.index[topology.lines_df.index.duplicated()].values - ) - if any(topology.switches_df.index.duplicated()): - duplicated_labels.append( - topology.switches_df.index[topology.switches_df.index.duplicated()].values - ) - if duplicated_labels: - raise ValueError( - "{labels} have duplicate entry in one of the components " - "dataframes.".format( - labels=", ".join( - np.concatenate([list.tolist() for list in duplicated_labels]) - ) - ) - ) - - # check for isolated or not defined buses - buses = [] - - for nodal_component in [ - "loads", - "generators", - "storage_units", - ]: - df = getattr(topology, nodal_component + "_df") - missing = df.index[~df.bus.isin(topology.buses_df.index)] - buses.append(df.bus.values) - if len(missing) > 0: - raise ValueError( - "The following {} have buses which are not defined: " - "{}.".format(nodal_component, ", ".join(missing.values)) - ) - - for branch_component in ["lines", "transformers"]: - df = getattr(topology, branch_component + "_df") - for attr in ["bus0", "bus1"]: - buses.append(df[attr].values) - missing = df.index[~df[attr].isin(topology.buses_df.index)] - if len(missing) > 0: - raise ValueError( - "The following {} have {} which are not defined: " - "{}.".format(branch_component, attr, ", ".join(missing.values)) - ) - - for attr in ["bus_open", "bus_closed"]: - missing = topology.switches_df.index[ - ~topology.switches_df[attr].isin(topology.buses_df.index) - ] - buses.append(topology.switches_df[attr].values) - if len(missing) > 0: - raise ValueError( - "The following switches have {} which are not defined: " - "{}.".format(attr, ", ".join(missing.values)) - ) - - all_buses = np.unique(np.concatenate(buses, axis=None)) - missing = topology.buses_df.index[~topology.buses_df.index.isin(all_buses)] - if len(missing) > 0: - raise ValueError( - "The following buses are isolated: {}.".format(", ".join(missing.values)) - ) + edisgo_obj.topology.check_integrity() diff --git a/edisgo/io/generators_import.py 
b/edisgo/io/generators_import.py index e92ea53dd..9b79592d5 100755 --- a/edisgo/io/generators_import.py +++ b/edisgo/io/generators_import.py @@ -6,11 +6,10 @@ from sqlalchemy import func -from edisgo.network.timeseries import add_generators_timeseries from edisgo.tools import session_scope from edisgo.tools.geo import proj2equidistant -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) if "READTHEDOCS" not in os.environ: from egoio.db_tables import model_draft, supply @@ -192,7 +191,7 @@ def _validate_generation(): """ # set capacity difference threshold - cap_diff_threshold = 10 ** -1 + cap_diff_threshold = 10**-1 capacity_imported = ( generators_res_mv["p_nom"].sum() @@ -203,21 +202,17 @@ def _validate_generation(): capacity_grid = edisgo_object.topology.generators_df.p_nom.sum() logger.debug( - "Cumulative generator capacity (updated): {} MW".format( - round(capacity_imported, 1) - ) + f"Cumulative generator capacity (updated): {round(capacity_imported, 1)} MW" ) if abs(capacity_imported - capacity_grid) > cap_diff_threshold: raise ValueError( - "Cumulative capacity of imported generators ({} MW) " - "differs from cumulative capacity of generators " - "in updated grid ({} MW) by {} MW.".format( - round(capacity_imported, 1), - round(capacity_grid, 1), - round(capacity_imported - capacity_grid, 1), - ) + f"Cumulative capacity of imported generators (" + f"{round(capacity_imported, 1)} MW) differs from cumulative capacity of" + f" generators in updated grid ({round(capacity_grid, 1)} MW) by " + f"{round(capacity_imported - capacity_grid, 1)} MW." 
) + else: logger.debug("Cumulative capacity of imported generators validated.") @@ -306,7 +301,12 @@ def _validate_sample_geno_location(): generators_conv_mv = _import_conv_generators(session) generators_res_mv, generators_res_lv = _import_res_generators(session) - generators_mv = generators_conv_mv.append(generators_res_mv) + generators_mv = pd.concat( + [ + generators_conv_mv, + generators_res_mv, + ] + ) # validate that imported generators are located inside the grid district _validate_sample_geno_location() @@ -315,19 +315,12 @@ def _validate_sample_geno_location(): edisgo_object=edisgo_object, imported_generators_mv=generators_mv, imported_generators_lv=generators_res_lv, - **kwargs + **kwargs, ) if kwargs.get("p_target", None) is None: _validate_generation() - # update time series if they were already set - if not edisgo_object.timeseries.generators_active_power.empty: - add_generators_timeseries( - edisgo_obj=edisgo_object, - generator_names=edisgo_object.topology.generators_df.index, - ) - def _update_grids( edisgo_object, @@ -337,7 +330,7 @@ def _update_grids( update_existing=True, p_target=None, allowed_number_of_comp_per_lv_bus=2, - **kwargs + **kwargs, ): """ Update network according to new generator dataset. 
@@ -429,14 +422,14 @@ def _check_mv_generator_geom(generator_data): return None # set capacity difference threshold - cap_diff_threshold = 10 ** -4 + cap_diff_threshold = 10**-4 # get all imported generators imported_gens = pd.concat( [imported_generators_lv, imported_generators_mv], sort=True ) - logger.debug("{} generators imported.".format(len(imported_gens))) + logger.debug(f"{len(imported_gens)} generators imported.") # get existing generators and append ID column existing_gens = edisgo_object.topology.generators_df @@ -445,9 +438,8 @@ def _check_mv_generator_geom(generator_data): ) logger.debug( - "Cumulative generator capacity (existing): {} MW".format( - round(existing_gens.p_nom.sum(), 1) - ) + "Cumulative generator capacity (existing): " + f"{round(existing_gens.p_nom.sum(), 1)} MW" ) # check if capacity of any of the imported generators is <= 0 @@ -533,10 +525,16 @@ def _check_mv_generator_geom(generator_data): new_gens_mv = imported_generators_mv[ ~imported_generators_mv.index.isin(list(existing_gens.id)) ] + new_gens_mv = new_gens_mv.assign( + p=new_gens_mv.p_nom, + ) new_gens_lv = imported_generators_lv[ ~imported_generators_lv.index.isin(list(existing_gens.id)) ] + new_gens_lv = new_gens_lv.assign( + p=new_gens_lv.p_nom, + ) if p_target is not None: @@ -610,7 +608,7 @@ def drop_generators(generator_list, gen_type, total_capacity): # drop types not in p_target from new_gens for gen_type in new_gens.generator_type.unique(): - if not gen_type in p_target.keys(): + if gen_type not in p_target.keys(): new_gens.drop( new_gens[new_gens["generator_type"] == gen_type].index, inplace=True, diff --git a/edisgo/io/pypsa_io.py b/edisgo/io/pypsa_io.py index 0b401d096..047f72992 100755 --- a/edisgo/io/pypsa_io.py +++ b/edisgo/io/pypsa_io.py @@ -1,101 +1,53 @@ """ -This module provides tools to convert graph based representation of the network +This module provides tools to convert eDisGo representation of the network topology to PyPSA data model. 
Call :func:`to_pypsa` to retrieve the PyPSA network container. """ import collections +import logging from math import sqrt import numpy as np import pandas as pd -from networkx import connected_components from pypsa import Network as PyPSANetwork from pypsa.io import import_series_from_dataframe +logger = logging.getLogger(__name__) -def to_pypsa(grid_object, timesteps, **kwargs): + +def to_pypsa(edisgo_object, mode=None, timesteps=None, **kwargs): """ - Export edisgo object to PyPSA Network - - For details from a user perspective see API documentation of - :meth:`~edisgo.EDisGo.analyze` of the API class - :class:`~.edisgo.EDisGo`. - - Translating eDisGo's network topology to PyPSA representation is structured - into translating the topology and adding time series for components of the - network. In both cases translation of MV network only (`mode='mv'`, - `mode='mvlv'`), LV network only(`mode='lv'`), MV and LV (`mode=None`) - share some code. The code is organized as follows: - - * Medium-voltage only (`mode='mv'`): All medium-voltage network components - are exported including the medium voltage side of LV station. - Transformers are not exported in this mode. LV network load - and generation is considered using :func:`append_lv_components`. - Time series are collected and imported to PyPSA network. - * Medium-voltage including transformers (`mode='mvlv'`). Works similar as - the first mode, only attaching LV components to the LV side of the - LVStation and therefore also adding the transformers to the PyPSA network. - * Low-voltage only (`mode='lv'`): LV network topology including the MV-LV - transformer is exported. The slack is defind at primary side of the MV-LV - transformer. - * Both level MV+LV (`mode=None`): The entire network topology is translated to - PyPSA in order to perform a complete power flow analysis in both levels - together. First, both network levels are translated seperately and then - merged. 
Time series are obtained at once for both network levels. - - This PyPSA interface is aware of translation errors and performs so checks - on integrity of data converted to PyPSA network representation - - * Sub-graphs/ Sub-networks: It is ensured the network has no islanded parts - * Completeness of time series: It is ensured each component has a time - series - * Buses available: Each component (load, generator, line, transformer) is - connected to a bus. The PyPSA representation is check for completeness of - buses. - * Duplicate labels in components DataFrames and components' time series - DataFrames + Convert grid to :pypsa:`PyPSA.Network` representation. + + You can choose between translation of the MV and all underlying LV grids + (mode=None (default)), the MV network only (mode='mv' or mode='mvlv') or a + single LV network (mode='lv'). Parameters ---------- - grid_object: :class:`~.EDisGo` or :class:`~.network.grids.Grid` - EDisGo or grid object + edisgo_object : :class:`~.EDisGo` + EDisGo object containing grid topology and time series information. mode : str Determines network levels that are translated to - `PyPSA network representation - `_. Specify - - * None to export MV and LV network levels. None is the default. - * 'mv' to export MV network level only. This includes cumulative load - and generation from underlying LV network aggregated at respective LV - station's primary side. - * 'mvlv' to export MV network level only. This includes cumulative load - and generation from underlying LV network aggregated at respective LV - station's secondary side. - #ToDo change name of this mode or use kwarg to define where to aggregate lv loads and generation - * 'lv' to export specified LV network only. + :pypsa:`PyPSA.Network`. + See `mode` parameter in :attr:`~.edisgo.EDisGo.to_pypsa` for more information. 
timesteps : :pandas:`pandas.DatetimeIndex` or \ :pandas:`pandas.Timestamp` - Timesteps specifies which time steps to export to pypsa representation - and use in power flow analysis. + See `timesteps` parameter in :attr:`~.edisgo.EDisGo.to_pypsa` for more + information. Other Parameters ----------------- - use_seed : bool - Use a seed for the initial guess for the Newton-Raphson algorithm. - Only available when MV level is included in the power flow analysis. - If True, uses voltage magnitude results of previous power flow - analyses as initial guess in case of PQ buses. PV buses currently do - not occur and are therefore currently not supported. - Default: False. + See other parameters in :attr:`~.edisgo.EDisGo.to_pypsa` for more + information. Returns ------- - :pypsa:`pypsa.Network` - The `PyPSA network - `_ container. + :pypsa:`PyPSA.Network` + :pypsa:`PyPSA.Network` representation. """ @@ -103,7 +55,7 @@ def _set_slack(grid): """ Sets slack at given grid's station secondary side. - It is assumed that bus of secondary side is always given in + It is assumed that the secondary side bus is always given in transformer's bus1. 
Parameters @@ -120,12 +72,13 @@ def _set_slack(grid): index=["Generator_slack"], ) - mode = kwargs.get("mode", None) aggregate_loads = kwargs.get("aggregate_loads", None) aggregate_generators = kwargs.get("aggregate_generators", None) aggregate_storages = kwargs.get("aggregate_storages", None) aggregated_lv_components = {"Generator": {}, "Load": {}, "StorageUnit": {}} + if timesteps is None: + timesteps = edisgo_object.timeseries.timeindex # check if timesteps is array-like, otherwise convert to list (necessary # to obtain a dataframe when using .loc in time series functions) if not hasattr(timesteps, "__len__"): @@ -135,30 +88,27 @@ def _set_slack(grid): pypsa_network = PyPSANetwork() pypsa_network.set_snapshots(timesteps) - # define edisgo_obj, buses_df, slack_df and components for each use case + # define buses_df, slack_df and components for each use case if mode is None: pypsa_network.mode = "mv" - edisgo_obj = grid_object - buses_df = grid_object.topology.buses_df.loc[:, ["v_nom"]] - slack_df = _set_slack(edisgo_obj.topology.mv_grid) + buses_df = edisgo_object.topology.buses_df.loc[:, ["v_nom"]] + slack_df = _set_slack(edisgo_object.topology.mv_grid) components = { - "Load": grid_object.topology.loads_df.loc[:, ["bus", "p_nom"]].rename( - columns={"p_nom": "p_set"} - ), - "Generator": grid_object.topology.generators_df.loc[ + "Load": edisgo_object.topology.loads_df.loc[:, ["bus", "p_set"]], + "Generator": edisgo_object.topology.generators_df.loc[ :, ["bus", "control", "p_nom"] ], - "StorageUnit": grid_object.topology.storage_units_df.loc[ + "StorageUnit": edisgo_object.topology.storage_units_df.loc[ :, ["bus", "control"] ], - "Line": grid_object.topology.lines_df.loc[ + "Line": edisgo_object.topology.lines_df.loc[ :, ["bus0", "bus1", "x", "r", "s_nom", "num_parallel", "length"], ], - "Transformer": grid_object.topology.transformers_df.loc[ + "Transformer": edisgo_object.topology.transformers_df.loc[ :, ["bus0", "bus1", "x_pu", "r_pu", "type_info", "s_nom"] 
].rename(columns={"r_pu": "r", "x_pu": "x"}), } @@ -167,7 +117,7 @@ def _set_slack(grid): pypsa_network.mode = "mv" - edisgo_obj = grid_object.edisgo_obj + grid_object = edisgo_object.topology.mv_grid buses_df = grid_object.buses_df.loc[:, ["v_nom"]] slack_df = _set_slack(grid_object) @@ -178,10 +128,10 @@ def _set_slack(grid): ) if mode == "mv": - mv_components["Transformer"] = pd.DataFrame() + mv_components["Transformer"] = pd.DataFrame(dtype=float) elif mode == "mvlv": # get all MV/LV transformers - mv_components["Transformer"] = edisgo_obj.topology.transformers_df.loc[ + mv_components["Transformer"] = edisgo_object.topology.transformers_df.loc[ :, ["bus0", "bus1", "x_pu", "r_pu", "type_info", "s_nom"] ].rename(columns={"r_pu": "r", "x_pu": "x"}) else: @@ -193,7 +143,9 @@ def _set_slack(grid): "Generator": ["generators_df"], "StorageUnit": ["storage_units_df"], } - lv_components = {key: pd.DataFrame() for key in lv_components_to_aggregate} + lv_components = { + key: pd.DataFrame(dtype=float) for key in lv_components_to_aggregate + } for lv_grid in grid_object.lv_grids: if mode == "mv": @@ -202,18 +154,18 @@ def _set_slack(grid): lv_grid.transformers_df.bus0.unique() ] elif mode == "mvlv": - # get secondary side of station to append loads and generators - # to + # get secondary side of station to append loads and generators to station_bus = lv_grid.buses_df.loc[ [lv_grid.transformers_df.bus1.unique()[0]] ] - buses_df = buses_df.append(station_bus.loc[:, ["v_nom"]]) + buses_df = pd.concat([buses_df, station_bus.loc[:, ["v_nom"]]]) # handle one gate components for comp, dfs in lv_components_to_aggregate.items(): - comps = pd.DataFrame() + comps = pd.DataFrame(dtype=float) for df in dfs: comps_tmp = getattr(lv_grid, df).copy() - comps = comps.append(comps_tmp) + comps = pd.concat([comps, comps_tmp]) + comps.bus = station_bus.index.values[0] aggregated_lv_components[comp].update( _append_lv_components( @@ -231,21 +183,30 @@ def _set_slack(grid): components = 
collections.defaultdict(pd.DataFrame) for comps in (mv_components, lv_components): for key, value in comps.items(): - components[key] = components[key].append(value) + components[key] = pd.concat( + [ + components[key], + value, + ] + ) elif mode == "lv": pypsa_network.mode = "lv" - edisgo_obj = grid_object.edisgo_obj + lv_grid_name = kwargs.get("lv_grid_name", None) + if not lv_grid_name: + raise ValueError( + "For exporting lv grids, name of lv_grid has to be provided." + ) + grid_object = edisgo_object.topology._grids[lv_grid_name] buses_df = grid_object.buses_df.loc[:, ["v_nom"]] slack_df = _set_slack(grid_object) components = _get_grid_component_dict(grid_object) else: raise ValueError( - "Provide proper mode or leave it empty to export " - "entire network topology." + "Provide proper mode or leave it empty to export entire network topology." ) # import network topology to PyPSA network @@ -261,7 +222,7 @@ def _set_slack(grid): import_series_from_dataframe( pypsa_network, _buses_voltage_set_point( - edisgo_obj, + edisgo_object, buses_df.index, slack_df.loc["Generator_slack", "bus"], timesteps, @@ -286,7 +247,7 @@ def _set_slack(grid): generators_timeseries_active, generators_timeseries_reactive, ) = _get_timeseries_with_aggregated_elements( - edisgo_obj, + edisgo_object, timesteps, ["generators"], components["Generator"].index, @@ -294,12 +255,12 @@ def _set_slack(grid): ) else: generators_timeseries_active = ( - edisgo_obj.timeseries.generators_active_power.loc[ + edisgo_object.timeseries.generators_active_power.loc[ timesteps, components["Generator"].index ] ) generators_timeseries_reactive = ( - edisgo_obj.timeseries.generators_reactive_power.loc[ + edisgo_object.timeseries.generators_reactive_power.loc[ timesteps, components["Generator"].index ] ) @@ -317,27 +278,21 @@ def _set_slack(grid): loads_timeseries_active, loads_timeseries_reactive, ) = _get_timeseries_with_aggregated_elements( - edisgo_obj, + edisgo_object, timesteps, - ["loads", 
"charging_points"], + ["loads"], components["Load"].index, aggregated_lv_components["Load"], ) else: - loads_timeseries_active = pd.concat( - [ - edisgo_obj.timeseries.loads_active_power, - edisgo_obj.timeseries.charging_points_active_power, - ], - axis=1, - ).loc[timesteps, components["Load"].index] - loads_timeseries_reactive = pd.concat( - [ - edisgo_obj.timeseries.loads_reactive_power, - edisgo_obj.timeseries.charging_points_reactive_power, - ], - axis=1, - ).loc[timesteps, components["Load"].index] + loads_timeseries_active = edisgo_object.timeseries.loads_active_power.loc[ + timesteps, components["Load"].index + ] + loads_timeseries_reactive = ( + edisgo_object.timeseries.loads_reactive_power.loc[ + timesteps, components["Load"].index + ] + ) import_series_from_dataframe( pypsa_network, loads_timeseries_active, "Load", "p_set" ) @@ -351,7 +306,7 @@ def _set_slack(grid): storages_timeseries_active, storages_timeseries_reactive, ) = _get_timeseries_with_aggregated_elements( - edisgo_obj, + edisgo_object, timesteps, ["storage_units"], components["StorageUnit"].index, @@ -359,12 +314,12 @@ def _set_slack(grid): ) else: storages_timeseries_active = ( - edisgo_obj.timeseries.storage_units_active_power.loc[ + edisgo_object.timeseries.storage_units_active_power.loc[ timesteps, components["StorageUnit"].index ] ) storages_timeseries_reactive = ( - edisgo_obj.timeseries.storage_units_reactive_power.loc[ + edisgo_object.timeseries.storage_units_reactive_power.loc[ timesteps, components["StorageUnit"].index ] ) @@ -382,9 +337,7 @@ def _set_slack(grid): ) if kwargs.get("use_seed", False) and pypsa_network.mode == "mv": - set_seed(edisgo_obj, pypsa_network) - - _check_integrity_of_pypsa(pypsa_network) + set_seed(edisgo_object, pypsa_network) return pypsa_network @@ -482,24 +435,24 @@ def set_seed(edisgo_obj, pypsa_network): def _get_grid_component_dict(grid_object): """ - Method to extract component dictionary from given grid object. 
Components - are devided into "Load", "Generator", "StorageUnit" and "Line". Used for - translation into pypsa network. + Method to extract component dictionary from given grid object. + + Components are divided into "Load", "Generator", "StorageUnit" and "Line". Used for + translation to pypsa network. Parameters ---------- - grid_object: MV or LV grid object + grid_object : :class:`~.network.grids.Grid` Returns ------- dict Component dictionary divided into "Load", "Generator", "StorageUnit" - and "Line" + and "Line". + """ components = { - "Load": grid_object.loads_df.loc[:, ["bus", "p_nom"]].rename( - columns={"p_nom": "p_set"} - ), + "Load": grid_object.loads_df.loc[:, ["bus", "p_set"]], "Generator": grid_object.generators_df.loc[:, ["bus", "control", "p_nom"]], "StorageUnit": grid_object.storage_units_df.loc[:, ["bus", "control"]], "Line": grid_object.lines_df.loc[ @@ -520,50 +473,54 @@ def _append_lv_components( aggregate_storages=None, ): """ - Method to append lv components to component dictionary. Used when only - exporting mv grid topology. All underlaying LV components of an LVGrid are - then connected to one side of the LVStation. If required, the LV components - can be aggregated in different modes. As an example, loads can be + Method to append LV components to component dictionary. + + Used when only exporting mv grid topology. All underlying LV components of an + LVGrid are then connected to one side of the LVStation. If required, the LV + components can be aggregated in different modes. As an example, loads can be aggregated sector-wise or all loads can be aggregated into one - representative load. The sum of p_nom or peak_load of all cumulated - components is calculated. + representative load. The sum of p_nom/p_set of all cumulated components is + calculated. 
Parameters ---------- - comp: str - indicator for component type, can be 'Load', 'Generator' or - 'StorageUnit' - comps: `pandas.DataFrame` - component dataframe of elements to be aggregated - lv_components: dict - dictionary of LV grid components, keys are the 'Load', 'Generator' and - 'StorageUnit' - lv_grid_name: str - representative of LV grid of which components are aggregated - aggregate_loads: str - mode for load aggregation, can be 'sectoral' aggregating the loads - sector-wise or 'all' aggregating all loads into one. Defaults to None, + comp : str + Indicator for component type to aggregate. Can be 'Load', 'Generator' or + 'StorageUnit'. + comps : `pandas.DataFrame` + Component dataframe of elements to be aggregated. + lv_components : dict + Dictionary of LV grid components, keys are the 'Load', 'Generator' and + 'StorageUnit'. + lv_grid_name : str + Representative of LV grid of which components are aggregated. + aggregate_loads : str + Mode for load aggregation. Can be 'sectoral' aggregating the loads + sector-wise, 'all' aggregating all loads into one or None, not aggregating loads but appending them to the station one by one. - aggregate_generators: str - mode for generator aggregation, can be 'type' resulting in + Default: None. + aggregate_generators : str + Mode for generator aggregation. Can be 'type' resulting in an aggregated generator for each generator type, 'curtailable' aggregating 'solar' and 'wind' generators into one and all other generators into - another generator. Defaults to None, when no aggregation is undertaken - and generators are addded one by one. - aggregate_storages: str - mode for storage unit aggregation. Can be 'all' where all storage units - in the grid are replaced by one storage. Defaults to None, where no - aggregation is conducted and storage units are added one by one. + another one, or None, where no aggregation is undertaken + and generators are added one by one. Default: None. 
+ aggregate_storages : str + Mode for storage unit aggregation. Can be 'all' where all + storage units are aggregated to one storage unit or None, in + which case no aggregation is conducted and storage units are added one by + one. Default: None. Returns ------- dict - dict of aggregated elements for timeseries creation. Keys are names - of aggregated elements and entries is a list of the names of all + Dictionary of aggregated elements for time series creation. Keys are names + of aggregated elements and values are a list of the names of all components aggregated in that respective key component. - An example could look the following way: + An example could look as follows: {'LVGrid_1_loads': ['Load_agricultural_LVGrid_1_1', 'Load_retail_LVGrid_1_2']} + """ aggregated_elements = {} if len(comps) > 0: @@ -572,15 +529,12 @@ def _append_lv_components( return {} if comp == "Load": if aggregate_loads is None: - comps_aggr = comps.loc[:, ["bus", "p_nom"]].rename( - columns={"p_nom": "p_set"} - ) + comps_aggr = comps.loc[:, ["bus", "p_set"]] elif aggregate_loads == "sectoral": comps_aggr = ( - comps.loc[:, ["p_nom", "sector"]] + comps.loc[:, ["p_set", "sector"]] .groupby("sector") .sum() - .rename(columns={"p_nom": "p_set"}) .loc[:, ["p_set"]] ) for sector in comps_aggr.index.values: @@ -591,13 +545,20 @@ def _append_lv_components( comps_aggr["bus"] = bus elif aggregate_loads == "all": comps_aggr = pd.DataFrame( - {"bus": [bus], "p_set": [sum(comps.p_nom)]}, + {"bus": [bus], "p_set": [sum(comps.p_set)]}, index=[lv_grid_name + "_loads"], ) aggregated_elements[lv_grid_name + "_loads"] = comps.index.values else: raise ValueError("Aggregation type for loads invalid.") - lv_components[comp] = lv_components[comp].append(comps_aggr) + + lv_components[comp] = pd.concat( + [ + lv_components[comp], + comps_aggr, + ] + ) + elif comp == "Generator": flucts = ["wind", "solar"] if aggregate_generators is None: @@ -618,33 +579,40 @@ def _append_lv_components( elif aggregate_generators 
== "curtailable": comps_fluct = comps[comps.type.isin(flucts)] comps_disp = comps[~comps.index.isin(comps_fluct.index)] - comps_aggr = pd.DataFrame(columns=["bus", "control", "p_nom"]) + comps_aggr = pd.DataFrame(columns=["bus", "control", "p_nom"], dtype=float) if len(comps_fluct) > 0: - comps_aggr = comps_aggr.append( - pd.DataFrame( - { - "bus": [bus], - "control": ["PQ"], - "p_nom": [sum(comps_fluct.p_nom)], - "fluctuating": [True], - }, - index=[lv_grid_name + "_fluctuating"], - ) + comps_aggr = pd.concat( + [ + comps_aggr, + pd.DataFrame( + { + "bus": [bus], + "control": ["PQ"], + "p_nom": [sum(comps_fluct.p_nom)], + "fluctuating": [True], + }, + index=[lv_grid_name + "_fluctuating"], + ), + ] ) aggregated_elements[ lv_grid_name + "_fluctuating" ] = comps_fluct.index.values + if len(comps_disp) > 0: - comps_aggr = comps_aggr.append( - pd.DataFrame( - { - "bus": [bus], - "control": ["PQ"], - "p_nom": [sum(comps_disp.p_nom)], - "fluctuating": [False], - }, - index=[lv_grid_name + "_dispatchable"], - ) + comps_aggr = pd.concat( + [ + comps_aggr, + pd.DataFrame( + { + "bus": [bus], + "control": ["PQ"], + "p_nom": [sum(comps_disp.p_nom)], + "fluctuating": [False], + }, + index=[lv_grid_name + "_dispatchable"], + ), + ] ) aggregated_elements[ lv_grid_name + "_dispatchable" @@ -668,9 +636,16 @@ def _append_lv_components( aggregated_elements[lv_grid_name + "_generators"] = comps.index.values else: raise ValueError("Aggregation type for generators invalid.") - lv_components[comp] = lv_components[comp].append(comps_aggr) + + lv_components[comp] = pd.concat( + [ + lv_components[comp], + comps_aggr, + ] + ) + elif comp == "StorageUnit": - if aggregate_storages == None: + if aggregate_storages is None: comps_aggr = comps.loc[:, ["bus", "control"]] elif aggregate_storages == "all": comps_aggr = pd.DataFrame( @@ -680,7 +655,14 @@ def _append_lv_components( aggregated_elements[lv_grid_name + "_storages"] = comps.index.values else: raise ValueError("Aggregation type for 
storages invalid.") - lv_components[comp] = lv_components[comp].append(comps_aggr) + + lv_components[comp] = pd.concat( + [ + lv_components[comp], + comps_aggr, + ] + ) + else: raise ValueError("Component type not defined.") @@ -691,34 +673,35 @@ def _get_timeseries_with_aggregated_elements( edisgo_obj, timesteps, element_types, elements, aggr_dict ): """ - Creates timeseries for aggregated LV components by summing up the single - timeseries and adding the respective entry to edisgo_obj.timeseries. + Creates time series for aggregated LV components by summing up the single + time series. Parameters ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - the eDisGo network container - timesteps: timesteps of format :pandas:`pandas.Timestamp` - index timesteps for component's load or generation timeseries - element_types: list of str - type of element which was aggregated. Can be 'loads', 'generators' or + edisgo_obj : :class:`~.self.edisgo.EDisGo` + eDisGo object + timesteps : :pandas:`pandas.DatetimeIndex` + Time steps to export to pypsa representation. + element_types : list(str) + Type of element which was aggregated. Can be 'loads', 'generators' or 'storage_units' elements: `pandas.DataFrame` - component dataframe of all elements for which timeseries are added + Component dataframe of all elements for which time series are added. aggr_dict: dict - dictionary containing aggregated elements as values and the + Dictionary containing aggregated elements as values and the representing new component as key. See :meth:`_append_lv_components` for structure of dictionary. Returns ------- - tuple of `pandas.DataFrame` - active and reactive power timeseries for chosen elements. Dataframes + tuple(`pandas.DataFrame`) + Active and reactive power time series for chosen elements. Dataframes with timesteps as index and name of elements as columns. 
+ """ # get relevant timeseries - elements_timeseries_active_all = pd.DataFrame() - elements_timeseries_reactive_all = pd.DataFrame() + elements_timeseries_active_all = pd.DataFrame(dtype=float) + elements_timeseries_reactive_all = pd.DataFrame(dtype=float) for element_type in element_types: elements_timeseries_active_all = pd.concat( [ @@ -769,7 +752,7 @@ def _buses_voltage_set_point(edisgo_obj, buses, slack_bus, timesteps): Parameters ---------- edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container + eDisGo object timesteps : array_like Timesteps is an array-like object with entries of type :pandas:`pandas.Timestamp` specifying which time steps @@ -812,110 +795,9 @@ def _buses_voltage_set_point(edisgo_obj, buses, slack_bus, timesteps): return v_nom -def _check_integrity_of_pypsa(pypsa_network): - """ - Checks whether the provided pypsa network is calculable. - - Isolated nodes, - duplicate labels, that every load, generator and storage unit has a - time series for active and reactive power, and completeness of buses and branch elements are checked. - - Parameters - ---------- - pypsa_network: :pypsa:`pypsa.Network` - The `PyPSA network - `_ container. 
- - """ - - # check for sub-networks - subgraphs = list( - pypsa_network.graph().subgraph(c) - for c in connected_components(pypsa_network.graph()) - ) - pypsa_network.determine_network_topology() - - if len(subgraphs) > 1 or len(pypsa_network.sub_networks) > 1: - raise ValueError("The pypsa graph has isolated nodes or edges.") - - # check for duplicate labels of components - comps_dfs = [ - pypsa_network.buses, - pypsa_network.generators, - pypsa_network.loads, - pypsa_network.storage_units, - pypsa_network.transformers, - pypsa_network.lines, - ] - for comp_type in comps_dfs: - if any(comp_type.index.duplicated()): - raise ValueError( - "Pypsa network has duplicated entries: {}.".format( - comp_type.index.duplicated() - ) - ) - - # check consistency of topology and time series data - comp_df_dict = { - # exclude Slack from check - "gens": pypsa_network.generators[pypsa_network.generators.control != "Slack"], - "loads": pypsa_network.loads, - "storage_units": pypsa_network.storage_units, - } - comp_ts_dict = { - "gens": pypsa_network.generators_t, - "loads": pypsa_network.loads_t, - "storage_units": pypsa_network.storage_units_t, - } - for comp_type, ts in comp_ts_dict.items(): - for i in ["p_set", "q_set"]: - missing = comp_df_dict[comp_type].loc[ - ~comp_df_dict[comp_type].index.isin(ts[i].dropna(axis=1).columns) - ] - if not missing.empty: - raise ValueError( - "The following components have no `{}` time " - "series: {}.".format(i, missing.index) - ) - - missing = pypsa_network.buses.loc[ - ~pypsa_network.buses.index.isin( - pypsa_network.buses_t["v_mag_pu_set"].columns.tolist() - ) - ] - if not missing.empty: - raise ValueError( - "The following components have no `v_mag_pu_set` time " - "series: {}.".format(missing.index) - ) - - # check for duplicates in p_set and q_set - comp_ts = [ - pypsa_network.loads_t, - pypsa_network.generators_t, - pypsa_network.storage_units_t, - ] - for comp in comp_ts: - for i in ["p_set", "q_set"]: - if 
any(comp[i].columns.duplicated()): - raise ValueError( - "Pypsa timeseries have duplicated entries: {}".format( - comp[i].columns.duplicated() - ) - ) - - if any(pypsa_network.buses_t["v_mag_pu_set"].columns.duplicated()): - raise ValueError( - "Pypsa timeseries have duplicated entries: {}".format( - pypsa_network.buses_t["v_mag_pu_set"].columns.duplicated() - ) - ) - - def process_pfa_results(edisgo, pypsa, timesteps): """ - Passing power flow results from PyPSA to - :class:`~.network.results.Results`. + Passing power flow results from PyPSA to :class:`~.network.results.Results`. Parameters ---------- @@ -923,7 +805,8 @@ def process_pfa_results(edisgo, pypsa, timesteps): pypsa : :pypsa:`pypsa.Network` The PyPSA `Network container `_ - timesteps : :pandas:`pandas.DatetimeIndex` or :pandas:`pandas.Timestamp` + timesteps : :pandas:`pandas.DatetimeIndex` or \ + :pandas:`pandas.Timestamp` Time steps for which latest power flow analysis was conducted and for which to retrieve pypsa results. @@ -945,6 +828,7 @@ def process_pfa_results(edisgo, pypsa, timesteps): """ # get the absolute losses in the system (in MW and Mvar) # subtracting total generation (including slack) from total load + # ToDo include storage units grid_losses = { "p": ( abs(pypsa.generators_t["p"].sum(axis=1) - pypsa.loads_t["p"].sum(axis=1)) diff --git a/edisgo/io/timeseries_import.py b/edisgo/io/timeseries_import.py index d1b132a72..90a9cae02 100644 --- a/edisgo/io/timeseries_import.py +++ b/edisgo/io/timeseries_import.py @@ -15,7 +15,8 @@ def feedin_oedb(config_data, weather_cell_ids, timeindex): """ - Import feed-in time series data for wind and solar power plants from oedb. + Import feed-in time series data for wind and solar power plants from the + `OpenEnergy DataBase `_. 
Parameters ---------- @@ -100,7 +101,7 @@ def _retrieve_timeseries_from_oedb(session, timeindex): return feedin.loc[timeindex] -def load_time_series_demandlib(config_data, year): +def load_time_series_demandlib(config_data, timeindex): """ Get normalized sectoral electricity load time series using the `demandlib `_. @@ -115,8 +116,8 @@ def load_time_series_demandlib(config_data, year): config_data : :class:`~.tools.config.Config` Configuration data from config files, relevant for industrial load profiles. - year : int - Year for which to generate load time series. + timeindex : :pandas:`pandas.DatetimeIndex` + Timesteps for which to generate load time series. Returns ------- @@ -127,6 +128,7 @@ def load_time_series_demandlib(config_data, year): hold the sector type. """ + year = timeindex[0].year sectoral_consumption = {"h0": 1, "g0": 1, "i0": 1, "l0": 1} @@ -180,4 +182,4 @@ def load_time_series_demandlib(config_data, year): inplace=True, ) - return elec_demand + return elec_demand.loc[timeindex] diff --git a/edisgo/network/components.py b/edisgo/network/components.py index 4ef428f97..1b67284fc 100644 --- a/edisgo/network/components.py +++ b/edisgo/network/components.py @@ -8,7 +8,7 @@ if "READTHEDOCS" not in os.environ: from shapely.geometry import Point -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) class BasicComponent(ABC): @@ -181,73 +181,6 @@ def __repr__(self): return "_".join([self.__class__.__name__, str(self._id)]) -# ToDo implement if needed -# class Station(Component): -# """Station object (medium or low voltage) -# -# Represents a station, contains transformers. 
-# -# Attributes -# ---------- -# """ -# -# def __init__(self, **kwargs): -# super().__init__(**kwargs) -# -# self._transformers = kwargs.get('transformers', None) -# -# @property -# def transformers(self): -# """:obj:`list` of :class:`Transformer` : Transformers located in -# station""" -# return self._transformers -# -# @transformers.setter -# def transformers(self, transformer): -# """ -# Parameters -# ---------- -# transformer : :obj:`list` of :class:`Transformer` -# """ -# self._transformers = transformer -# -# def add_transformer(self, transformer): -# self._transformers.append(transformer) -# -# -# class Transformer(Component): -# """Transformer object -# -# Attributes -# ---------- -# _voltage_op : :obj:`float` -# Operational voltage -# _type : :pandas:`pandas.DataFrame` -# Specification of type, refers to ToDo: ADD CORRECT REF TO (STATIC) DATA -# """ -# -# def __init__(self, **kwargs): -# super().__init__(**kwargs) -# self._mv_grid = kwargs.get('mv_grid', None) -# self._voltage_op = kwargs.get('voltage_op', None) -# self._type = kwargs.get('type', None) -# -# @property -# def mv_grid(self): -# return self._mv_grid -# -# @property -# def voltage_op(self): -# return self._voltage_op -# -# @property -# def type(self): -# return self._type -# -# def __repr__(self): -# return str(self._id) - - class Load(Component): """ Load object @@ -273,7 +206,7 @@ def _network_component_df(self): return self.topology.loads_df @property - def p_nom(self): + def p_set(self): """ Peak load in MW. @@ -288,11 +221,11 @@ def p_nom(self): Peak load in MW. 
""" - return self.topology.loads_df.at[self.id, "p_nom"] + return self.topology.loads_df.at[self.id, "p_set"] - @p_nom.setter - def p_nom(self, p_nom): - self.topology._loads_df.at[self.id, "p_nom"] = float(p_nom) + @p_set.setter + def p_set(self, p_set): + self.topology._loads_df.at[self.id, "p_set"] = float(p_set) @property def annual_consumption(self): @@ -421,7 +354,8 @@ def nominal_power(self): Nominal power of generator in MW. """ - # ToDo: Should this change the time series as well? (same for loads, and type setter...) + # TODO: Should this change the time series as well? + # (same for loads, and type setter...) return self.topology.generators_df.at[self.id, "p_nom"] @nominal_power.setter @@ -1005,118 +939,3 @@ def _get_bus_column(self, bus): else: return None return col - - -# ToDo implement if needed -# class MVStation(Station): -# """MV Station object""" -# -# def __init__(self, **kwargs): -# super().__init__(**kwargs) -# -# def __repr__(self, side=None): -# repr_base = super().__repr__() -# -# # As we don't consider HV-MV transformers in PFA, we don't have to care -# # about primary side bus of MV station. 
Hence, the general repr() -# # currently returned, implicitely refers to the secondary side (MV level) -# # if side == 'hv': -# # return '_'.join(['primary', repr_base]) -# # elif side == 'mv': -# # return '_'.join(['secondary', repr_base]) -# # else: -# # return repr_base -# return repr_base -# -# -# class LVStation(Station): -# """LV Station object""" -# -# def __init__(self, **kwargs): -# super().__init__(**kwargs) -# self._mv_grid = kwargs.get('mv_grid', None) -# -# @property -# def mv_grid(self): -# return self._mv_grid -# -# def __repr__(self, side=None): -# repr_base = super().__repr__() -# -# if side == 'mv': -# return '_'.join(['primary', repr_base]) -# elif side == 'lv': -# return '_'.join(['secondary', repr_base]) -# else: -# return repr_base - -# ToDo Implement if necessary -# class Line(Component): -# """ -# Line object -# -# Parameters -# ---------- -# _type: :pandas:`pandas.Series` -# Equipment specification including R and X for power flow analysis -# Columns: -# -# ======== ================== ====== ========= -# Column Description Unit Data type -# ======== ================== ====== ========= -# name Name (e.g. NAYY..) - str -# U_n Nominal voltage kV int -# I_max_th Max. th. current A float -# R Resistance Ohm/km float -# L Inductance mH/km float -# C Capacitance uF/km float -# Source Data source - str -# ============================================ -# -# _length: float -# Length of the line calculated in linear distance. Unit: m -# _quantity: float -# Quantity of parallel installed lines. -# _kind: String -# Specifies whether the line is an underground cable ('cable') or an -# overhead line ('line'). 
-# """ -# -# def __init__(self, **kwargs): -# super().__init__(**kwargs) -# self._type = kwargs.get('type', None) -# self._length = kwargs.get('length', None) -# self._quantity = kwargs.get('quantity', 1) -# self._kind = kwargs.get('kind', None) -# -# @property -# def geom(self): -# """Provide :shapely:`Shapely LineString object` geometry of -# :class:`Line`""" -# adj_nodes = self._grid._graph.nodes_from_line(self) -# -# return LineString([adj_nodes[0].geom, adj_nodes[1].geom]) -# -# @property -# def type(self): -# return self._type -# -# @type.setter -# def type(self, new_type): -# self._type = new_type -# -# @property -# def length(self): -# return self._length -# -# @length.setter -# def length(self, new_length): -# self._length = new_length -# -# @property -# def quantity(self): -# return self._quantity -# -# @quantity.setter -# def quantity(self, new_quantity): -# self._quantity = new_quantity diff --git a/edisgo/network/grids.py b/edisgo/network/grids.py index f3a25c35c..a3ed2d1af 100644 --- a/edisgo/network/grids.py +++ b/edisgo/network/grids.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from abc import ABC, abstractmethod import matplotlib.pyplot as plt @@ -7,6 +9,7 @@ from networkx.drawing.nx_pydot import graphviz_layout from edisgo.network.components import Generator, Load, Switch +from edisgo.tools.geopandas_helper import to_geopandas from edisgo.tools.networkx_helper import translate_df_to_graph @@ -76,6 +79,24 @@ def graph(self): """ return translate_df_to_graph(self.buses_df, self.lines_df) + @property + def geopandas(self): + """ + Returns components as :geopandas:`GeoDataFrame`\\ s + + Returns container with :geopandas:`GeoDataFrame`\\ s containing all + georeferenced components within the grid. 
+ + Returns + ------- + :class:`~.tools.geopandas_helper.GeoPandasGridContainer` or \ + list(:class:`~.tools.geopandas_helper.GeoPandasGridContainer`) + Data container with GeoDataFrames containing all georeferenced components + within the grid(s). + + """ + return to_geopandas(self) + @property def station(self): """ @@ -143,8 +164,8 @@ def loads(self): List of loads within the network. """ - for l in self.loads_df.index: - yield Load(id=l, edisgo_obj=self.edisgo_obj) + for load in self.loads_df.index: + yield Load(id=load, edisgo_obj=self.edisgo_obj) @property def storage_units_df(self): @@ -173,12 +194,10 @@ def charging_points_df(self): :pandas:`pandas.DataFrame` Dataframe with all charging points in topology. For more information on the dataframe see - :attr:`~.network.topology.Topology.charging_points_df`. + :attr:`~.network.topology.Topology.loads_df`. """ - return self.edisgo_obj.topology.charging_points_df[ - self.edisgo_obj.topology.charging_points_df.bus.isin(self.buses_df.index) - ] + return self.loads_df[self.loads_df.type == "charging_point"] @property def switch_disconnectors_df(self): @@ -288,7 +307,7 @@ def peak_generation_capacity_per_technology(self): return self.generators_df.groupby(["type"]).sum()["p_nom"] @property - def p_nom(self): + def p_set(self): """ Cumulative peak load of loads in the network in MW. @@ -298,10 +317,10 @@ def p_nom(self): Cumulative peak load of loads in the network in MW. """ - return self.loads_df.p_nom.sum() + return self.loads_df.p_set.sum() @property - def p_nom_per_sector(self): + def p_set_per_sector(self): """ Cumulative peak load of loads in the network per sector in MW. @@ -311,7 +330,7 @@ def p_nom_per_sector(self): Cumulative peak load of loads in the network per sector in MW. 
""" - return self.loads_df.groupby(["sector"]).sum()["p_nom"] + return self.loads_df.groupby(["sector"]).sum()["p_set"] def __repr__(self): return "_".join([self.__class__.__name__, str(self.id)]) @@ -493,7 +512,7 @@ def draw( edge_color_is_sequence = False node_size = [ - top.get_connected_components_from_bus(v)["loads"].p_nom.sum() * 50000 + 10 + top.get_connected_components_from_bus(v)["loads"].p_set.sum() * 50000 + 10 for v in G ] if isinstance(node_color, pd.Series): @@ -535,3 +554,10 @@ def draw( else: plt.savefig(filename, dpi=150, bbox_inches="tight", pad_inches=0.1) plt.close() + + @property + def geopandas(self): + """ + TODO: Remove this as soon as LVGrids are georeferenced + """ + raise NotImplementedError("LV Grids are not georeferenced yet.") diff --git a/edisgo/network/results.py b/edisgo/network/results.py index 398ca7824..7257419f1 100755 --- a/edisgo/network/results.py +++ b/edisgo/network/results.py @@ -5,7 +5,7 @@ import numpy as np import pandas as pd -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) def _get_matching_dict_of_attributes_and_file_names(): @@ -126,7 +126,7 @@ def pfa_p(self): """ try: return self._pfa_p - except: + except Exception: return pd.DataFrame() @pfa_p.setter @@ -171,7 +171,7 @@ def pfa_q(self): """ try: return self._pfa_q - except: + except Exception: return pd.DataFrame() @pfa_q.setter @@ -205,7 +205,7 @@ def v_res(self): """ try: return self._v_res - except: + except Exception: return pd.DataFrame() @v_res.setter @@ -240,7 +240,7 @@ def i_res(self): """ try: return self._i_res - except: + except Exception: return pd.DataFrame() @i_res.setter @@ -328,7 +328,7 @@ def equipment_changes(self): """ try: return self._equipment_changes - except: + except Exception: return pd.DataFrame() @equipment_changes.setter @@ -387,7 +387,7 @@ def grid_expansion_costs(self): """ try: return self._grid_expansion_costs - except: + except Exception: return pd.DataFrame() @grid_expansion_costs.setter @@ -437,7 
+437,7 @@ def grid_losses(self): """ try: return self._grid_losses - except: + except Exception: return pd.DataFrame() @grid_losses.setter @@ -477,7 +477,7 @@ def pfa_slack(self): """ try: return self._pfa_slack - except: + except Exception: return pd.DataFrame() @pfa_slack.setter @@ -514,7 +514,7 @@ def pfa_v_mag_pu_seed(self): """ try: return self._pfa_v_mag_pu_seed - except: + except Exception: return pd.DataFrame() @pfa_v_mag_pu_seed.setter @@ -552,7 +552,7 @@ def pfa_v_ang_seed(self): """ try: return self._pfa_v_ang_seed - except: + except Exception: return pd.DataFrame() @pfa_v_ang_seed.setter @@ -709,7 +709,7 @@ def unresolved_issues(self): """ try: return self._unresolved_issues - except: + except Exception: return pd.DataFrame() @unresolved_issues.setter @@ -732,16 +732,19 @@ def _add_line_to_equipment_changes(self, line): contain `type_info`. Line representative is the series name. """ - self.equipment_changes = self.equipment_changes.append( - pd.DataFrame( - { - "iteration_step": [0], - "change": ["added"], - "equipment": [line.type_info], - "quantity": [1], - }, - index=[line.name], - ) + self.equipment_changes = pd.concat( + [ + self.equipment_changes, + pd.DataFrame( + { + "iteration_step": [0], + "change": ["added"], + "equipment": [line.type_info], + "quantity": [1], + }, + index=[line.name], + ), + ] ) def _del_line_from_equipment_changes(self, line_repr): @@ -988,7 +991,7 @@ def _save_storage_integration_results(target_dir): } if not save_seed: parameters["powerflow_results"] = [ - _ for _ in parameters["powerflow_results"] if not "seed" in _ + _ for _ in parameters["powerflow_results"] if "seed" not in _ ] if not isinstance(parameters, dict): @@ -1010,8 +1013,8 @@ def _save_storage_integration_results(target_dir): ) logger.error(message) raise KeyError(message) - except: - raise + except Exception: + raise # save measures pd.DataFrame(data={"measure": self.measures}).to_csv( diff --git a/edisgo/network/timeseries.py
b/edisgo/network/timeseries.py index 252408248..0af469a81 100644 --- a/edisgo/network/timeseries.py +++ b/edisgo/network/timeseries.py @@ -1,1859 +1,2236 @@ +from __future__ import annotations + +import itertools import logging import os import numpy as np import pandas as pd +from edisgo.flex_opt import q_control from edisgo.io import timeseries_import from edisgo.tools.tools import ( assign_voltage_level_to_component, - drop_duplicated_columns, get_weather_cells_intersecting_with_grid_district, ) -logger = logging.getLogger("edisgo") - - -def _get_attributes_to_save(): - """ - Helper function to specify which TimeSeries attributes to save and restore. - - Is used in functions :attr:`~.network.timeseries.TimeSeries.to_csv` - and :attr:`~.network.timeseries.TimeSeries.from_csv`. - - Returns - ------- - list - List of TimeSeries attributes to save and restore. - - """ - return [ - "loads_active_power", - "loads_reactive_power", - "generators_active_power", - "generators_reactive_power", - "charging_points_active_power", - "charging_points_reactive_power", - "storage_units_active_power", - "storage_units_reactive_power", - ] +logger = logging.getLogger(__name__) class TimeSeries: """ - Defines time series for all loads, generators and storage units in network - (if set). + Holds component-specific active and reactive power time series. - Can also contain time series for loads (sector-specific), generators - (technology-specific), and curtailment (technology-specific). + All time series are fixed time series that in case of flexibilities result after + application of a heuristic or optimisation. They can be used for power flow + calculations. - Parameters - ----------- + Also holds any raw time series data that was used to generate component-specific + time series in attribute `time_series_raw`. See + :class:`~.network.timeseries.TimeSeriesRaw` for more information. 
+ + Other Parameters + ----------------- timeindex : :pandas:`pandas.DatetimeIndex`, optional Can be used to define a time range for which to obtain the provided time series and run power flow analysis. Default: None. - generators_active_power : :pandas:`pandas.DataFrame`, optional - Active power timeseries of all generators in topology. Index of - DataFrame has to contain timeindex and column names are names of - generators. - generators_reactive_power : :pandas:`pandas.DataFrame`, optional - Reactive power timeseries of all generators in topology. Format is the - same as for generators_active power. - loads_active_power : :pandas:`pandas.DataFrame`, optional - Active power timeseries of all loads in topology. Index of DataFrame - has to contain timeindex and column names are names of loads. - loads_reactive_power : :pandas:`pandas.DataFrame`, optional - Reactive power timeseries of all loads in topology. Format is the - same as for loads_active power. - storage_units_active_power : :pandas:`pandas.DataFrame`, optional - Active power timeseries of all storage units in topology. Index of - DataFrame has to contain timeindex and column names are names of - storage units. - storage_units_reactive_power : :pandas:`pandas.DataFrame`, optional - Reactive power timeseries of all storage_units in topology. Format is - the same as for storage_units_active power. - curtailment : :pandas:`pandas.DataFrame` or list, optional - In the case curtailment is applied to all fluctuating renewables - this needs to be a DataFrame with active power curtailment time series. - Time series can either be aggregated by technology type or by type - and weather cell ID. In the first case columns of the DataFrame are - 'solar' and 'wind'; in the second case columns need to be a - :pandas:`pandas.MultiIndex` with the first level - containing the type and the second level the weather cell ID. 
- In the case curtailment is only applied to specific generators, this - parameter needs to be a list of all generators that are curtailed. - Default: None. - - Notes - ----- - Can also hold the following attributes when specific mode of - :meth:`get_component_timeseries` is called: mode, generation_fluctuating, - generation_dispatchable, generation_reactive_power, load, - load_reactive_power. See description of meth:`get_component_timeseries` for - format of these. + + Attributes + ----------- + time_series_raw : :class:`~.network.timeseries.TimeSeriesRaw` + Raw time series. See :class:`~.network.timeseries.TimeSeriesRaw` for more + information. """ def __init__(self, **kwargs): self._timeindex = kwargs.get("timeindex", pd.DatetimeIndex([])) + self.time_series_raw = TimeSeriesRaw() + + @property + def is_worst_case(self) -> bool: + """ + Time series mode. + + Is used to distinguish between normal time series analysis and worst-case + analysis. Is determined by checking if the timeindex starts before 1971 as the + default for worst-case is 1970. Be mindful when creating your own worst-cases. + + Returns + ------- + bool + Indicates if current time series is worst-case time series with different + assumptions for mv and lv simultaneities. + """ + if len(self.timeindex) > 0: + return self.timeindex[0] < pd.Timestamp("1971-01-01") + return False @property def timeindex(self): """ - Defines analysed time steps. + Time index all time-dependent attributes are indexed by. - Can be used to define a time range for which to obtain the provided - time series and run power flow analysis. + Is used as default time steps in e.g. power flow analysis. Parameters ----------- - ind : timestamp or list(timestamp) + ind : :pandas:`pandas.DatetimeIndex` + Time index all time-dependent attributes are indexed by. Returns ------- :pandas:`pandas.DatetimeIndex` - See class definition for details. + Time index all time-dependent attributes are indexed by.
""" return self._timeindex @timeindex.setter def timeindex(self, ind): - # make iterable - if not hasattr(ind, "__len__"): - ind = [ind] - # make datetime index - ind = pd.DatetimeIndex(ind) - if len(self._timeindex) > 0: - # check if new time index is subset of existing time index - if not ind.isin(self._timeindex).all(): - logger.warning( - "Not all time steps of new time index lie within existing " - "time index. This may cause problems later on." - ) + if len(self._timeindex) > 0 and not ind.isin(self._timeindex).all(): + logger.warning( + "Not all time steps of new time index lie within existing " + "time index. This may cause problems later on." + ) self._timeindex = ind + def _internal_getter(self, attribute): + try: + return getattr(self, f"_{attribute}").loc[self.timeindex, :] + except AttributeError: + return pd.DataFrame(index=self.timeindex) + except KeyError: + logger.warning( + f"Timeindex and {attribute} have deviating indices. " + "Empty dataframe will be returned." + ) + return pd.DataFrame(index=self.timeindex) + @property def generators_active_power(self): """ - Active power time series of all generators in MW. + Active power time series of generators in MW. + + Parameters + ---------- + df : :pandas:`pandas.DataFrame` + Active power time series of all generators in topology in MW. Index of the + dataframe is a time index and column names are names of generators. Returns ------- :pandas:`pandas.DataFrame` - See class definition for details. + Active power time series of all generators in topology in MW for time steps + given in :py:attr:`~timeindex`. For more information on the dataframe see + input parameter `df`. 
""" - try: - return self._generators_active_power.loc[self.timeindex, :] - except: - return pd.DataFrame(index=self.timeindex) + return self._internal_getter("generators_active_power") @generators_active_power.setter - def generators_active_power(self, generators_active_power_ts): - self._generators_active_power = generators_active_power_ts + def generators_active_power(self, df): + self._generators_active_power = df @property def generators_reactive_power(self): """ - Reactive power timeseries of generators in MVA. + Reactive power time series of generators in MVA. + + Parameters + ---------- + df : :pandas:`pandas.DataFrame` + Reactive power time series of all generators in topology in MVA. Index of + the dataframe is a time index and column names are names of generators. Returns ------- :pandas:`pandas.DataFrame` - See class definition for details. + Reactive power time series of all generators in topology in MVA for time + steps given in :py:attr:`~timeindex`. For more information on the dataframe + see input parameter `df`. """ - try: - return self._generators_reactive_power.loc[self.timeindex, :] - except: - return pd.DataFrame(index=self.timeindex) + return self._internal_getter("generators_reactive_power") @generators_reactive_power.setter - def generators_reactive_power(self, generators_reactive_power_ts): - self._generators_reactive_power = generators_reactive_power_ts + def generators_reactive_power(self, df): + self._generators_reactive_power = df @property def loads_active_power(self): """ - Active power timeseries of loads in MW. + Active power time series of loads in MW. + + Parameters + ---------- + df : :pandas:`pandas.DataFrame` + Active power time series of all loads in topology in MW. Index of the + dataframe is a time index and column names are names of loads. Returns ------- - dict or :pandas:`pandas.DataFrame` - See class definition for details. 
+ :pandas:`pandas.DataFrame` + Active power time series of all loads in topology in MW for time steps + given in :py:attr:`~timeindex`. For more information on the dataframe see + input parameter `df`. """ - try: - return self._loads_active_power.loc[self.timeindex, :] - except: - return pd.DataFrame(index=self.timeindex) + return self._internal_getter("loads_active_power") @loads_active_power.setter - def loads_active_power(self, loads_active_power_ts): - self._loads_active_power = loads_active_power_ts + def loads_active_power(self, df): + self._loads_active_power = df @property def loads_reactive_power(self): """ - Reactive power timeseries in MVA. + Reactive power time series of loads in MVA. + + Parameters + ---------- + df : :pandas:`pandas.DataFrame` + Reactive power time series of all loads in topology in MVA. Index of + the dataframe is a time index and column names are names of loads. Returns ------- :pandas:`pandas.DataFrame` - See class definition for details. + Reactive power time series of all loads in topology in MVA for time + steps given in :py:attr:`~timeindex`. For more information on the dataframe + see input parameter `df`. """ - try: - return self._loads_reactive_power.loc[self.timeindex, :] - except: - return pd.DataFrame(index=self.timeindex) + return self._internal_getter("loads_reactive_power") @loads_reactive_power.setter - def loads_reactive_power(self, loads_reactive_power_ts): - self._loads_reactive_power = loads_reactive_power_ts + def loads_reactive_power(self, df): + self._loads_reactive_power = df @property def storage_units_active_power(self): """ - Active power timeseries of storage units in MW. - - Returns - ------- - dict or :pandas:`pandas.DataFrame` - See class definition for details. 
- - """ - try: - return self._storage_units_active_power.loc[self.timeindex, :] - except: - return pd.DataFrame(index=self.timeindex) - - @storage_units_active_power.setter - def storage_units_active_power(self, storage_units_active_power_ts): - self._storage_units_active_power = storage_units_active_power_ts + Active power time series of storage units in MW. - @property - def storage_units_reactive_power(self): - """ - Reactive power timeseries of storage units in MVA. + Parameters + ---------- + df : :pandas:`pandas.DataFrame` + Active power time series of all storage units in topology in MW. Index of + the dataframe is a time index and column names are names of storage units. Returns ------- :pandas:`pandas.DataFrame` - See class definition for details. + Active power time series of all storage units in topology in MW for time + steps given in :py:attr:`~timeindex`. For more information on the dataframe + see input parameter `df`. """ - try: - return self._storage_units_reactive_power.loc[self.timeindex, :] - except: - return pd.DataFrame(index=self.timeindex) + return self._internal_getter("storage_units_active_power") - @storage_units_reactive_power.setter - def storage_units_reactive_power(self, storage_units_reactive_power_ts): - self._storage_units_reactive_power = storage_units_reactive_power_ts + @storage_units_active_power.setter + def storage_units_active_power(self, df): + self._storage_units_active_power = df @property - def charging_points_active_power(self): - """ - Active power timeseries of charging points in MW. - - Returns - ------- - dict or :pandas:`pandas.DataFrame` - See class definition for details. - + def storage_units_reactive_power(self): """ - try: - return self._charging_points_active_power.loc[self.timeindex, :] - except: - return pd.DataFrame(index=self.timeindex) + Reactive power time series of storage units in MVA. 
- @charging_points_active_power.setter - def charging_points_active_power(self, charging_points_active_power_ts): - self._charging_points_active_power = charging_points_active_power_ts - - @property - def charging_points_reactive_power(self): - """ - Reactive power timeseries of charging points in MVA. + Parameters + ---------- + df : :pandas:`pandas.DataFrame` + Reactive power time series of all storage units in topology in MVA. Index of + the dataframe is a time index and column names are names of storage units. Returns ------- :pandas:`pandas.DataFrame` - See class definition for details. + Reactive power time series of all storage units in topology in MVA for time + steps given in :py:attr:`~timeindex`. For more information on the dataframe + see input parameter `df`. """ - try: - return self._charging_points_reactive_power.loc[self.timeindex, :] - except: - return pd.DataFrame(index=self.timeindex) + return self._internal_getter("storage_units_reactive_power") - @charging_points_reactive_power.setter - def charging_points_reactive_power(self, charging_points_reactive_power_ts): - self._charging_points_reactive_power = charging_points_reactive_power_ts - - # @property - # def curtailment(self): - # """ - # Get curtailment time series of dispatchable generators (only active - # power) - # - # Parameters - # ---------- - # curtailment : list or :pandas:`pandas.DataFrame` - # See class definition for details. - # - # Returns - # ------- - # :pandas:`pandas.DataFrame` - # In the case curtailment is applied to all solar and wind generators - # curtailment time series either aggregated by technology type or by - # type and weather cell ID are returnded. In the first case columns - # of the DataFrame are 'solar' and 'wind'; in the second case columns - # need to be a :pandas:`pandas.MultiIndex` with the - # first level containing the type and the second level the weather - # cell ID. 
- # In the case curtailment is only applied to specific generators, - # curtailment time series of all curtailed generators, specified in - # by the column name are returned. - # - # """ - # if self._curtailment is not None: - # if isinstance(self._curtailment, pd.DataFrame): - # try: - # return self._curtailment.loc[[self.timeindex], :] - # except: - # return self._curtailment.loc[self.timeindex, :] - # elif isinstance(self._curtailment, list): - # try: - # curtailment = pd.DataFrame() - # for gen in self._curtailment: - # curtailment[gen] = gen.curtailment - # return curtailment - # except: - # raise - # else: - # return None - # - # @curtailment.setter - # def curtailment(self, curtailment): - # self._curtailment = curtailment + @storage_units_reactive_power.setter + def storage_units_reactive_power(self, df): + self._storage_units_reactive_power = df - @property - def residual_load(self): + def reset(self): """ - Returns residual load. - - Residual load for each time step is calculated from total load - (including charging points) minus total generation minus - storage active power (discharge is positive). - A positive residual load represents a load case while a negative - residual load here represents a feed-in case. - Grid losses are not considered. - - Returns - ------- - :pandas:`pandas.Series` + Resets all time series. - Series with residual load in MW. + Active and reactive power time series of all loads, generators and storage units + are deleted, as well as everything stored in :py:attr:`~time_series_raw`. 
""" - return ( - self.loads_active_power.sum(axis=1) - + self.charging_points_active_power.sum(axis=1) - - self.generators_active_power.sum(axis=1) - - self.storage_units_active_power.sum(axis=1) - ) - - @property - def timesteps_load_feedin_case(self): + self.generators_active_power = None + self.loads_active_power = None + self.storage_units_active_power = None + self.generators_reactive_power = None + self.loads_reactive_power = None + self.storage_units_reactive_power = None + self.time_series_raw = TimeSeriesRaw() + + def set_active_power_manual( + self, edisgo_object, ts_generators=None, ts_loads=None, ts_storage_units=None + ): """ - Contains residual load and information on feed-in and load case. - - Residual load is calculated from total (load - generation) in the - network. Grid losses are not considered. - - Feed-in and load case are identified based on the - generation, load and storage time series and defined as follows: - - 1. Load case: positive (load - generation - storage) at HV/MV - substation - 2. Feed-in case: negative (load - generation - storage) at HV/MV - substation + Sets given component active power time series. - Returns - ------- - :pandas:`pandas.Series` + If time series for a component were already set before, they are overwritten. - Series with information on whether time step is handled as load - case ('load_case') or feed-in case ('feedin_case') for each time - step in :py:attr:`~timeindex`. + Parameters + ---------- + edisgo_object : :class:`~.EDisGo` + ts_generators : :pandas:`pandas.DataFrame` + Active power time series in MW of generators. Index of the data frame is + a datetime index. Columns contain generators names of generators to set + time series for. + ts_loads : :pandas:`pandas.DataFrame` + Active power time series in MW of loads. Index of the data frame is + a datetime index. Columns contain load names of loads to set + time series for. 
+ ts_storage_units : :pandas:`pandas.DataFrame` + Active power time series in MW of storage units. Index of the data frame is + a datetime index. Columns contain storage unit names of storage units to set + time series for. """ - - return self.residual_load.apply( - lambda _: "feedin_case" if _ < 0.0 else "load_case" + self._set_manual( + edisgo_object, + "active", + ts_generators=ts_generators, + ts_loads=ts_loads, + ts_storage_units=ts_storage_units, ) - def reduce_memory(self, attr_to_reduce=None, to_type="float32"): + def set_reactive_power_manual( + self, edisgo_object, ts_generators=None, ts_loads=None, ts_storage_units=None + ): """ - Reduces size of dataframes to save memory. + Sets given component reactive power time series. - See :attr:`EDisGo.reduce_memory` for more information. + If time series for a component were already set before, they are overwritten. Parameters - ----------- - attr_to_reduce : list(str), optional - List of attributes to reduce size for. Attributes need to be - dataframes containing only time series. Per default, all active - and reactive power time series of generators, loads, storage units - and charging points are reduced. - to_type : str, optional - Data type to convert time series data to. This is a tradeoff - between precision and memory. Default: "float32". + ---------- + edisgo_object : :class:`~.EDisGo` + ts_generators : :pandas:`pandas.DataFrame` + Reactive power time series in MVA of generators. Index of the data frame is + a datetime index. Columns contain generators names of generators to set + time series for. + ts_loads : :pandas:`pandas.DataFrame` + Reactive power time series in MVA of loads. Index of the data frame is + a datetime index. Columns contain load names of loads to set + time series for. + ts_storage_units : :pandas:`pandas.DataFrame` + Reactive power time series in MVA of storage units. Index of the data frame + is a datetime index. 
Columns contain storage unit names of storage units to + set time series for. """ - if attr_to_reduce is None: - attr_to_reduce = [ - "generators_active_power", - "generators_reactive_power", - "loads_active_power", - "loads_reactive_power", - "charging_points_active_power", - "charging_points_reactive_power", - "storage_units_active_power", - "storage_units_reactive_power", - ] - for attr in attr_to_reduce: - setattr( - self, - attr, - getattr(self, attr).apply(lambda _: _.astype(to_type)), - ) + self._set_manual( + edisgo_object, + "reactive", + ts_generators=ts_generators, + ts_loads=ts_loads, + ts_storage_units=ts_storage_units, + ) - def to_csv(self, directory, reduce_memory=False, **kwargs): + def _set_manual( + self, + edisgo_object, + mode, + ts_generators=None, + ts_loads=None, + ts_storage_units=None, + ): """ - Saves component time series to csv. + Sets given component time series. - Saves the following time series to csv files with the same file name - (if the time series dataframe is not empty): - - * loads_active_power and loads_reactive_power - * generators_active_power and generators_reactive_power - * charging_points_active_power and charging_points_reactive_power - * storage_units_active_power and storage_units_reactive_power + If time series for a component were already set before, they are overwritten. Parameters ---------- - directory: str - Directory to save time series in. - reduce_memory : bool, optional - If True, size of dataframes is reduced using - :attr:`~.network.timeseries.TimeSeries.reduce_memory`. Optional - parameters of :attr:`~.network.timeseries.TimeSeries.reduce_memory` - can be passed as kwargs to this function. Default: False. - - Other Parameters - ------------------ - kwargs : - Kwargs may contain optional arguments of - :attr:`~.network.timeseries.TimeSeries.reduce_memory`. + edisgo_object : :class:`~.EDisGo` + mode : str + Defines whether to set active or reactive power time series. 
Possible + options are "active" and "reactive". + ts_generators : :pandas:`pandas.DataFrame` + Active or reactive power time series in MW or MVA of generators. + Index of the data frame is a datetime index. Columns contain generator + names of generators to set time series for. + ts_loads : :pandas:`pandas.DataFrame` + Active or reactive power time series in MW or MVA of loads. + Index of the data frame is a datetime index. Columns contain load names of + loads to set time series for. + ts_storage_units : :pandas:`pandas.DataFrame` + Active or reactive power time series in MW or MVA of storage units. + Index of the data frame is a datetime index. Columns contain storage unit + names of storage units to set time series for. """ - save_attributes = _get_attributes_to_save() + if ts_generators is not None: + # check if all generators time series are provided for exist in the network + # and only set time series for those that do + comps_in_network = _check_if_components_exist( + edisgo_object, ts_generators.columns, "generators" + ) + ts_generators = ts_generators.loc[:, comps_in_network] - if reduce_memory is True: - self.reduce_memory(**kwargs) + # drop generators time series from self.generators_(re)active_power that may + # already exist for some of the given generators + df_name = f"generators_{mode}_power" + drop_component_time_series( + obj=self, df_name=df_name, comp_names=ts_generators.columns + ) + # set (re)active power + _add_component_time_series(obj=self, df_name=df_name, ts_new=ts_generators) + + if ts_loads is not None: + # check if all loads time series are provided for exist in the network + # and only set time series for those that do + comps_in_network = _check_if_components_exist( + edisgo_object, ts_loads.columns, "loads" + ) + ts_loads = ts_loads.loc[:, comps_in_network] - os.makedirs(directory, exist_ok=True) + # drop load time series from self.loads_(re)active_power that may + # already exist for some of the given loads + df_name = 
f"loads_{mode}_power" + drop_component_time_series( + obj=self, df_name=df_name, comp_names=ts_loads.columns + ) + # set (re)active power + _add_component_time_series(obj=self, df_name=df_name, ts_new=ts_loads) + + if ts_storage_units is not None: + # check if all storage units time series are provided for exist in the + # network and only set time series for those that do + comps_in_network = _check_if_components_exist( + edisgo_object, ts_storage_units.columns, "storage_units" + ) + ts_storage_units = ts_storage_units.loc[:, comps_in_network] - for attr in save_attributes: - if not getattr(self, attr).empty: - getattr(self, attr).to_csv( - os.path.join(directory, "{}.csv".format(attr)) - ) + # drop storage unit time series from self.storage_units_(re)active_power + # that may already exist for some of the given storage units + df_name = f"storage_units_{mode}_power" + drop_component_time_series( + obj=self, df_name=df_name, comp_names=ts_storage_units.columns + ) + # set (re)active power + _add_component_time_series( + obj=self, df_name=df_name, ts_new=ts_storage_units + ) - def from_csv(self, directory): + def set_worst_case( + self, + edisgo_object, + cases, + generators_names=None, + loads_names=None, + storage_units_names=None, + ): """ - Restores time series from csv files. - - See :func:`~to_csv` for more information on which time series are - saved. + Sets demand and feed-in of loads, generators and storage units for the + specified worst cases. + + Per default time series are set for all loads, generators and storage units + in the network. + + Possible worst cases are 'load_case' (heavy load flow case) and 'feed-in_case' + (reverse power flow case). Each case is set up once for dimensioning of the MV + grid ('load_case_mv'/'feed-in_case_mv') and once for the dimensioning of the LV + grid ('load_case_lv'/'feed-in_case_lv'), as different simultaneity factors are + assumed for the different voltage levels. 
+ + Assumed simultaneity factors specified in the config section + `worst_case_scale_factor` are used to generate active power demand or feed-in. + For the reactive power behavior fixed cosphi is assumed. The power factors + set in the config section `reactive_power_factor` and the power factor + mode, defining whether components behave inductive or capacitive, given + in the config section `reactive_power_mode`, are used. + + Component specific information is given below: + + * Generators + + Worst case feed-in time series are distinguished by technology (PV, wind + and all other) and whether it is a load or feed-in case. + In case of generator worst case time series it is not distinguished by + whether it is used to analyse the MV or LV. However, both options are + generated as it is distinguished in the case of loads. + Worst case scaling factors for generators are specified in + the config section `worst_case_scale_factor` through the parameters: + 'feed-in_case_feed-in_pv', 'feed-in_case_feed-in_wind', + 'feed-in_case_feed-in_other', + 'load_case_feed-in_pv', load_case_feed-in_wind', and + 'load_case_feed-in_other'. + + For reactive power a fixed cosphi is assumed. A different reactive power + factor is used for generators in the MV and generators in the LV. + The reactive power factors for generators are specified in + the config section `reactive_power_factor` through the parameters: + 'mv_gen' and 'lv_gen'. + + * Conventional loads + + Worst case load time series are distinguished by whether it + is a load or feed-in case and whether it used to analyse the MV or LV. + Worst case scaling factors for conventional loads are specified in + the config section `worst_case_scale_factor` through the parameters: + 'mv_feed-in_case_load', 'lv_feed-in_case_load', 'mv_load_case_load', and + 'lv_load_case_load'. + + For reactive power a fixed cosphi is assumed. A different reactive power + factor is used for loads in the MV and loads in the LV. 
+          The reactive power factors for conventional loads are specified in
+          the config section `reactive_power_factor` through the parameters:
+          'mv_load' and 'lv_load'.
+
+        * Charging points
+
+          Worst case demand time series are distinguished by use case (home charging,
+          work charging, public (slow) charging and HPC), by whether it is a load or
+          feed-in case and by whether it is used to analyse the MV or LV.
+          Worst case scaling factors for charging points are specified in
+          the config section `worst_case_scale_factor` through the parameters:
+          'mv_feed-in_case_cp_home', 'mv_feed-in_case_cp_work',
+          'mv_feed-in_case_cp_public', and 'mv_feed-in_case_cp_hpc',
+          'lv_feed-in_case_cp_home', 'lv_feed-in_case_cp_work',
+          'lv_feed-in_case_cp_public', and 'lv_feed-in_case_cp_hpc',
+          'mv_load_case_cp_home', 'mv_load_case_cp_work',
+          'mv_load_case_cp_public', and 'mv_load_case_cp_hpc',
+          'lv_load_case_cp_home', 'lv_load_case_cp_work',
+          'lv_load_case_cp_public', and 'lv_load_case_cp_hpc'.
+
+          For reactive power a fixed cosphi is assumed. A different reactive power
+          factor is used for charging points in the MV and charging points in the LV.
+          The reactive power factors for charging points are specified in
+          the config section `reactive_power_factor` through the parameters:
+          'mv_cp' and 'lv_cp'.
+
+        * Heat pumps
+
+          Worst case demand time series are distinguished by whether it is a load or
+          feed-in case and by whether it is used to analyse the MV or LV.
+          Worst case scaling factors for heat pumps are specified in
+          the config section `worst_case_scale_factor` through the parameters:
+          'mv_feed-in_case_hp', 'lv_feed-in_case_hp', 'mv_load_case_hp', and
+          'lv_load_case_hp'.
+
+          For reactive power a fixed cosphi is assumed. A different reactive power
+          factor is used for heat pumps in the MV and heat pumps in the LV.
+ The reactive power factors for heat pumps are specified in + the config section `reactive_power_factor` through the parameters: + 'mv_hp' and 'lv_hp'. + + * Storage units + + Worst case feed-in time series are distinguished by whether it is a load or + feed-in case. + In case of storage units worst case time series it is not distinguished by + whether it is used to analyse the MV or LV. However, both options are + generated as it is distinguished in the case of loads. + Worst case scaling factors for storage units are specified in + the config section `worst_case_scale_factor` through the parameters: + 'feed-in_case_storage' and 'load_case_storage'. + + For reactive power a fixed cosphi is assumed. A different reactive power + factor is used for storage units in the MV and storage units in the LV. + The reactive power factors for storage units are specified in + the config section `reactive_power_factor` through the parameters: + 'mv_storage' and 'lv_storage'. Parameters ---------- - directory : str - Directory time series are saved in. + edisgo_object : :class:`~.EDisGo` + cases : list(str) + List with worst-cases to generate time series for. Can be + 'feed-in_case', 'load_case' or both. + generators_names : list(str) + Defines for which generators to set worst case time series. If None, + time series are set for all generators. Default: None. + loads_names : list(str) + Defines for which loads to set worst case time series. If None, + time series are set for all loads. Default: None. + storage_units_names : list(str) + Defines for which storage units to set worst case time series. If None, + time series are set for all storage units. Default: None. + + Notes + ----- + Be careful, this function overwrites all previously set time series in the case + that these are not worst case time series. If previously set time series are + worst case time series is checked using :attr:`is_worst_case`. 
+ + Further, if this function is called for a component whose worst-case time series + are already set, they are overwritten, even if previously set time series + were set for a different worst-case. + + Also be aware that loads for which type information is not set are handled + as conventional loads. """ - timeindex = None - for attr in _get_attributes_to_save(): - path = os.path.join(directory, "{}.csv".format(attr)) - if os.path.exists(path): - setattr( - self, - attr, - pd.read_csv(path, index_col=0, parse_dates=True), - ) - if timeindex is None: - timeindex = getattr(self, "_{}".format(attr)).index - if timeindex is None: - timeindex = pd.DatetimeIndex([]) - self._timeindex = timeindex - - -def get_component_timeseries(edisgo_obj, **kwargs): - """ - Sets up TimeSeries Object. - - Parameters - ---------- - edisgo_obj : :class:`~.EDisGo` - The eDisGo data container - mode : :obj:`str`, optional - Mode must be set in case of worst-case analyses and can either be - 'worst-case' (both feed-in and load case), 'worst-case-feedin' (only - feed-in case) or 'worst-case-load' (only load case). All other - parameters except of `config-data` will be ignored. Default: None. - Mode can also be set to manual in order to give standard timeseries, - that are not obtained from oedb or demandlib. - timeseries_generation_fluctuating : :obj:`str` or :pandas:`pandas.DataFrame`, optional - Parameter used to obtain time series for active power feed-in of - fluctuating renewables wind and solar. - Possible options are: - - * 'oedb' - Time series for 2011 are obtained from the OpenEnergy DataBase. - * :pandas:`pandas.DataFrame` - DataFrame with time series, normalized with corresponding capacity. - Time series can either be aggregated by technology type or by type - and weather cell ID. 
In the first case columns of the DataFrame are - 'solar' and 'wind'; in the second case columns need to be a - :pandas:`pandas.MultiIndex` with the first level - containing the type and the second level the weather cell ID. - - Default: None. - timeseries_generation_dispatchable : :pandas:`pandas.DataFrame`, optional - DataFrame with time series for active power of each (aggregated) - type of dispatchable generator normalized with corresponding capacity. - Columns represent generator type: - - * 'gas' - * 'coal' - * 'biomass' - * 'other' - * ... - - Use 'other' if you don't want to explicitly provide every possible - type. Default: None. - timeseries_generation_reactive_power : :pandas:`pandas.DataFrame`, optional - DataFrame with time series of normalized reactive power (normalized by - the rated nominal active power) per technology and weather cell. Index - needs to be a :pandas:`pandas.DatetimeIndex`. - Columns represent generator type and can be a MultiIndex column - containing the weather cell ID in the second level. If the technology - doesn't contain weather cell information i.e. if it is other than solar - and wind generation, this second level can be left as an empty string ''. - - Default: None. - timeseries_load : :obj:`str` or :pandas:`pandas.DataFrame`, optional - Parameter used to obtain time series of active power of (cumulative) - loads. - Possible options are: - - * 'demandlib' - Time series are generated using the oemof demandlib. - * :pandas:`pandas.DataFrame` - DataFrame with load time series of each (cumulative) type of load - normalized with corresponding annual energy demand. - Columns represent load type: - - * 'residential' - * 'retail' - * 'industrial' - * 'agricultural' - - Default: None. - timeseries_load_reactive_power : :pandas:`pandas.DataFrame`, optional - Parameter to get the time series of the reactive power of loads. 
It should be a - DataFrame with time series of normalized reactive power (normalized by - annual energy demand) per load sector. Index needs to be a - :pandas:`pandas.DatetimeIndex`. - Columns represent load type: - - * 'residential' - * 'retail' - * 'industrial' - * 'agricultural' - - Default: None. - timeindex : :pandas:`pandas.DatetimeIndex` - Can be used to define a time range for which to obtain load time series - and feed-in time series of fluctuating renewables or to define time - ranges of the given time series that will be used in the analysis. - """ - mode = kwargs.get("mode", None) - timeindex = kwargs.get("timeindex", edisgo_obj.timeseries.timeindex) - # reset TimeSeries - edisgo_obj.timeseries = TimeSeries(timeindex=timeindex) - edisgo_obj.timeseries.mode = mode - if mode: - if "worst-case" in mode: - modes = _get_worst_case_modes(mode) - # set random timeindex - edisgo_obj.timeseries.timeindex = pd.date_range( - "1/1/1970", periods=len(modes), freq="H" - ) - _worst_case_generation(edisgo_obj=edisgo_obj, modes=modes) - _worst_case_load(edisgo_obj=edisgo_obj, modes=modes) - _worst_case_storage(edisgo_obj=edisgo_obj, modes=modes) - - elif mode == "manual": - if kwargs.get("loads_active_power", None) is not None: - edisgo_obj.timeseries.loads_active_power = kwargs.get( - "loads_active_power" - ) - if kwargs.get("loads_reactive_power", None) is not None: - edisgo_obj.timeseries.loads_reactive_power = kwargs.get( - "loads_reactive_power" + def _overwrite_time_series(p, q, comp_type): + ts_dict = {f"{comp_type}_active_power": p, f"{comp_type}_reactive_power": q} + for k, v in ts_dict.items(): + # drop previously set time series + drop_component_time_series(obj=self, df_name=k, comp_names=v.columns) + # set time series + _add_component_time_series( + obj=self, + df_name=k, + ts_new=v.rename(index=self.timeindex_worst_cases), ) - if kwargs.get("generators_active_power", None) is not None: - edisgo_obj.timeseries.generators_active_power = kwargs.get( - 
"generators_active_power" - ) - if kwargs.get("generators_reactive_power", None) is not None: - edisgo_obj.timeseries.generators_reactive_power = kwargs.get( - "generators_reactive_power" - ) + if self.is_worst_case is False: + # reset all time series + self.reset() - if kwargs.get("storage_units_active_power", None) is not None: - edisgo_obj.timeseries.storage_units_active_power = kwargs.get( - "storage_units_active_power" - ) - if kwargs.get("storage_units_reactive_power", None) is not None: - edisgo_obj.timeseries.storage_units_reactive_power = kwargs.get( - "storage_units_reactive_power" + # create a mapping from worst case cases to time stamps needed for pypsa + worst_cases = [ + "_".join(case) for case in itertools.product(cases, ["mv", "lv"]) + ] + time_stamps = pd.date_range("1/1/1970", periods=len(worst_cases), freq="H") + self.timeindex_worst_cases = pd.Series(time_stamps, index=worst_cases) + self.timeindex = time_stamps + else: + # check if cases previously set are the same as set now or if additional + # cases are set + if not hasattr(self, "timeindex_worst_cases"): + logger.warning( + "Worst case time series were previously set but attribute " + "'TimeSeries.timeindex_worst_cases' was not set, so it is not " + "known which time step corresponds to which case. Additional worst " + "case time series can therefore not be set. Please either set " + "'TimeSeries.timeindex_worst_cases' or use 'TimeSeries.reset()' " + "to reset all time series and set new ones." 
) - - if kwargs.get("charging_points_active_power", None) is not None: - edisgo_obj.timeseries.charging_points_active_power = kwargs.get( - "charging_points_active_power" + return + set_cases = [ + _ + for _ in ["feed-in_case", "load_case"] + if any(_ in element for element in self.timeindex_worst_cases.index) + ] + new_cases = [_ for _ in cases if _ not in set_cases] + if len(new_cases) > 0: + worst_cases = [ + "_".join(case) + for case in itertools.product(new_cases, ["mv", "lv"]) + ] + time_stamps = pd.date_range( + self.timeindex.max() + pd.Timedelta(1, unit="hours"), + periods=len(worst_cases), + freq="H", ) - if kwargs.get("charging_points_reactive_power", None) is not None: - edisgo_obj.timeseries.charging_points_reactive_power = kwargs.get( - "charging_points_reactive_power" + self.timeindex_worst_cases = self.timeindex_worst_cases.append( + pd.Series(time_stamps, index=worst_cases) ) + self.timeindex = self.timeindex.append(time_stamps) + + if generators_names is None: + generators_df = edisgo_object.topology.generators_df else: - raise ValueError("{} is not a valid mode.".format(mode)) - else: - config_data = edisgo_obj.config - - weather_cell_ids = get_weather_cells_intersecting_with_grid_district(edisgo_obj) - - # feed-in time series of fluctuating renewables - ts = kwargs.get("timeseries_generation_fluctuating", None) - if isinstance(ts, pd.DataFrame): - edisgo_obj.timeseries.generation_fluctuating = ts - elif isinstance(ts, str) and ts == "oedb": - edisgo_obj.timeseries.generation_fluctuating = ( - timeseries_import.feedin_oedb( - config_data, weather_cell_ids, kwargs.get("timeindex", None) - ) + generators_names = _check_if_components_exist( + edisgo_object, generators_names, "generators" ) - else: - raise ValueError( - "Your input for " - '"timeseries_generation_fluctuating" is not ' - "valid.".format(mode) + generators_df = edisgo_object.topology.generators_df.loc[ + generators_names, : + ] + if not generators_df.empty: + # assign voltage level for 
reactive power + df = assign_voltage_level_to_component( + generators_df, edisgo_object.topology.buses_df ) - # feed-in time series for dispatchable generators - ts = kwargs.get("timeseries_generation_dispatchable", None) - if isinstance(ts, pd.DataFrame): - edisgo_obj.timeseries.generation_dispatchable = ts + p, q = self._worst_case_generators(cases, df, edisgo_object.config) + _overwrite_time_series(p, q, "generators") + + if loads_names is None: + loads_df = edisgo_object.topology.loads_df else: - # check if there are any dispatchable generators, and - # throw error if there are - gens = edisgo_obj.topology.generators_df - if not (gens.type.isin(["solar", "wind"])).all(): - raise ValueError( - 'Your input for "timeseries_generation_dispatchable" ' - "is not valid.".format(mode) + loads_names = _check_if_components_exist( + edisgo_object, loads_names, "loads" + ) + loads_df = edisgo_object.topology.loads_df.loc[loads_names, :] + if not loads_df.empty: + # assign voltage level for reactive power + df = assign_voltage_level_to_component( + loads_df, edisgo_object.topology.buses_df + ) + # conventional loads + df_tmp = df[df.type == "conventional_load"] + if not df_tmp.empty: + p, q = self._worst_case_conventional_load( + cases, df_tmp, edisgo_object.config ) - # reactive power time series for all generators - ts = kwargs.get("timeseries_generation_reactive_power", None) - if isinstance(ts, pd.DataFrame): - edisgo_obj.timeseries.generation_reactive_power = ts - # set time index - if kwargs.get("timeindex", None) is not None: - edisgo_obj.timeseries.timeindex = kwargs.get("timeindex") - else: - edisgo_obj.timeseries.timeindex = ( - edisgo_obj.timeseries.generation_fluctuating.index + _overwrite_time_series(p, q, "loads") + # charging points + df_tmp = df[df.type == "charging_point"] + if not df_tmp.empty: + p, q = self._worst_case_charging_points( + cases, df_tmp, edisgo_object.config + ) + _overwrite_time_series(p, q, "loads") + # heat pumps + df_tmp = df[df.type == 
"heat_pump"] + if not df_tmp.empty: + p, q = self._worst_case_heat_pumps(cases, df_tmp, edisgo_object.config) + _overwrite_time_series(p, q, "loads") + # check if there are loads without time series remaining and if so, handle + # them as conventional loads + loads_without_ts = list( + set(df.index) - set(self.loads_active_power.columns) ) + if loads_without_ts: + logging.warning( + "There are loads where information on type of load is missing. " + "Handled types are 'conventional_load', 'charging_point', and " + "'heat_pump'. Loads with missing type information are handled as " + "conventional loads. If this is not the wanted behavior, please " + "set type information. This concerns the following " + f"loads: {loads_without_ts}." + ) + p, q = self._worst_case_conventional_load( + cases, df.loc[loads_without_ts, :], edisgo_object.config + ) + _overwrite_time_series(p, q, "loads") - # load time series - ts = kwargs.get("timeseries_load", None) - if isinstance(ts, pd.DataFrame): - edisgo_obj.timeseries.load = ts - elif ts == "demandlib": - edisgo_obj.timeseries.load = timeseries_import.load_time_series_demandlib( - config_data, year=edisgo_obj.timeseries.timeindex[0].year - ) + if storage_units_names is None: + storage_units_df = edisgo_object.topology.storage_units_df else: - raise ValueError( - "Your input for 'timeseries_load' is not valid.".format(mode) + storage_units_names = _check_if_components_exist( + edisgo_object, storage_units_names, "storage_units" ) - # reactive power timeseries for loads - ts = kwargs.get("timeseries_load_reactive_power", None) - if isinstance(ts, pd.DataFrame): - edisgo_obj.timeseries.load_reactive_power = ts - - # create generator active and reactive power timeseries - _generation_from_timeseries(edisgo_obj=edisgo_obj) - - # create load active and reactive power timeseries - _load_from_timeseries(edisgo_obj=edisgo_obj) - - # create storage active and reactive power timeseries - _storage_from_timeseries( - edisgo_obj=edisgo_obj, - 
ts_active_power=kwargs.get("timeseries_storage_units", None), - ts_reactive_power=kwargs.get( - "timeseries_storage_units_reactive_power", None - ), - ) - - # check if time series for the set time index can be obtained - _check_timeindex(edisgo_obj=edisgo_obj) + storage_units_df = edisgo_object.topology.storage_units_df.loc[ + storage_units_names, : + ] + if not storage_units_df.empty: + # assign voltage level for reactive power + df = assign_voltage_level_to_component( + storage_units_df, edisgo_object.topology.buses_df + ) + p, q = self._worst_case_storage_units(cases, df, edisgo_object.config) + _overwrite_time_series(p, q, "storage_units") + def _worst_case_generators(self, cases, df, configs): + """ + Get feed-in of generators for worst case analyses. -def _load_from_timeseries(edisgo_obj, load_names=None): - """ - Set active and reactive load time series for specified loads by sector. + See :py:attr:`~set_worst_case` for further information. - If loads are not specified, sets time series of all existing loads. - In case reactive power time series are not provided, a fixed power factor - as specified in config file 'config_timeseries' in section - 'reactive_power_factor' is assumed. + Parameters + ---------- + cases : list(str) + List with worst-cases to generate time series for. Can be + 'feed-in_case', 'load_case' or both. + df : :pandas:`pandas.DataFrame` + Dataframe with information on generators in the format of + :attr:`~.network.topology.Topology.generators_df` with additional column + "voltage_level". + configs : :class:`~.tools.config.Config` + Configuration data with assumed simultaneity factors and reactive power + behavior. - Parameters - ---------- - edisgo_obj : :class:`~.EDisGo` - load_names : list(str) + Returns + ------- + (:pandas:`pandas.DataFrame`, :pandas:`pandas.DataFrame`) + Active and reactive power (in MW and MVA, respectively) in each case for + each generator. 
The index of the dataframe contains the case and the columns + are the generator names. - """ - # get all requested loads and drop existing timeseries - if load_names is None: - load_names = edisgo_obj.topology.loads_df.index - loads = edisgo_obj.topology.loads_df.loc[load_names] - _drop_existing_component_timeseries( - edisgo_obj=edisgo_obj, comp_type="loads", comp_names=load_names - ) - # set active power - edisgo_obj.timeseries.loads_active_power = pd.concat( - [ - edisgo_obj.timeseries.loads_active_power, - loads.apply( - lambda x: edisgo_obj.timeseries.load[x.sector] * x.annual_consumption - if x.sector in edisgo_obj.timeseries.load.columns - else edisgo_obj.timeseries.load["other"] * x.annual_consumption, - axis=1, - ).T, - ], - axis=1, - ) + """ + # check that all generators have information on nominal power, technology type, + # and voltage level they are in + df = df.loc[:, ["p_nom", "voltage_level", "type"]] + check = df.isnull().any(axis=1) + if check.any(): + raise AttributeError( + f"The following generators have missing information on nominal power, " + f"technology type or voltage level: {check[check].index.values}." 
+ ) - # if reactive power is given as attribute set with inserted timeseries - if hasattr(edisgo_obj.timeseries, "load_reactive_power"): - edisgo_obj.timeseries.loads_reactive_power = pd.concat( + # active power + # get worst case configurations + worst_case_scale_factors = configs["worst_case_scale_factor"] + # get power scaling factors for different technologies, voltage levels and + # feed-in/load case + types = ["pv", "wind", "other"] + power_scaling = pd.DataFrame(columns=types) + for t in types: + for case in cases: + power_scaling.at[f"{case}_mv", t] = worst_case_scale_factors[ + f"{case}_feed-in_{t}" + ] + + power_scaling.at[f"{case}_lv", t] = power_scaling.at[f"{case}_mv", t] + + # calculate active power of generators + active_power = pd.concat( [ - edisgo_obj.timeseries.loads_reactive_power, - loads.apply( - lambda x: edisgo_obj.timeseries.load_reactive_power[x.sector] - * x.annual_consumption - if x.sector in edisgo_obj.timeseries.load_reactive_power.columns - else edisgo_obj.timeseries.load_reactive_power["other"] - * x.annual_consumption, - axis=1, + power_scaling.pv.to_frame("p_nom").dot( + df[df.type == "solar"].loc[:, ["p_nom"]].T + ), + power_scaling.wind.to_frame("p_nom").dot( + df[df.type == "wind"].loc[:, ["p_nom"]].T + ), + power_scaling.other.to_frame("p_nom").dot( + df[~df.type.isin(["solar", "wind"])].loc[:, ["p_nom"]].T ), ], axis=1, ) - # set default reactive load - else: - _set_reactive_power_time_series_for_fixed_cosphi_using_config( - edisgo_obj=edisgo_obj, df=loads, component_type="loads" - ) - - -def _generation_from_timeseries(edisgo_obj, generator_names=None): - def _timeseries_fluctuating(): - if isinstance( - edisgo_obj.timeseries.generation_fluctuating.columns, pd.MultiIndex - ): - return gens_fluctuating.apply( - lambda x: edisgo_obj.timeseries.generation_fluctuating[x.type][ - x.weather_cell_id - ].T - * x.p_nom, - axis=1, - ).T - else: - return gens_fluctuating.apply( - lambda x: 
edisgo_obj.timeseries.generation_fluctuating[x.type].T - * x.p_nom, - axis=1, - ).T - - def _timeseries_dispatchable(): - return gens_dispatchable.apply( - lambda x: edisgo_obj.timeseries.generation_dispatchable[x.type] * x.p_nom - if x.type in edisgo_obj.timeseries.generation_dispatchable.columns - else edisgo_obj.timeseries.generation_dispatchable["other"] * x.p_nom, - axis=1, - ).T - if generator_names is None: - generator_names = edisgo_obj.topology.generators_df.index - # get all generators - gens = edisgo_obj.topology.generators_df.loc[generator_names] - # drop existing timeseries - _drop_existing_component_timeseries(edisgo_obj, "generators", generator_names) - # handling of fluctuating generators - gens_fluctuating = gens[gens.type.isin(["solar", "wind"])] - gens_dispatchable = gens[~gens.index.isin(gens_fluctuating.index)] - if gens_dispatchable.empty and gens_fluctuating.empty: - logger.debug("No generators provided to add timeseries for.") - return - if not gens_dispatchable.empty: - edisgo_obj.timeseries.generators_active_power = pd.concat( - [ - edisgo_obj.timeseries.generators_active_power, - _timeseries_dispatchable(), - ], - axis=1, - sort=False, + # reactive power + # get worst case configurations for each generator + power_factor = q_control._fixed_cosphi_default_power_factor( + df, "generators", configs ) - if not gens_fluctuating.empty: - edisgo_obj.timeseries.generators_active_power = pd.concat( - [ - edisgo_obj.timeseries.generators_active_power, - _timeseries_fluctuating(), - ], - axis=1, - sort=False, + q_sign = q_control._fixed_cosphi_default_reactive_power_sign( + df, "generators", configs ) - - # set reactive power if given as attribute - if ( - hasattr(edisgo_obj.timeseries, "generation_reactive_power") - and gens.index.isin( - edisgo_obj.timeseries.generation_reactive_power.columns - ).all() - ): - - edisgo_obj.timeseries.generators_reactive_power = pd.concat( + # write reactive power configuration to TimeSeriesRaw + 
self.time_series_raw.q_control.drop(df.index, errors="ignore", inplace=True) + self.time_series_raw.q_control = pd.concat( [ - edisgo_obj.timeseries.generators_reactive_power, - edisgo_obj.timeseries.generation_reactive_power.loc[:, gens.index], - ], - axis=1, - ) - # set default reactive power by cos_phi - else: - logger.debug("Reactive power calculated by cos(phi).") - _set_reactive_power_time_series_for_fixed_cosphi_using_config( - edisgo_obj=edisgo_obj, df=gens, component_type="generators" + self.time_series_raw.q_control, + pd.DataFrame( + index=df.index, + data={ + "type": "fixed_cosphi", + "q_sign": q_sign, + "power_factor": power_factor, + }, + ), + ] ) + # calculate reactive power of generators + reactive_power = q_control.fixed_cosphi(active_power, q_sign, power_factor) + return active_power, reactive_power + def _worst_case_conventional_load(self, cases, df, configs): + """ + Get demand of conventional loads for worst case analyses. -def _storage_from_timeseries( - edisgo_obj, ts_active_power, ts_reactive_power, name_storage_units=None -): - """ - Sets up storage timeseries for mode=None in get_component_timeseries. - Timeseries with the right timeindex and columns with storage unit names - have to be provided. - - Overwrites active and reactive power time series of storage units - - Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - ts_active_power: :pandas:`pandas.DataFrame` - Timeseries of active power with index=timeindex, - columns=name_storage_units - ts_reactive_power: :pandas:`pandas.DataFrame` - Timeseries of active power with index=timeindex, - columns=name_storage_units - name_storage_units: str or list of str - Names of storage units to add timeseries for. Default None, timeseries - for all storage units of edisgo_obj are set then. 
- """ - if name_storage_units is None: - name_storage_units = edisgo_obj.topology.storage_units_df.index - storage_units_df = edisgo_obj.topology.storage_units_df.loc[name_storage_units] - _drop_existing_component_timeseries(edisgo_obj, "storage_units", name_storage_units) - - if len(storage_units_df) == 0: - edisgo_obj.timeseries.storage_units_active_power = pd.DataFrame( - {}, index=edisgo_obj.timeseries.timeindex - ) - edisgo_obj.timeseries.storage_units_reactive_power = pd.DataFrame( - {}, index=edisgo_obj.timeseries.timeindex - ) - elif ts_active_power is None: - # Todo: move up to check at the start - raise ValueError("No timeseries for storage units provided.") - else: - try: - # check if indices and columns are correct - if (ts_active_power.index == edisgo_obj.timeseries.timeindex).all(): - edisgo_obj.timeseries.storage_units_active_power = ( - drop_duplicated_columns( - pd.concat( - [ - edisgo_obj.timeseries.storage_units_active_power, - ts_active_power.loc[:, name_storage_units], - ], - axis=1, - ) - ) - ) - # check if reactive power is given - if ( - ts_reactive_power is not None - and (ts_active_power.index == edisgo_obj.timeseries.timeindex).all() - ): - edisgo_obj.timeseries.storage_units_reactive_power = ( - drop_duplicated_columns( - pd.concat( - [ - edisgo_obj.timeseries.storage_units_reactive_power, - ts_reactive_power.loc[:, name_storage_units], - ], - axis=1, - ) - ) - ) - else: - _set_reactive_power_time_series_for_fixed_cosphi_using_config( - edisgo_obj=edisgo_obj, - df=storage_units_df, - component_type="storage_units", - ) - else: - raise ValueError( - "Index of provided storage active power " - "timeseries does not match timeindex of " - "TimeSeries class." - ) - except ValueError: - raise ValueError( - "Columns or indices of inserted storage " - "timeseries do not match topology and " - "timeindex." - ) - + See :py:attr:`~set_worst_case` for further information. 
-def _worst_case_generation(edisgo_obj, modes, generator_names=None): - """ - Define worst case generation time series for fluctuating and - dispatchable generators. + Parameters + ---------- + cases : list(str) + List with worst-cases to generate time series for. Can be + 'feed-in_case', 'load_case' or both. + df : :pandas:`pandas.DataFrame` + Dataframe with information on conventional loads in the format of + :attr:`~.network.topology.Topology.loads_df` with additional column + "voltage_level". + configs : :class:`~.tools.config.Config` + Configuration data with assumed simultaneity factors and reactive power + behavior. - Overwrites active and reactive power time series of generators + Returns + ------- + (:pandas:`pandas.DataFrame`, :pandas:`pandas.DataFrame`) + Active and reactive power (in MW and MVA, respectively) in each case for + each load. The index of the dataframe contains the case and the columns + are the load names. - Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - modes : list - List with worst-cases to generate time series for. Can be - 'feedin_case', 'load_case' or both. - generator_names: str or list of str - Names of generators to add timeseries for. Default None, timeseries - for all generators of edisgo_obj are set then. 
- """ - if generator_names is None: - generator_names = edisgo_obj.topology.generators_df.index - - gens_df = edisgo_obj.topology.generators_df.loc[ - generator_names, ["bus", "type", "p_nom"] - ] - - # check that all generators have bus, type, nominal power - check_gens = gens_df.isnull().any(axis=1) - if check_gens.any(): - raise AttributeError( - "The following generators have either missing bus, type or " - "nominal power: {}.".format(check_gens[check_gens].index.values) - ) + """ + # check that all loads have information on nominal power (grid connection power) + # and voltage level they are in + df = df.loc[:, ["p_set", "voltage_level"]] + check = df.isnull().any(axis=1) + if check.any(): + raise AttributeError( + f"The following loads have missing information on grid connection power" + f" or voltage level: {check[check].index.values}." + ) - # active power - # get worst case configurations - worst_case_scale_factors = edisgo_obj.config["worst_case_scale_factor"] + # active power + # get worst case configurations + worst_case_scale_factors = configs["worst_case_scale_factor"] + # get power scaling factors for different voltage levels and feed-in/load case + power_scaling = pd.Series(dtype=float) + for case in cases: + for voltage_level in ["mv", "lv"]: + power_scaling.at[f"{case}_{voltage_level}"] = worst_case_scale_factors[ + f"{voltage_level}_{case}_load" + ] - # get worst case scaling factors for different generator types and - # feed-in/load case - worst_case_ts = pd.DataFrame( - { - "solar": [ - worst_case_scale_factors["{}_feedin_pv".format(mode)] for mode in modes - ], - "wind": [ - worst_case_scale_factors["{}_feedin_wind".format(mode)] - for mode in modes - ], - "other": [ - worst_case_scale_factors["{}_feedin_other".format(mode)] - for mode in modes - ], - }, - index=edisgo_obj.timeseries.timeindex, - ) + # calculate active power of loads + active_power = power_scaling.to_frame("p_set").dot(df.loc[:, ["p_set"]].T) - gen_ts = pd.DataFrame( - 
index=edisgo_obj.timeseries.timeindex, - columns=gens_df.index, - dtype="float64", - ) - # assign normalized active power time series to solar generators - cols_pv = gen_ts[gens_df.index[gens_df.type == "solar"]].columns - if len(cols_pv) > 0: - gen_ts[cols_pv] = pd.concat( - [worst_case_ts.loc[:, ["solar"]]] * len(cols_pv), axis=1, sort=True + # reactive power + # get worst case configurations for each load + power_factor = q_control._fixed_cosphi_default_power_factor( + df, "loads", configs ) - # assign normalized active power time series to wind generators - cols_wind = gen_ts[gens_df.index[gens_df.type == "wind"]].columns - if len(cols_wind) > 0: - gen_ts[cols_wind] = pd.concat( - [worst_case_ts.loc[:, ["wind"]]] * len(cols_wind), - axis=1, - sort=True, + q_sign = q_control._fixed_cosphi_default_reactive_power_sign( + df, "loads", configs ) - # assign normalized active power time series to other generators - cols = gen_ts.columns[~gen_ts.columns.isin(cols_pv.append(cols_wind))] - if len(cols) > 0: - gen_ts[cols] = pd.concat( - [worst_case_ts.loc[:, ["other"]]] * len(cols), axis=1, sort=True + # write reactive power configuration to TimeSeriesRaw + self.time_series_raw.q_control.drop(df.index, errors="ignore", inplace=True) + self.time_series_raw.q_control = pd.concat( + [ + self.time_series_raw.q_control, + pd.DataFrame( + index=df.index, + data={ + "type": "fixed_cosphi", + "q_sign": q_sign, + "power_factor": power_factor, + }, + ), + ] ) + # calculate reactive power of loads + reactive_power = q_control.fixed_cosphi(active_power, q_sign, power_factor) + return active_power, reactive_power - # drop existing timeseries - _drop_existing_component_timeseries(edisgo_obj, "generators", generator_names) + def _worst_case_charging_points(self, cases, df, configs): + """ + Get demand of charging points for worst case analyses. 
- # multiply normalized time series by nominal power of generator - edisgo_obj.timeseries.generators_active_power = pd.concat( - [ - edisgo_obj.timeseries.generators_active_power, - gen_ts.mul(gens_df.p_nom), - ], - axis=1, - ) + See :py:attr:`~set_worst_case` for further information. - # calculate reactive power - _set_reactive_power_time_series_for_fixed_cosphi_using_config( - edisgo_obj=edisgo_obj, df=gens_df, component_type="generators" - ) + Parameters + ---------- + cases : list(str) + List with worst-cases to generate time series for. Can be + 'feed-in_case', 'load_case' or both. + df : :pandas:`pandas.DataFrame` + Dataframe with information on charging points in the format of + :attr:`~.network.topology.Topology.loads_df` with additional column + "voltage_level". + configs : :class:`~.tools.config.Config` + Configuration data with assumed simultaneity factors and reactive power + behavior. + Returns + ------- + (:pandas:`pandas.DataFrame`, :pandas:`pandas.DataFrame`) + Active and reactive power (in MW and MVA, respectively) in each case for + each charging point. The index of the dataframe contains the case and the + columns are the charging point names. -def _worst_case_load(edisgo_obj, modes, load_names=None): - """ - Define worst case load time series for each sector. + """ + # check that all charging points have information on nominal power, + # sector (use case), and voltage level they are in + df = df.loc[:, ["p_set", "voltage_level", "sector"]] + check = df.isnull().any(axis=1) + if check.any(): + raise AttributeError( + "The following charging points have missing information on nominal " + f"power, use case or voltage level: {check[check].index.values}." + ) - Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - modes : list - List with worst-cases to generate time series for. Can be - 'feedin_case', 'load_case' or both. - load_names: str or list of str - Names of loads to add timeseries for. 
Default None, timeseries - for all loads of edisgo_obj are set then. + # check that there is no invalid sector (only "home", "work", "public", and + # "hpc" allowed) + use_cases = ["home", "work", "public", "hpc"] + sectors = df.sector.unique() + diff = list(set(sectors) - set(use_cases)) + if diff: + raise AttributeError( + "The following charging points have a use case no worst case " + "simultaneity factor is defined for: " + f"{df[df.sector.isin(diff)].index.values}." + ) - """ + # active power + # get worst case configurations + worst_case_scale_factors = configs["worst_case_scale_factor"] + # get power scaling factors for different use cases, voltage levels and + # feed-in/load case + power_scaling = pd.DataFrame(columns=sectors) + for s in sectors: + for case in cases: + for voltage_level in ["mv", "lv"]: + power_scaling.at[ + f"{case}_{voltage_level}", s + ] = worst_case_scale_factors[f"{voltage_level}_{case}_cp_{s}"] + + # calculate active power of charging points + active_power = pd.concat( + [ + power_scaling.loc[:, s] + .to_frame("p_set") + .dot(df[df.sector == s].loc[:, ["p_set"]].T) + for s in sectors + ], + axis=1, + ) - voltage_levels = ["mv", "lv"] + # reactive power + # get worst case configurations for each charging point + power_factor = q_control._fixed_cosphi_default_power_factor( + df, "charging_points", configs + ) + q_sign = q_control._fixed_cosphi_default_reactive_power_sign( + df, "charging_points", configs + ) + # write reactive power configuration to TimeSeriesRaw + self.time_series_raw.q_control.drop(df.index, errors="ignore", inplace=True) + self.time_series_raw.q_control = pd.concat( + [ + self.time_series_raw.q_control, + pd.DataFrame( + index=df.index, + data={ + "type": "fixed_cosphi", + "q_sign": q_sign, + "power_factor": power_factor, + }, + ), + ] + ) + # calculate reactive power of charging points + reactive_power = q_control.fixed_cosphi(active_power, q_sign, power_factor) + return active_power, reactive_power - if load_names 
is None: - load_names = edisgo_obj.topology.loads_df.index - loads_df = edisgo_obj.topology.loads_df.loc[load_names, ["bus", "sector", "p_nom"]] + def _worst_case_heat_pumps(self, cases, df, configs): + """ + Get demand of heat pumps for worst case analyses. - # check that all loads have bus, sector, annual consumption - check_loads = loads_df.isnull().any(axis=1) - if check_loads.any(): - raise AttributeError( - "The following loads have either missing bus, sector or " - "annual consumption: {}.".format(check_loads[check_loads].index.values) - ) + See :py:attr:`~set_worst_case` for further information. - # assign voltage level to loads - if loads_df.empty: - return - loads_df["voltage_level"] = loads_df.apply( - lambda _: "lv" if edisgo_obj.topology.buses_df.at[_.bus, "v_nom"] < 1 else "mv", - axis=1, - ) + Parameters + ---------- + cases : list(str) + List with worst-cases to generate time series for. Can be + 'feed-in_case', 'load_case' or both. + df : :pandas:`pandas.DataFrame` + Dataframe with information on heat pumps in the format of + :attr:`~.network.topology.Topology.loads_df` with additional column + "voltage_level". + configs : :class:`~.tools.config.Config` + Configuration data with assumed simultaneity factors and reactive power + behavior. - # active power - # get worst case configurations - worst_case_scale_factors = edisgo_obj.config["worst_case_scale_factor"] - - # get power scaling factors for different voltage levels and feed-in/ - # load case - power_scaling = {} - for voltage_level in voltage_levels: - power_scaling[voltage_level] = [ - worst_case_scale_factors["{}_{}_load".format(voltage_level, mode)] - for mode in modes - ] + Returns + ------- + (:pandas:`pandas.DataFrame`, :pandas:`pandas.DataFrame`) + Active and reactive power (in MW and MVA, respectively) in each case for + each heat pump. The index of the dataframe contains the case and the columns + are the heat pump names. 
- # assign power scaling factor to each load - power_scaling_df = pd.DataFrame( - data=np.transpose( - [power_scaling[loads_df.at[col, "voltage_level"]] for col in loads_df.index] - ), - index=edisgo_obj.timeseries.timeindex, - columns=loads_df.index, - ) + """ + # check that all heat pumps have information on nominal power, and voltage level + # they are in + df = df.loc[:, ["p_set", "voltage_level"]] + check = df.isnull().any(axis=1) + if check.any(): + raise AttributeError( + f"The following heat pumps have missing information on nominal power or" + f" voltage level: {check[check].index.values}." + ) - # drop existing timeseries - _drop_existing_component_timeseries( - edisgo_obj=edisgo_obj, comp_type="loads", comp_names=load_names - ) + # active power + # get worst case configurations + worst_case_scale_factors = configs["worst_case_scale_factor"] + # get power scaling factors for different voltage levels and feed-in/load case + power_scaling = pd.Series() + for case in cases: + for voltage_level in ["mv", "lv"]: + power_scaling.at[f"{case}_{voltage_level}"] = worst_case_scale_factors[ + f"{voltage_level}_{case}_hp" + ] - # calculate active power of loads - edisgo_obj.timeseries.loads_active_power = pd.concat( - [ - edisgo_obj.timeseries.loads_active_power, - (power_scaling_df * loads_df.loc[:, "p_nom"]), - ], - axis=1, - ) + # calculate active power of heat pumps + active_power = power_scaling.to_frame("p_set").dot(df.loc[:, ["p_set"]].T) - _set_reactive_power_time_series_for_fixed_cosphi_using_config( - edisgo_obj=edisgo_obj, df=loads_df, component_type="loads" - ) + # reactive power + # get worst case configurations for each heat pump + power_factor = q_control._fixed_cosphi_default_power_factor( + df, "heat_pumps", configs + ) + q_sign = q_control._fixed_cosphi_default_reactive_power_sign( + df, "heat_pumps", configs + ) + # write reactive power configuration to TimeSeriesRaw + self.time_series_raw.q_control.drop(df.index, errors="ignore", inplace=True) + 
self.time_series_raw.q_control = pd.concat( + [ + self.time_series_raw.q_control, + pd.DataFrame( + index=df.index, + data={ + "type": "fixed_cosphi", + "q_sign": q_sign, + "power_factor": power_factor, + }, + ), + ] + ) + # calculate reactive power of heat pumps + reactive_power = q_control.fixed_cosphi(active_power, q_sign, power_factor) + return active_power, reactive_power + def _worst_case_storage_units(self, cases, df, configs): + """ + Get charging and discharging of storage units for worst case analyses. -def _worst_case_storage(edisgo_obj, modes, storage_names=None): - """ - Define worst case storage unit time series. + See :py:attr:`~set_worst_case` for further information. - Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - modes : list - List with worst-cases to generate time series for. Can be - 'feedin_case', 'load_case' or both. - storage_names: str or list of str - Names of storage units to add timeseries for. Default None, - timeseries for all storage units of edisgo_obj are set then. + Parameters + ---------- + cases : list(str) + List with worst-cases to generate time series for. Can be + 'feed-in_case', 'load_case' or both. + df : :pandas:`pandas.DataFrame` + Dataframe with information on generators in the format of + :attr:`~.network.topology.Topology.generators_df` with additional column + "voltage_level". + configs : :class:`~.tools.config.Config` + Configuration data with assumed simultaneity factors and reactive power + behavior. 
- """ - if len(edisgo_obj.topology.storage_units_df) == 0: - edisgo_obj.timeseries.storage_units_active_power = pd.DataFrame( - {}, index=edisgo_obj.timeseries.timeindex - ) - edisgo_obj.timeseries.storage_units_reactive_power = pd.DataFrame( - {}, index=edisgo_obj.timeseries.timeindex - ) - else: - if storage_names is None: - storage_names = edisgo_obj.topology.storage_units_df.index - storage_df = edisgo_obj.topology.storage_units_df.loc[ - storage_names, ["bus", "p_nom"] - ] + Returns + ------- + (:pandas:`pandas.DataFrame`, :pandas:`pandas.DataFrame`) + Active and reactive power (in MW and MVA, respectively) in each case for + each storage. The index of the dataframe contains the case and the columns + are the storage names. - # check that all storage units have bus, nominal power - check_storage = storage_df.isnull().any(axis=1) - if check_storage.any(): + """ + # check that all storage units have information on nominal power + # and voltage level they are in + df = df.loc[:, ["p_nom", "voltage_level"]] + check = df.isnull().any(axis=1) + if check.any(): raise AttributeError( - "The following storage units have either missing bus or " - "nominal power: {}.".format(check_storage[check_storage].index.values) + "The following storage units have missing information on nominal power" + f" or voltage level: {check[check].index.values}." 
) # active power # get worst case configurations - worst_case_scale_factors = edisgo_obj.config["worst_case_scale_factor"] - - # get worst case scaling factors for feed-in/load case - worst_case_ts = pd.DataFrame( - np.transpose( - [ - [ - worst_case_scale_factors["{}_storage".format(mode)] - for mode in modes - ] - ] - * len(storage_df) - ), - index=edisgo_obj.timeseries.timeindex, - columns=storage_df.index, + worst_case_scale_factors = configs["worst_case_scale_factor"] + # get power scaling factors for different voltage levels and feed-in/load case + power_scaling = pd.Series() + for case in cases: + power_scaling.at[f"{case}_mv"] = worst_case_scale_factors[f"{case}_storage"] + power_scaling.at[f"{case}_lv"] = power_scaling.at[f"{case}_mv"] + + # calculate active power of loads + active_power = power_scaling.to_frame("p_nom").dot(df.loc[:, ["p_nom"]].T) + + # reactive power + # get worst case configurations for each load + power_factor = q_control._fixed_cosphi_default_power_factor( + df, "storage_units", configs ) - edisgo_obj.timeseries.storage_units_active_power = drop_duplicated_columns( - pd.concat( - [ - edisgo_obj.timeseries.storage_units_active_power, - (worst_case_ts * storage_df.p_nom), - ], - axis=1, - ), - keep="last", + q_sign = q_control._fixed_cosphi_default_reactive_power_sign( + df, "storage_units", configs ) - - _set_reactive_power_time_series_for_fixed_cosphi_using_config( - edisgo_obj=edisgo_obj, - df=storage_df, - component_type="storage_units", + # write reactive power configuration to TimeSeriesRaw + self.time_series_raw.q_control.drop(df.index, errors="ignore", inplace=True) + self.time_series_raw.q_control = pd.concat( + [ + self.time_series_raw.q_control, + pd.DataFrame( + index=df.index, + data={ + "type": "fixed_cosphi", + "q_sign": q_sign, + "power_factor": power_factor, + }, + ), + ] ) + # calculate reactive power of loads + reactive_power = q_control.fixed_cosphi(active_power, q_sign, power_factor) + return active_power, 
reactive_power + def predefined_fluctuating_generators_by_technology( + self, edisgo_object, ts_generators, generator_names=None + ): + """ + Set active power feed-in time series for fluctuating generators by technology. -def _check_timeindex(edisgo_obj): - """ - Check function to check if all feed-in and load time series contain - values for the specified time index. + In case time series are provided per technology and weather cell ID, active + power feed-in time series are also set by technology and weather cell ID. - """ - try: - assert edisgo_obj.timeseries.timeindex.isin( - edisgo_obj.timeseries.generators_reactive_power.index - ).all() - assert edisgo_obj.timeseries.timeindex.isin( - edisgo_obj.timeseries.generators_active_power.index - ).all() - assert edisgo_obj.timeseries.timeindex.isin( - edisgo_obj.timeseries.loads_reactive_power.index - ).all() - assert edisgo_obj.timeseries.timeindex.isin( - edisgo_obj.timeseries.loads_active_power.index - ).all() - assert edisgo_obj.timeseries.timeindex.isin( - edisgo_obj.timeseries.storage_units_reactive_power.index - ).all() - assert edisgo_obj.timeseries.timeindex.isin( - edisgo_obj.timeseries.storage_units_active_power.index - ).all() - except: - message = "Time index of feed-in and load time series does not match." - logging.error(message) - raise KeyError(message) - - -def add_loads_timeseries(edisgo_obj, load_names, **kwargs): - """ - Define load time series for active and reactive power. For more information - on required and optional parameters see description of - :func:`get_component_timeseries`. The mode initially set within - get_component_timeseries is used here to set new timeseries. If a different - mode is required, change edisgo_obj.timeseries.mode to the desired mode and - provide respective parameters. 
+ Parameters + ---------- + edisgo_object : :class:`~.EDisGo` + ts_generators : str or :pandas:`pandas.DataFrame` + Defines which technology-specific or technology and weather cell specific + active power time series to use. + Possible options are: + + * 'oedb' + + Technology and weather cell specific hourly feed-in time series are + obtained from the + `OpenEnergy DataBase + `_ + for the weather year 2011. See + :func:`edisgo.io.timeseries_import.import_feedin_timeseries` for more + information. + + * :pandas:`pandas.DataFrame` + + DataFrame with self-provided feed-in time series per technology or + per technology and weather cell ID normalized to a nominal capacity + of 1. + In case time series are provided only by technology, columns of the + DataFrame contain the technology type as string. + In case time series are provided by technology and weather cell ID + columns need to be a :pandas:`pandas.MultiIndex` with the + first level containing the technology as string and the second level + the weather cell ID as integer. + Index needs to be a :pandas:`pandas.DatetimeIndex`. + + When importing a ding0 grid and/or using predefined scenarios + of the future generator park, + each generator has an assigned weather cell ID that identifies the + weather data cell from the weather data set used in the research + project `open_eGo `_ to + determine feed-in profiles. The weather cell ID can be retrieved + from column `weather_cell_id` in + :attr:`~.network.topology.Topology.generators_df` and could be + overwritten to use own weather cells. + + generator_names : list(str) + Defines for which fluctuating generators to use technology-specific time + series. If None, all generators technology (and weather cell) specific time + series are provided for are used. In case the time series are retrieved from + the oedb, all solar and wind generators are used. Default: None. 
- Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - load_names: str or list of str - Names of loads to add timeseries for. Default None, timeseries - for all loads of edisgo_obj are set then. + """ + # in case time series from oedb are used, retrieve oedb time series + if isinstance(ts_generators, str) and ts_generators == "oedb": + weather_cell_ids = get_weather_cells_intersecting_with_grid_district( + edisgo_object + ) + ts_generators = timeseries_import.feedin_oedb( + edisgo_object.config, weather_cell_ids, self.timeindex + ) + elif not isinstance(ts_generators, pd.DataFrame): + raise ValueError( + "'ts_generators' must either be a pandas DataFrame or 'oedb'." + ) - """ - # If timeseries have not yet been filled, it is not - # necessary to add timeseries - if not hasattr(edisgo_obj.timeseries, "mode"): - logger.debug( - "Timeseries have not been set yet. Please call" - "get_component_timeseries to create " - "timeseries." - ) - return - # turn single name to list - if isinstance(load_names, str): - load_names = [load_names] - # append timeseries of respective mode - if edisgo_obj.timeseries.mode: - if "worst-case" in edisgo_obj.timeseries.mode: - modes = _get_worst_case_modes(edisgo_obj.timeseries.mode) - # set random timeindex - _worst_case_load(edisgo_obj=edisgo_obj, modes=modes, load_names=load_names) - elif edisgo_obj.timeseries.mode == "manual": - loads_active_power = kwargs.get("loads_active_power", None) - if loads_active_power is not None: - check_timeseries_for_index_and_cols( - edisgo_obj, loads_active_power, load_names + # set generator_names if None + if generator_names is None: + if isinstance(ts_generators.columns, pd.MultiIndex): + groups = edisgo_object.topology.generators_df.groupby( + ["type", "weather_cell_id"] + ).groups + combinations = ts_generators.columns + generator_names = np.concatenate( + [groups[_].values for _ in combinations if _ in groups.keys()] ) - loads_reactive_power = 
kwargs.get("loads_reactive_power", None) - if loads_reactive_power is not None: - check_timeseries_for_index_and_cols( - edisgo_obj, loads_reactive_power, load_names - ) - _drop_existing_component_timeseries( - edisgo_obj=edisgo_obj, comp_type="loads", comp_names=load_names - ) - # add new load timeseries - edisgo_obj.timeseries.loads_active_power = pd.concat( - [ - edisgo_obj.timeseries.loads_active_power, - loads_active_power.loc[:, load_names], - ], + else: + technologies = ts_generators.columns.unique() + generator_names = edisgo_object.topology.generators_df[ + edisgo_object.topology.generators_df.type.isin(technologies) + ].index + generator_names = _check_if_components_exist( + edisgo_object, generator_names, "generators" + ) + generators_df = edisgo_object.topology.generators_df.loc[generator_names, :] + + # drop existing time series + drop_component_time_series( + obj=self, df_name="generators_active_power", comp_names=generator_names + ) + + # scale time series by nominal power + if isinstance(ts_generators.columns, pd.MultiIndex): + ts_scaled = generators_df.apply( + lambda x: ts_generators[x.type][x.weather_cell_id].T * x.p_nom, axis=1, - ) - edisgo_obj.timeseries.loads_reactive_power = pd.concat( + ).T + else: + ts_scaled = generators_df.apply( + lambda x: ts_generators[x.type].T * x.p_nom, + axis=1, + ).T + if not ts_scaled.empty: + self.generators_active_power = pd.concat( [ - edisgo_obj.timeseries.loads_reactive_power, - loads_reactive_power.loc[:, load_names], + self.generators_active_power, + ts_scaled, ], axis=1, + sort=False, ) - else: - raise ValueError( - "{} is not a valid mode.".format(edisgo_obj.timeseries.mode) - ) - else: - # create load active and reactive power timeseries - _load_from_timeseries(edisgo_obj=edisgo_obj, load_names=load_names) + # write to TimeSeriesRaw + if not isinstance(ts_generators.columns, pd.MultiIndex): + # make columns a multiindex, otherwise columns are not a multiindex anymore + # after concatenation and 
duplicates not correctly identified + ts_generators = ts_generators.copy() + ts_generators.columns = pd.MultiIndex.from_product( + [ts_generators.columns, [None]] + ) + tmp = pd.concat( + [ + self.time_series_raw.fluctuating_generators_active_power_by_technology, + ts_generators, + ], + axis=1, + ) + tmp = tmp.loc[:, ~tmp.columns.duplicated(keep="last")] + self.time_series_raw.fluctuating_generators_active_power_by_technology = tmp -def add_generators_timeseries(edisgo_obj, generator_names, **kwargs): - """ - Define generator time series for active and reactive power. For more - information on required and optional parameters see description of - :func:`get_component_timeseries`.The mode initially set within - get_component_timeseries is used here to set new timeseries. If a different - mode is required, change edisgo_obj.timeseries.mode to the desired mode and - provide respective parameters. + def predefined_dispatchable_generators_by_technology( + self, edisgo_object, ts_generators, generator_names=None + ): + """ + Set active power feed-in time series for dispatchable generators by technology. - Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - generator_names: str or list of str - Names of generators to add timeseries for. + Parameters + ---------- + edisgo_object : :class:`~.EDisGo` + ts_generators : :pandas:`pandas.DataFrame` + DataFrame with self-provided active power time series of each + type of dispatchable generator normalized to a nominal capacity of 1. + Columns contain the technology type as string, e.g. 'gas', 'coal'. + Use 'other' if you don't want to explicitly provide a time series for every + possible technology. In the current grid existing generator technologies + can be retrieved from column `type` in + :attr:`~.network.topology.Topology.generators_df`. + Index needs to be a :pandas:`pandas.DatetimeIndex`. 
+ generator_names : list(str) + Defines for which dispatchable generators to use technology-specific time + series. If None, all dispatchable generators technology-specific time series + are provided for are used. In case `ts_generators` contains a column + 'other', all dispatchable generators in the network (i.e. all but solar and + wind generators) are used. - Other Parameters - ----------------- - generators_active_power: :pandas:`pandas.DataFrame` - Active power time series in MW. - generators_reactive_power: :pandas:`pandas.DataFrame` - Reactive power time series in MW. + """ + if not isinstance(ts_generators, pd.DataFrame): + raise ValueError("'ts_generators' must be a pandas DataFrame.") + + # write to TimeSeriesRaw + for col in ts_generators: + self.time_series_raw.dispatchable_generators_active_power_by_technology[ + col + ] = ts_generators[col] + + # set generator_names if None + if generator_names is None: + if "other" in ts_generators.columns: + generator_names = edisgo_object.topology.generators_df[ + ~edisgo_object.topology.generators_df.type.isin(["solar", "wind"]) + ].index + else: + generator_names = edisgo_object.topology.generators_df[ + edisgo_object.topology.generators_df.type.isin( + ts_generators.columns + ) + ].index + generator_names = _check_if_components_exist( + edisgo_object, generator_names, "generators" + ) + generators_df = edisgo_object.topology.generators_df.loc[generator_names, :] - """ - # If timeseries have not been set yet, it is not - # necessary to add timeseries - if not hasattr(edisgo_obj.timeseries, "mode"): - logger.debug( - "Timeseries have not been set yet. Please call " - "get_component_timeseries to create " - "timeseries." 
+ # drop existing time series + drop_component_time_series( + obj=self, df_name="generators_active_power", comp_names=generator_names ) - return - # turn single name to list - if isinstance(generator_names, str): - generator_names = [generator_names] - # append timeseries of respective mode - if edisgo_obj.timeseries.mode: - if "worst-case" in edisgo_obj.timeseries.mode: - modes = _get_worst_case_modes(edisgo_obj.timeseries.mode) - # set random timeindex - _worst_case_generation( - edisgo_obj=edisgo_obj, - modes=modes, - generator_names=generator_names, - ) - elif edisgo_obj.timeseries.mode == "manual": - # check inserted timeseries and drop existing generators - gens_active_power = kwargs.get("generators_active_power", None) - if gens_active_power is not None: - check_timeseries_for_index_and_cols( - edisgo_obj, gens_active_power, generator_names - ) - gens_reactive_power = kwargs.get("generators_reactive_power", None) - if gens_reactive_power is not None: - check_timeseries_for_index_and_cols( - edisgo_obj, gens_reactive_power, generator_names - ) - _drop_existing_component_timeseries( - edisgo_obj, "generators", generator_names - ) - # add new timeseries - edisgo_obj.timeseries.generators_active_power = pd.concat( + + # scale time series by nominal power + ts_scaled = generators_df.apply( + lambda x: ts_generators[x.type] * x.p_nom + if x.type in ts_generators.columns + else ts_generators["other"] * x.p_nom, + axis=1, + ).T + if not ts_scaled.empty: + self.generators_active_power = pd.concat( [ - edisgo_obj.timeseries.generators_active_power, - gens_active_power.loc[:, generator_names], + self.generators_active_power, + ts_scaled, ], axis=1, + sort=False, ) - edisgo_obj.timeseries.generators_reactive_power = pd.concat( - [ - edisgo_obj.timeseries.generators_reactive_power, - gens_reactive_power.loc[:, generator_names], - ], - axis=1, + + def predefined_conventional_loads_by_sector( + self, edisgo_object, ts_loads, load_names=None + ): + """ + Set active power 
demand time series for conventional loads by sector. + + Parameters + ---------- + edisgo_object : :class:`~.EDisGo` + ts_loads : str or :pandas:`pandas.DataFrame` + Defines which sector-specific active power time series to use. + Possible options are: + + * 'demandlib' + + Time series for the year specified :py:attr:`~timeindex` are + generated using standard electric load profiles from the oemof + `demandlib `_. + The demandlib provides sector-specific time series for the sectors + 'residential', 'retail', 'industrial', and 'agricultural'. + + * :pandas:`pandas.DataFrame` + + DataFrame with load time series per sector normalized to an annual + consumption of 1. Index needs to + be a :pandas:`pandas.DatetimeIndex`. + Columns contain the sector as string. + In the current grid existing load types can be retrieved from column + `sector` in :attr:`~.network.topology.Topology.loads_df` (make sure to + select `type` 'conventional_load'). + In ding0 grid the differentiated sectors are 'residential', 'retail', + 'industrial', and 'agricultural'. + load_names : list(str) + Defines for which conventional loads to use sector-specific time series. + If None, all loads of sectors for which sector-specific time series are + provided are used. In case the demandlib is used, all loads of sectors + 'residential', 'retail', 'industrial', and 'agricultural' are used. + + """ + # in case time series from demandlib are used, retrieve demandlib time series + if isinstance(ts_loads, str) and ts_loads == "demandlib": + ts_loads = timeseries_import.load_time_series_demandlib( + edisgo_object.config, timeindex=self.timeindex ) - else: + elif not isinstance(ts_loads, pd.DataFrame): raise ValueError( - "{} is not a valid mode.".format(edisgo_obj.timeseries.mode) + "'ts_loads' must either be a pandas DataFrame or 'demandlib'." 
) - else: - ts_dispatchable = kwargs.get("timeseries_generation_dispatchable", None) - if ts_dispatchable is not None: - if hasattr(edisgo_obj.timeseries, "generation_dispatchable"): - edisgo_obj.timeseries.generation_dispatchable = drop_duplicated_columns( - pd.concat( - [ - edisgo_obj.timeseries.generation_dispatchable, - ts_dispatchable, - ], - axis=1, - ), - keep="last", - ) - else: - edisgo_obj.timeseries.generation_dispatchable = ts_dispatchable - - ts_reactive_power = kwargs.get("generation_reactive_power", None) - if ts_reactive_power is not None: - if hasattr(edisgo_obj.timeseries, "generation_reactive_power"): - edisgo_obj.timeseries.generation_reactive_power = ( - drop_duplicated_columns( - pd.concat( - [ - edisgo_obj.timeseries.generation_reactive_power, - ts_reactive_power, - ], - axis=1, - ), - keep="last", - ) - ) - else: - edisgo_obj.timeseries.generation_reactive_power = ts_reactive_power - # create load active and reactive power timeseries - _generation_from_timeseries( - edisgo_obj=edisgo_obj, generator_names=generator_names + elif ts_loads.empty: + logger.warning("The profile you entered is empty. 
Method is skipped.") + return + + # write to TimeSeriesRaw + for col in ts_loads: + self.time_series_raw.conventional_loads_active_power_by_sector[ + col + ] = ts_loads[col] + + # set load_names if None + if load_names is None: + sectors = ts_loads.columns.unique() + load_names = edisgo_object.topology.loads_df[ + edisgo_object.topology.loads_df.sector.isin(sectors) + ].index + load_names = _check_if_components_exist(edisgo_object, load_names, "loads") + loads_df = edisgo_object.topology.loads_df.loc[load_names, :] + + # drop existing time series + drop_component_time_series( + obj=self, df_name="loads_active_power", comp_names=load_names ) + # scale time series by annual consumption + self.loads_active_power = pd.concat( + [ + self.loads_active_power, + loads_df.apply( + lambda x: ts_loads[x.sector] * x.annual_consumption, + axis=1, + ).T, + ], + axis=1, + ) -def add_charging_points_timeseries(edisgo_obj, charging_point_names, **kwargs): - """ - Define generator time series for active and reactive power. + def predefined_charging_points_by_use_case( + self, edisgo_object, ts_loads, load_names=None + ): + """ + Set active power demand time series for charging points by their use case. - Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - charging_point_names: str or list of str - Names of charging points to add timeseries for. + Parameters + ---------- + edisgo_object : :class:`~.EDisGo` + ts_loads : :pandas:`pandas.DataFrame` + DataFrame with self-provided load time series per use case normalized to + a nominal power of the charging point of 1. + Index needs to be a :pandas:`pandas.DatetimeIndex`. + Columns contain the use case as string. + In the current grid existing use case types can be retrieved from column + `sector` in :attr:`~.network.topology.Topology.loads_df` (make sure to + select `type` 'charging_point'). 
+ When using charging point input from SimBEV the differentiated use cases are + 'home', 'work', 'public' and 'hpc'. + load_names : list(str) + Defines for which charging points to use use-case-specific time series. + If None, all charging points of use cases for which use-case-specific time + series are provided are used. - Other Parameters - ----------------- - ts_active_power: :pandas:`pandas.DataFrame` - Active power time series in MW. - ts_reactive_power: :pandas:`pandas.DataFrame` - Reactive power time series in MW. + """ + if not isinstance(ts_loads, pd.DataFrame): + raise ValueError("'ts_loads' must be a pandas DataFrame.") + elif ts_loads.empty: + logger.warning("The profile you entered is empty. Method is skipped.") + return + + # write to TimeSeriesRaw + for col in ts_loads: + self.time_series_raw.charging_points_active_power_by_use_case[ + col + ] = ts_loads[col] + + # set load_names if None + if load_names is None: + sectors = ts_loads.columns.unique() + load_names = edisgo_object.topology.loads_df[ + edisgo_object.topology.loads_df.sector.isin(sectors) + ].index + load_names = _check_if_components_exist(edisgo_object, load_names, "loads") + loads_df = edisgo_object.topology.loads_df.loc[load_names, :] + + # check if all loads are charging points and throw warning if not + if not all(loads_df.type.isin(["charging_point"])): + raise Warning( + "Not all affected loads are charging points. Please check and" + " adapt if necessary." + ) - """ - # TODO: only provision of time series is implemented, worst_case etc. 
- # is missing - ts_active_power = kwargs.get("ts_active_power", None) - if ts_active_power is not None: - check_timeseries_for_index_and_cols( - edisgo_obj, ts_active_power, charging_point_names + # drop existing time series + drop_component_time_series( + obj=self, df_name="loads_active_power", comp_names=load_names ) - ts_reactive_power = kwargs.get("ts_reactive_power", None) - if ts_reactive_power is not None: - check_timeseries_for_index_and_cols( - edisgo_obj, - ts_reactive_power, - charging_point_names, - ) - _drop_existing_component_timeseries( - edisgo_obj, "charging_points", charging_point_names - ) - # add new timeseries - edisgo_obj.timeseries.charging_points_active_power = pd.concat( - [edisgo_obj.timeseries.charging_points_active_power, ts_active_power], - axis=1, - sort=False, - ) - edisgo_obj.timeseries.charging_points_reactive_power = pd.concat( - [ - edisgo_obj.timeseries.charging_points_reactive_power, - ts_reactive_power, - ], - axis=1, - sort=False, - ) + # scale time series by nominal power + self.loads_active_power = pd.concat( + [ + self.loads_active_power, + loads_df.apply( + lambda x: ts_loads[x.sector] * x.p_set, + axis=1, + ).T, + ], + axis=1, + ) -def add_storage_units_timeseries(edisgo_obj, storage_unit_names, **kwargs): - """ - Define storage unit time series for active and reactive power. For more - information on required and optional parameters see description of - :func:`get_component_timeseries`. The mode initially set within - get_component_timeseries is used here to set new timeseries. If a different - mode is required, change edisgo_obj.timeseries.mode to the desired mode and - provide respective parameters. - - Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - storage_unit_names: str or list of str - Names of storage units to add timeseries for. Default None, timeseries - for all storage units of edisgo_obj are set then. 
+ def fixed_cosphi( + self, + edisgo_object, + generators_parametrisation=None, + loads_parametrisation=None, + storage_units_parametrisation=None, + ): + """ + Sets reactive power of specified components assuming a fixed power factor. - """ - # if timeseries have not been set yet, it is not - # necessary to add timeseries - if not hasattr(edisgo_obj.timeseries, "mode"): - logger.debug( - "Timeseries have not been set yet. Please call" - "get_components_timeseries to create timeseries." - ) - return - # turn single name to list - if isinstance(storage_unit_names, str): - storage_unit_names = [storage_unit_names] - # append timeseries of respective mode - if edisgo_obj.timeseries.mode: - if "worst-case" in edisgo_obj.timeseries.mode: - modes = _get_worst_case_modes(edisgo_obj.timeseries.mode) - # set random timeindex - _worst_case_storage( - edisgo_obj=edisgo_obj, - modes=modes, - storage_names=storage_unit_names, - ) - elif edisgo_obj.timeseries.mode == "manual": - storage_units_active_power = kwargs.get("storage_units_active_power", None) - if storage_units_active_power is not None: - check_timeseries_for_index_and_cols( - edisgo_obj, storage_units_active_power, storage_unit_names + Overwrites reactive power time series in case they already exist. + + Parameters + ----------- + generators_parametrisation : str or :pandas:`pandas.DataFrame` or \ + None + Sets fixed cosphi parameters for generators. + Possible options are: + + * 'default' + + Default configuration is used for all generators in the grid. + To this end, the power factors set in the config section + `reactive_power_factor` and the power factor mode, defining whether + components behave inductive or capacitive, given in the config section + `reactive_power_mode`, are used. + + * :pandas:`pandas.DataFrame` + + DataFrame with fix cosphi parametrisation for specified generators. + Columns are: + + * 'components' : list(str) + List with generators to apply parametrisation for. 
+ + * 'mode' : str + Defines whether generators behave inductive or capacitive. + Possible options are 'inductive', 'capacitive' or 'default'. + In case of 'default', configuration from config section + `reactive_power_mode` is used. + + * 'power_factor' : float or str + Defines the fixed cosphi power factor. The power factor can + either be directly provided as float or it can be set to + 'default', in which case configuration from config section + `reactive_power_factor` is used. + + Index of the dataframe is ignored. + + * None + + No reactive power time series are set. + + Default: None. + loads_parametrisation : str or :pandas:`pandas.DataFrame` or None + Sets fixed cosphi parameters for loads. The same options as for parameter + `generators_parametrisation` apply. + storage_units_parametrisation : str or :pandas:`pandas.DataFrame` \ + or None + Sets fixed cosphi parameters for storage units. The same options as for + parameter `generators_parametrisation` apply. + + Notes + ------ + This function requires active power time series to be previously set. 
+ + """ + + def _get_q_sign_and_power_factor_per_component( + parametrisation, components_df, type, q_sign_func + ): + # default configuration + if isinstance(parametrisation, str) and parametrisation == "default": + # get default parametrisation from config + df = assign_voltage_level_to_component( + components_df, edisgo_object.topology.buses_df ) - storage_units_reactive_power = kwargs.get( - "storage_units_reactive_power", None - ) - if storage_units_reactive_power is not None: - check_timeseries_for_index_and_cols( - edisgo_obj, - storage_units_reactive_power, - storage_unit_names, + components_names = df.index + q_sign = q_control._fixed_cosphi_default_reactive_power_sign( + df, type, edisgo_object.config ) - _drop_existing_component_timeseries( - edisgo_obj, "storage_units", storage_unit_names - ) - # add new storage timeseries - edisgo_obj.timeseries.storage_units_active_power = pd.concat( - [ - edisgo_obj.timeseries.storage_units_active_power, - storage_units_active_power.loc[:, storage_unit_names], + power_factor = q_control._fixed_cosphi_default_power_factor( + df, type, edisgo_object.config + ) + elif isinstance(parametrisation, pd.DataFrame): + # check if all given components exist in network and only use existing + components_names = list( + itertools.chain.from_iterable(parametrisation.components) + ) + components_names = _check_if_components_exist( + edisgo_object, components_names, type + ) + # set up series with sign of reactive power and power factors + q_sign = pd.Series() + power_factor = pd.Series() + for index, row in parametrisation.iterrows(): + # get only components that exist in the network + comps = [_ for _ in row["components"] if _ in components_names] + if len(comps) > 0: + # get q_sign (default or given) + if row["mode"] == "default": + df = assign_voltage_level_to_component( + components_df.loc[comps, :], + edisgo_object.topology.buses_df, + ) + q_sign = pd.concat( + [ + q_sign, + q_control._fixed_cosphi_default_reactive_power_sign( 
+ df, type, edisgo_object.config + ), + ] + ) + else: + q_sign = pd.concat( + [ + q_sign, + pd.Series(q_sign_func(row["mode"]), index=comps), + ] + ) + # get power factor (default or given) + if row["power_factor"] == "default": + df = assign_voltage_level_to_component( + components_df.loc[comps, :], + edisgo_object.topology.buses_df, + ) + power_factor = pd.concat( + [ + power_factor, + q_control._fixed_cosphi_default_power_factor( + df, type, edisgo_object.config + ), + ] + ) + else: + power_factor = pd.concat( + [ + power_factor, + pd.Series(row["power_factor"], index=comps), + ] + ) + else: + raise ValueError( + f"'{type}_parametrisation' must either be a pandas DataFrame or " + f"'default'." + ) + + # write reactive power configuration to TimeSeriesRaw + # delete existing previous settings + self.time_series_raw.q_control.drop( + index=self.time_series_raw.q_control.index[ + self.time_series_raw.q_control.index.isin(components_names) ], - axis=1, + inplace=True, ) - edisgo_obj.timeseries.storage_units_reactive_power = pd.concat( + self.time_series_raw.q_control = pd.concat( [ - edisgo_obj.timeseries.storage_units_reactive_power, - storage_units_reactive_power.loc[:, storage_unit_names], - ], - axis=1, + self.time_series_raw.q_control, + pd.DataFrame( + index=components_names, + data={ + "type": "fixed_cosphi", + "q_sign": q_sign, + "power_factor": power_factor, + }, + ), + ] ) - else: - raise ValueError( - "{} is not a valid mode.".format(edisgo_obj.timeseries.mode) + + # drop existing time series + drop_component_time_series( + obj=self, df_name=f"{type}_reactive_power", comp_names=components_names + ) + + return q_sign, power_factor + + # set reactive power for generators + if ( + generators_parametrisation is not None + and not edisgo_object.topology.generators_df.empty + ): + q_sign, power_factor = _get_q_sign_and_power_factor_per_component( + parametrisation=generators_parametrisation, + components_df=edisgo_object.topology.generators_df, + 
type="generators", + q_sign_func=q_control.get_q_sign_generator, + ) + # calculate reactive power + reactive_power = q_control.fixed_cosphi( + self.generators_active_power.loc[:, q_sign.index], q_sign, power_factor + ) + self.generators_reactive_power = pd.concat( + [self.generators_reactive_power, reactive_power], axis=1 + ) + if ( + loads_parametrisation is not None + and not edisgo_object.topology.loads_df.empty + ): + q_sign, power_factor = _get_q_sign_and_power_factor_per_component( + parametrisation=loads_parametrisation, + components_df=edisgo_object.topology.loads_df, + type="loads", + q_sign_func=q_control.get_q_sign_load, ) - else: - # create load active and reactive power timeseries - _storage_from_timeseries( - edisgo_obj=edisgo_obj, - name_storage_units=storage_unit_names, - ts_active_power=kwargs.get("timeseries_storage_units", None), - ts_reactive_power=kwargs.get( - "timeseries_storage_units_reactive_power", None - ), + # calculate reactive power + reactive_power = q_control.fixed_cosphi( + self.loads_active_power.loc[:, q_sign.index], q_sign, power_factor + ) + self.loads_reactive_power = pd.concat( + [self.loads_reactive_power, reactive_power], axis=1 + ) + if ( + storage_units_parametrisation is not None + and not edisgo_object.topology.storage_units_df.empty + ): + q_sign, power_factor = _get_q_sign_and_power_factor_per_component( + parametrisation=storage_units_parametrisation, + components_df=edisgo_object.topology.storage_units_df, + type="storage_units", + q_sign_func=q_control.get_q_sign_generator, + ) + # calculate reactive power + reactive_power = q_control.fixed_cosphi( + self.storage_units_active_power.loc[:, q_sign.index], + q_sign, + power_factor, + ) + self.storage_units_reactive_power = pd.concat( + [self.storage_units_reactive_power, reactive_power], axis=1 + ) + + @property + def residual_load(self): + """ + Returns residual load in network. 
+ + Residual load for each time step is calculated from total load + minus total generation minus storage active power (discharge is + positive). + A positive residual load represents a load case while a negative + residual load here represents a feed-in case. + Grid losses are not considered. + + Returns + ------- + :pandas:`pandas.Series` + Series with residual load in MW. + + """ + return ( + self.loads_active_power.sum(axis=1) + - self.generators_active_power.sum(axis=1) + - self.storage_units_active_power.sum(axis=1) ) + @property + def timesteps_load_feedin_case(self): + """ + Contains residual load and information on feed-in and load case. -def _drop_existing_component_timeseries(edisgo_obj, comp_type, comp_names): - """ - Drop columns of active and reactive power timeseries of 'comp_type' - components with names 'comp_names'. + Residual load is calculated from total (load - generation) in the + network. Grid losses are not considered. - Parameters - ---------- - edisgo_obj: :class:`~.self.edisgo.EDisGo` - The eDisGo model overall container - comp_type: str - Specification of component type, either 'loads', 'generators' or - 'storage_units' - comp_names: list of str - List of names of components that are to be dropped + Feed-in and load case are identified based on the + generation, load and storage time series and defined as follows: - """ - if isinstance(comp_names, str): - comp_names = [comp_names] - # drop existing timeseries of component - setattr( - edisgo_obj.timeseries, - comp_type + "_active_power", - getattr(edisgo_obj.timeseries, comp_type + "_active_power").drop( - getattr(edisgo_obj.timeseries, comp_type + "_active_power").columns[ - getattr( - edisgo_obj.timeseries, comp_type + "_active_power" - ).columns.isin(comp_names) - ], - axis=1, - ), - ) - setattr( - edisgo_obj.timeseries, - comp_type + "_reactive_power", - getattr(edisgo_obj.timeseries, comp_type + "_reactive_power").drop( - getattr(edisgo_obj.timeseries, comp_type + 
"_reactive_power").columns[ - getattr( - edisgo_obj.timeseries, comp_type + "_reactive_power" - ).columns.isin(comp_names) - ], - axis=1, - ), - ) + 1. Load case: positive (load - generation - storage) at HV/MV + substation + 2. Feed-in case: negative (load - generation - storage) at HV/MV + substation + + Returns + ------- + :pandas:`pandas.Series` + + Series with information on whether time step is handled as load + case ('load_case') or feed-in case ('feed-in_case') for each time + step in :py:attr:`~timeindex`. + + """ + + return self.residual_load.apply( + lambda _: "feed-in_case" if _ < 0.0 else "load_case" + ) + @property + def _attributes(self): + return [ + "loads_active_power", + "loads_reactive_power", + "generators_active_power", + "generators_reactive_power", + "storage_units_active_power", + "storage_units_reactive_power", + ] + + def reduce_memory( + self, attr_to_reduce=None, to_type="float32", time_series_raw=True, **kwargs + ): + """ + Reduces size of dataframes to save memory. + + See :attr:`EDisGo.reduce_memory` for more information. + + Parameters + ----------- + attr_to_reduce : list(str), optional + List of attributes to reduce size for. Per default, all active + and reactive power time series of generators, loads, and storage units + are reduced. + to_type : str, optional + Data type to convert time series data to. This is a tradeoff + between precision and memory. Default: "float32". + time_series_raw : bool, optional + If True raw time series data in :py:attr:`~time_series_raw` is reduced + as well. Default: True. + + Other Parameters + ------------------ + attr_to_reduce_raw : list(str), optional + List of attributes in :class:`~.network.timeseries.TimeSeriesRaw` to reduce + size for. See :attr:`~.network.timeseries.TimeSeriesRaw.reduce_memory` + for default. 
+ + """ + if attr_to_reduce is None: + attr_to_reduce = self._attributes + for attr in attr_to_reduce: + setattr( + self, + attr, + getattr(self, attr).apply(lambda _: _.astype(to_type)), + ) + if time_series_raw: + self.time_series_raw.reduce_memory( + kwargs.get("attr_to_reduce_raw", None), to_type=to_type + ) + + def to_csv(self, directory, reduce_memory=False, time_series_raw=False, **kwargs): + """ + Saves component time series to csv. + + Saves the following time series to csv files with the same file name + (if the time series dataframe is not empty): + + * loads_active_power and loads_reactive_power + * generators_active_power and generators_reactive_power + * storage_units_active_power and storage_units_reactive_power + + If parameter `time_series_raw` is set to True, raw time series data is saved + to csv as well. See :attr:`~.network.timeseries.TimeSeriesRaw.to_csv` + for more information. -def check_timeseries_for_index_and_cols(edisgo_obj, timeseries, component_names): + Parameters + ---------- + directory : str + Directory to save time series in. + reduce_memory : bool, optional + If True, size of dataframes is reduced using + :attr:`~.network.timeseries.TimeSeries.reduce_memory`. + Optional parameters of + :attr:`~.network.timeseries.TimeSeries.reduce_memory` + can be passed as kwargs to this function. Default: False. + time_series_raw : bool, optional + If True raw time series data in :py:attr:`~time_series_raw` is saved to csv + as well. Per default all raw time series data is then stored in a + subdirectory of the specified `directory` called "time_series_raw". Further, + if `reduce_memory` is set to True, raw time series data is reduced as well. + To change this default behavior please call + :attr:`~.network.timeseries.TimeSeriesRaw.to_csv` separately. + Default: False. + + Other Parameters + ------------------ + kwargs : + Kwargs may contain arguments of + :attr:`~.network.timeseries.TimeSeries.reduce_memory`. 
+ + """ + if reduce_memory is True: + self.reduce_memory(**kwargs) + + os.makedirs(directory, exist_ok=True) + + for attr in self._attributes: + if not getattr(self, attr).empty: + getattr(self, attr).to_csv(os.path.join(directory, f"{attr}.csv")) + + if time_series_raw: + self.time_series_raw.to_csv( + directory=os.path.join(directory, "time_series_raw"), + reduce_memory=reduce_memory, + ) + + def from_csv(self, directory, time_series_raw=False, **kwargs): + """ + Restores time series from csv files. + + See :func:`~to_csv` for more information on which time series can be saved and + thus restored. + + Parameters + ---------- + directory : str + Directory time series are saved in. + time_series_raw : bool, optional + If True raw time series data is as well read in (see + :attr:`~.network.timeseries.TimeSeriesRaw.from_csv` for further + information). Directory data is restored from can be specified through + kwargs. + Default: False. + + Other Parameters + ------------------ + directory_raw : str, optional + Directory to read raw time series data from. Per default this is a + subdirectory of the specified `directory` called "time_series_raw". + + """ + timeindex = None + for attr in self._attributes: + path = os.path.join(directory, f"{attr}.csv") + if os.path.exists(path): + setattr( + self, + attr, + pd.read_csv(path, index_col=0, parse_dates=True), + ) + if timeindex is None: + timeindex = getattr(self, f"_{attr}").index + if timeindex is None: + timeindex = pd.DatetimeIndex([]) + self._timeindex = timeindex + + if time_series_raw: + self.time_series_raw.from_csv( + directory=kwargs.get( + "directory_raw", os.path.join(directory, "time_series_raw") + ) + ) + + def check_integrity(self): + """ + Check for NaN, duplicated indices or columns and if time series is empty. + """ + if len(self.timeindex) == 0: + logger.warning("No time index set. 
Empty time series will be returned.") + else: + for attr in self._attributes: + df = getattr(self, attr) + + if df.isnull().any().any(): + logger.warning(f"There are null values in {attr}") + + if any(df.index.duplicated()): + duplicated_labels = df.index[df.index.duplicated()].values + logger.warning( + f"{attr} has duplicated indices: {duplicated_labels}" + ) + + if any(df.columns.duplicated()): + duplicated_labels = df.columns[df.columns.duplicated()].values + logger.warning( + f"{attr} has duplicated columns: {duplicated_labels}" + ) + + +class TimeSeriesRaw: """ - Checks index and column names of inserted timeseries to make sure, they - have the right format. + Holds raw time series data, e.g. sector-specific demand and standing times of EV. + + Normalised time series are e.g. sector-specific demand time series or + technology-specific feed-in time series. Time series needed for + flexibilities are e.g. heat time series or curtailment time series. + + Attributes + ------------ + q_control : :pandas:`pandas.DataFrame` + Dataframe with information on applied reactive power control or in case of + conventional loads assumed reactive power behavior. Index of the dataframe are + the component names as in index of + :attr:`~.network.topology.Topology.generators_df`, + :attr:`~.network.topology.Topology.loads_df`, and + :attr:`~.network.topology.Topology.storage_units_df`. Columns are + "type" with the type of Q-control applied (can be "fixed_cosphi", "cosphi(P)", + or "Q(V)"), + "power_factor" with the (maximum) power factor, + "q_sign" giving the sign of the reactive power (only applicable to + "fixed_cosphi"), + "parametrisation" with the parametrisation of the + respective Q-control (only applicable to "cosphi(P)" and "Q(V)"). + fluctuating_generators_active_power_by_technology : \ + :pandas:`pandas.DataFrame` + DataFrame with feed-in time series per technology or technology and + weather cell ID normalized to a nominal capacity of 1. 
+ Columns can either just contain the technology type as string or + be a :pandas:`pandas.MultiIndex` with the + first level containing the technology as string and the second level + the weather cell ID as integer. + Index is a :pandas:`pandas.DatetimeIndex`. + dispatchable_generators_active_power_by_technology : \ + :pandas:`pandas.DataFrame` + DataFrame with feed-in time series per technology normalized to a nominal + capacity of 1. + Columns contain the technology type as string. + Index is a :pandas:`pandas.DatetimeIndex`. + conventional_loads_active_power_by_sector : :pandas:`pandas.DataFrame` + DataFrame with load time series of each type of conventional load + normalized to an annual consumption of 1. Index needs to + be a :pandas:`pandas.DatetimeIndex`. + Columns represent load type. In ding0 grids the + differentiated sectors are 'residential', 'retail', 'industrial', and + 'agricultural'. + charging_points_active_power_by_use_case : :pandas:`pandas.DataFrame` + DataFrame with charging demand time series per use case normalized to a nominal + capacity of 1. + Columns contain the use case as string. + Index is a :pandas:`pandas.DatetimeIndex`. - Parameters - ---------- - timeseries: :pandas:`pandas.DataFrame` - inserted timeseries - component_names: list of str - names of components of which timeseries are to be added """ - if (~edisgo_obj.timeseries.timeindex.isin(timeseries.index)).any(): - raise ValueError( - "Inserted timeseries for the following " - "components have the a wrong time index: " - "{}. Values are missing.".format(component_names) + + def __init__(self): + self.q_control = pd.DataFrame( + columns=["type", "q_sign", "power_factor", "parametrisation"] + ) + self.fluctuating_generators_active_power_by_technology = pd.DataFrame( + dtype=float ) - if any(comp not in timeseries.columns for comp in component_names): - raise ValueError( - "Columns of inserted timeseries are not the same " - "as names of components to be added. 
Timeseries " - "for the following components were tried to be " - "added: {}".format(component_names) + self.dispatchable_generators_active_power_by_technology = pd.DataFrame( + dtype=float ) + self.conventional_loads_active_power_by_sector = pd.DataFrame(dtype=float) + self.charging_points_active_power_by_use_case = pd.DataFrame(dtype=float) + @property + def _attributes(self): + return [ + "q_control", + "fluctuating_generators_active_power_by_technology", + "dispatchable_generators_active_power_by_technology", + "conventional_loads_active_power_by_sector", + "charging_points_active_power_by_use_case", + ] -def _get_worst_case_modes(mode): - """ - Returns list of modes to be handled in worst case analysis. + def reduce_memory(self, attr_to_reduce=None, to_type="float32"): + """ + Reduces size of dataframes to save memory. - Parameters - ---------- - mode: str - string containing 'worst-case' and specifies case + See :attr:`EDisGo.reduce_memory` for more information. - Returns - ------- - modes: list of str - list which can contains 'feedin-case', 'load_case' or both - """ - if mode == "worst-case": - modes = ["feedin_case", "load_case"] - elif mode == "worst-case-feedin" or mode == "worst-case-load": - modes = ["{}_case".format(mode.split("-")[-1])] - else: - raise ValueError("{} is not a valid mode.".format(mode)) - return modes + Parameters + ----------- + attr_to_reduce : list(str), optional + List of attributes to reduce size for. Attributes need to be + dataframes containing only time series. Per default, all active + and reactive power time series of generators, loads, storage units + and charging points are reduced. + to_type : str, optional + Data type to convert time series data to. This is a tradeoff + between precision and memory. Default: "float32". 
+ + """ + if attr_to_reduce is None: + attr_to_reduce = self._attributes + # remove attributes that do not contain only floats + if "q_control" in attr_to_reduce: + attr_to_reduce.remove("q_control") + for attr in attr_to_reduce: + if hasattr(self, attr) and getattr(self, attr) is not None: + setattr( + self, attr, getattr(self, attr).apply(lambda _: _.astype(to_type)) + ) + def to_csv(self, directory, reduce_memory=False, **kwargs): + """ + Saves time series to csv. -def _get_q_sign_generator(reactive_power_mode): - """ - Get the sign of reactive power in generator sign convention. + Saves all attributes that are set to csv files with the same file name. + See class definition for possible attributes. - In the generator sign convention the reactive power is negative in - inductive operation (`reactive_power_mode` is 'inductive') and positive - in capacitive operation (`reactive_power_mode` is 'capacitive'). + Parameters + ---------- + directory: str + Directory to save time series in. + reduce_memory : bool, optional + If True, size of dataframes is reduced using + :attr:`~.network.timeseries.TimeSeriesRaw.reduce_memory`. Optional + parameters of + :attr:`~.network.timeseries.TimeSeriesRaw.reduce_memory` + can be passed as kwargs to this function. Default: False. - Parameters - ---------- - reactive_power_mode : str - Possible options are 'inductive' and 'capacitive'. + Other Parameters + ------------------ + kwargs : + Kwargs may contain optional arguments of + :attr:`~.network.timeseries.TimeSeriesRaw.reduce_memory`. - Returns - -------- - int - Sign of reactive power in generator sign convention. 
+ """ + if reduce_memory is True: + self.reduce_memory(**kwargs) - """ - if reactive_power_mode.lower() == "inductive": - return -1 - elif reactive_power_mode.lower() == "capacitive": - return 1 - else: - raise ValueError( - "reactive_power_mode must either be 'capacitive' " - "or 'inductive' but is {}.".format(reactive_power_mode) - ) + os.makedirs(directory, exist_ok=True) + for attr in self._attributes: + if hasattr(self, attr) and not getattr(self, attr).empty: + getattr(self, attr).to_csv(os.path.join(directory, f"{attr}.csv")) + + def from_csv(self, directory): + """ + Restores time series from csv files. + + See :func:`~to_csv` for more information on which time series are + saved. + + Parameters + ---------- + directory : str + Directory time series are saved in. + + """ + timeindex = None + for attr in self._attributes: + path = os.path.join(directory, f"{attr}.csv") + if os.path.exists(path): + setattr( + self, + attr, + pd.read_csv(path, index_col=0, parse_dates=True), + ) + if timeindex is None: + timeindex = getattr(self, f"{attr}").index + if timeindex is None: + timeindex = pd.DatetimeIndex([]) + self._timeindex = timeindex -def _get_q_sign_load(reactive_power_mode): - """ - Get the sign of reactive power in load sign convention. - In the load sign convention the reactive power is positive in - inductive operation (`reactive_power_mode` is 'inductive') and negative - in capacitive operation (`reactive_power_mode` is 'capacitive'). +def drop_component_time_series(obj, df_name, comp_names): + """ + Drop component time series. Parameters ---------- - reactive_power_mode : str - Possible options are 'inductive' and 'capacitive'. - - Returns - -------- - int - Sign of reactive power in load sign convention. + obj : obj + Object with attr `df_name` to remove columns from. Can e.g. be + :class:`~.network.timeseries.TimeSeries`. + df_name : str + Name of attribute of given object holding the dataframe to remove columns from. + Can e.g. 
be "generators_active_power" if time series should be removed from + :attr:`~.network.timeseries.TimeSeries.generators_active_power`. + comp_names: str or list(str) + Names of components to drop. """ - if reactive_power_mode.lower() == "inductive": - return 1 - elif reactive_power_mode.lower() == "capacitive": - return -1 - else: - raise ValueError( - "reactive_power_mode must either be 'capacitive' " - "or 'inductive' but is {}.".format(reactive_power_mode) - ) + if isinstance(comp_names, str): + comp_names = [comp_names] + # drop existing time series of component + setattr( + obj, + df_name, + getattr(obj, df_name).drop( + getattr(obj, df_name).columns[ + getattr(obj, df_name).columns.isin(comp_names) + ], + axis=1, + ), + ) -def fixed_cosphi(active_power, q_sign, power_factor): +def _add_component_time_series(obj, df_name, ts_new): """ - Calculates reactive power for a fixed cosphi operation. + Add component time series. Parameters ---------- - active_power : :pandas:`pandas.DataFrame` - Dataframe with active power time series. Columns of the dataframe are - names of the components and index of the dataframe are the time steps - reactive power is calculated for. - q_sign : :pandas:`pandas.Series` or int - `q_sign` defines whether the reactive power is positive or - negative and must either be -1 or +1. In case `q_sign` is given as a - series, the index must contain the same component names as given in - columns of parameter `active_power`. - power_factor : :pandas:`pandas.Series` or float - Ratio of real to apparent power. - In case `power_factor` is given as a series, the index must contain the - same component names as given in columns of parameter `active_power`. - - Returns - ------- - :pandas:`pandas.DataFrame` - Dataframe with the same format as the `active_power` dataframe, - containing the reactive power. + obj : obj + Object with attr `df_name` to add columns to. Can e.g. be + :class:`~.network.timeseries.TimeSeries`. 
+ df_name : str + Name of attribute of given object holding the dataframe to add columns to. + Can e.g. be "generators_active_power" if time series should be added to + :attr:`~.network.timeseries.TimeSeries.generators_active_power`. + ts_new : :pandas:`pandas.DataFrame` + Dataframe with new time series to add to existing time series dataframe. """ - return active_power * q_sign * np.tan(np.arccos(power_factor)) + setattr( + obj, + df_name, + pd.concat( + [getattr(obj, df_name), ts_new], + axis=1, + ), + ) -def _set_reactive_power_time_series_for_fixed_cosphi_using_config( - edisgo_obj, df, component_type -): +def _check_if_components_exist(edisgo_object, component_names, component_type): """ - Calculates reactive power in Mvar for a fixed cosphi operation. + Checks if all provided components exist in the network. - This function adds the calculated reactive power time series to the - :class:`~.network.timeseries.TimeSeries` object. For - `component_type`='generators' time series is added to - :attr:`~.network.timeseries.TimeSeries.generators_reactive_power`, for - `component_type`='storage_units' time series is added to - :attr:`~.network.timeseries.TimeSeries.storage_units_reactive_power` and - for `component_type`='loads' time series is added to - :attr:`~.network.timeseries.TimeSeries.loads_reactive_power`. + Raises warning if there any provided components that are not in the network. Parameters ---------- - edisgo_obj : :class:`~.EDisGo` - df : :pandas:`pandas.DataFrame` - Dataframe with component names (in the index) of all components - reactive power needs to be calculated for. Only required column is - column 'bus', giving the name of the bus the component is connected to. - All components must have the same `component_type`. + edisgo_object : :class:`~.EDisGo` + component_names : list(str) + Names of components for which time series are added. component_type : str - Specifies whether to calculate reactive power for generators, storage - units or loads. 
The component type determines the power factor and - power mode used. Possible options are 'generators', 'storage_units' and - 'loads'. - - Notes - ----- - Reactive power is determined based on reactive power factors and reactive - power modes defined in the config file 'config_timeseries' in sections - 'reactive_power_factor' and 'reactive_power_mode'. Both are distinguished - between the voltage level the components are in (medium or low voltage). + The component type for which time series are added. + Possible options are 'generators', 'storage_units', 'loads'. + + Returns + -------- + set(str) + Returns a set of all provided components that are in the network. """ - if df.empty: - return - - # assign voltage level to generators - df = assign_voltage_level_to_component(edisgo_obj, df) - - # get default configurations - reactive_power_mode = edisgo_obj.config["reactive_power_mode"] - reactive_power_factor = edisgo_obj.config["reactive_power_factor"] - voltage_levels = df.voltage_level.unique() - - # write series with sign of reactive power and power factor - # for each component - q_sign = pd.Series(index=df.index) - power_factor = pd.Series(index=df.index) - if component_type in ["generators", "storage_units"]: - get_q_sign = _get_q_sign_generator - elif component_type == "loads": - get_q_sign = _get_q_sign_load - else: - raise ValueError( - "Given 'component_type' is not valid. Valid options are " - "'generators','storage_units' and 'loads'." 
- ) - for voltage_level in voltage_levels: - cols = df.index[df.voltage_level == voltage_level] - if len(cols) > 0: - q_sign[cols] = get_q_sign( - reactive_power_mode["{}_gen".format(voltage_level)] - ) - power_factor[cols] = reactive_power_factor["{}_gen".format(voltage_level)] + comps_in_network = getattr(edisgo_object.topology, f"{component_type}_df").index - # calculate reactive power time series and append to TimeSeries object - reactive_power_df = drop_duplicated_columns( - pd.concat( - [ - getattr(edisgo_obj.timeseries, component_type + "_reactive_power"), - fixed_cosphi( - getattr( - edisgo_obj.timeseries, component_type + "_active_power" - ).loc[:, df.index], - q_sign, - power_factor, - ), - ], - axis=1, - ), - keep="last", - ) + comps_not_in_network = list(set(component_names) - set(comps_in_network)) - setattr( - edisgo_obj.timeseries, - component_type + "_reactive_power", - reactive_power_df, - ) + if comps_not_in_network: + logging.warning( + f"Some of the provided {component_type} are not in the network. This " + f"concerns the following components: {comps_not_in_network}." 
+ ) + + return set(component_names) - set(comps_not_in_network) + return component_names diff --git a/edisgo/network/topology.py b/edisgo/network/topology.py index b373a7c08..3d5f2d594 100755 --- a/edisgo/network/topology.py +++ b/edisgo/network/topology.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import logging import os import random @@ -9,7 +11,6 @@ import edisgo -from edisgo.io.ding0_import import _validate_ding0_grid_import from edisgo.network.components import Switch from edisgo.network.grids import LVGrid, MVGrid from edisgo.tools import geo, networkx_helper @@ -25,10 +26,10 @@ from shapely.ops import transform from shapely.wkt import loads as wkt_loads -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) COLUMNS = { - "loads_df": ["bus", "p_nom", "type", "annual_consumption", "sector"], + "loads_df": ["bus", "p_set", "type", "annual_consumption", "sector"], "generators_df": [ "bus", "p_nom", @@ -37,7 +38,6 @@ "weather_cell_id", "subtype", ], - "charging_points_df": ["bus", "p_nom", "type", "sector"], "storage_units_df": ["bus", "control", "p_nom"], "transformers_df": ["bus0", "bus1", "x_pu", "r_pu", "s_nom", "type_info"], "lines_df": [ @@ -205,11 +205,14 @@ def loads_df(self): bus : str Identifier of bus load is connected to. - p_nom : float + p_set : float Peak load or nominal capacity in MW. type : str - Load type. E.g. 'load', 'charging_point' or 'heat_pump' + Type of load, e.g. 'conventional_load', 'charging_point' or 'heat_pump'. + This information is for example currently necessary when setting up a + worst case analysis, as different types of loads are treated + differently. annual_consumption : float Annual consumption in MWh. 
@@ -231,7 +234,7 @@ def loads_df(self): """ try: return self._loads_df - except: + except Exception: return pd.DataFrame(columns=COLUMNS["loads_df"]) @loads_df.setter @@ -287,7 +290,7 @@ def generators_df(self): """ try: return self._generators_df - except: + except Exception: return pd.DataFrame(columns=COLUMNS["generators_df"]) @generators_df.setter @@ -326,7 +329,7 @@ def storage_units_df(self): """ try: return self._storage_units_df - except: + except Exception: return pd.DataFrame(columns=COLUMNS["storage_units_df"]) @storage_units_df.setter @@ -371,7 +374,7 @@ def transformers_df(self): """ try: return self._transformers_df - except: + except Exception: return pd.DataFrame(columns=COLUMNS["transformers_df"]) @transformers_df.setter @@ -398,7 +401,7 @@ def transformers_hvmv_df(self): """ try: return self._transformers_hvmv_df - except: + except Exception: return pd.DataFrame(columns=COLUMNS["transformers_df"]) @transformers_hvmv_df.setter @@ -458,7 +461,7 @@ def lines_df(self): """ try: return self._lines_df - except: + except Exception: return pd.DataFrame(columns=COLUMNS["lines_df"]) @lines_df.setter @@ -505,14 +508,14 @@ def buses_df(self): """ try: return self._buses_df - except: + except Exception: return pd.DataFrame(columns=COLUMNS["buses_df"]) @buses_df.setter def buses_df(self, df): # make sure in_building takes on only True or False (not numpy bools) # needs to be tested using `== True`, not `is True` - buses_in_building = df[df.in_building == True].index + buses_in_building = df[df.in_building == True].index # noqa: E712 df.loc[buses_in_building, "in_building"] = True df.loc[~df.index.isin(buses_in_building), "in_building"] = False self._buses_df = df @@ -558,7 +561,7 @@ def switches_df(self): """ try: return self._switches_df - except: + except Exception: return pd.DataFrame(columns=COLUMNS["switches_df"]) @switches_df.setter @@ -566,9 +569,9 @@ def switches_df(self, df): self._switches_df = df @property - def charging_points_df(self, 
type="charging_point"): + def charging_points_df(self): """ - Returns a subset from :py:attr:`~loads_df` containing only charging points. + Returns a subset of :py:attr:`~loads_df` containing only charging points. Parameters ---------- @@ -581,12 +584,10 @@ def charging_points_df(self, type="charging_point"): Pandas DataFrame with all loads of the given type. """ - if type in self.loads_df.type.unique(): - return self.loads_df.loc[self.loads_df.type == type].dropna( - axis=1, how="all" - ) + if "charging_point" in self.loads_df.type.unique(): + return self.loads_df.loc[self.loads_df.type == "charging_point"] else: - return pd.DataFrame(columns=COLUMNS["charging_points_df"]) + return pd.DataFrame(columns=COLUMNS["loads_df"]) @property def id(self): @@ -727,8 +728,11 @@ def get_connected_lines_from_bus(self, bus_name): :attr:`~.network.topology.Topology.lines_df`. """ - return self.lines_df.loc[self.lines_df.bus0 == bus_name].append( - self.lines_df.loc[self.lines_df.bus1 == bus_name] + return pd.concat( + [ + self.lines_df.loc[self.lines_df.bus0 == bus_name], + self.lines_df.loc[self.lines_df.bus1 == bus_name], + ] ) def get_line_connecting_buses(self, bus_1, bus_2): @@ -780,22 +784,37 @@ def get_connected_components_from_bus(self, bus_name): components["generators"] = self.generators_df.loc[ self.generators_df.bus == bus_name ] + components["loads"] = self.loads_df.loc[self.loads_df.bus == bus_name] + components["storage_units"] = self.storage_units_df.loc[ self.storage_units_df.bus == bus_name ] + components["lines"] = self.get_connected_lines_from_bus(bus_name) - components["transformers"] = self.transformers_df.loc[ - self.transformers_df.bus0 == bus_name - ].append(self.transformers_df.loc[self.transformers_df.bus1 == bus_name]) - components["transformers_hvmv"] = self.transformers_hvmv_df.loc[ - self.transformers_hvmv_df.bus0 == bus_name - ].append( - self.transformers_hvmv_df.loc[self.transformers_hvmv_df.bus1 == bus_name] + + components["transformers"] = 
pd.concat( + [ + self.transformers_df.loc[self.transformers_df.bus0 == bus_name], + self.transformers_df.loc[self.transformers_df.bus1 == bus_name], + ] + ) + + components["transformers_hvmv"] = pd.concat( + [ + self.transformers_hvmv_df.loc[ + self.transformers_hvmv_df.bus0 == bus_name + ], + self.transformers_hvmv_df.loc[ + self.transformers_hvmv_df.bus1 == bus_name + ], + ] ) + components["switches"] = self.switches_df.loc[ self.switches_df.bus_closed == bus_name ] + return components def get_neighbours(self, bus_name): @@ -905,7 +924,7 @@ def _check_line_for_removal(self, line_name): return True return False - def add_load(self, bus, p_nom, type="load", **kwargs): + def add_load(self, bus, p_set, type="conventional_load", **kwargs): """ Adds load to topology. @@ -915,11 +934,11 @@ def add_load(self, bus, p_nom, type="load", **kwargs): ---------- bus : str See :py:attr:`~loads_df` for more information. - p_nom : float + p_set : float See :py:attr:`~loads_df` for more information. type : str See :py:attr:`~loads_df` for more information. 
- Default: "load" + Default: "conventional_load" Other Parameters ----------------- @@ -951,7 +970,7 @@ def add_load(self, bus, p_nom, type="load", **kwargs): else: grid_name = "MVGrid_" + str(int(bus_s.mv_grid_id)) - type_name = "".join([val.capitalize() for val in type.split("_")]) + type_name = "_".join([val.capitalize() for val in type.split("_")]) tmp = f"{type_name}_{grid_name}" @@ -965,7 +984,7 @@ def add_load(self, bus, p_nom, type="load", **kwargs): self._grids[grid_name].loads_df.type == type ] - load_id = len(type_df) + load_id = len(type_df) + 1 load_name = f"{tmp}_{load_id}" @@ -978,7 +997,7 @@ def add_load(self, bus, p_nom, type="load", **kwargs): # create new load dataframe data = { "bus": bus, - "p_nom": p_nom, + "p_set": p_set, "type": type, } data.update(kwargs) @@ -997,7 +1016,12 @@ def add_load(self, bus, p_nom, type="load", **kwargs): for col in new_df.columns: new_df[col] = pd.to_numeric(new_df[col], errors="ignore") - self._loads_df = self.loads_df.append(new_df) + self._loads_df = pd.concat( + [ + self.loads_df, + new_df, + ] + ) return load_name @@ -1058,7 +1082,7 @@ def add_generator(self, bus, p_nom, generator_type, control="PQ", **kwargs): while generator_name in self.generators_df.index: random.seed(a=generator_name) generator_name = "Generator_{}_{}".format( - tmp, random.randint(10 ** 8, 10 ** 9) + tmp, random.randint(10**8, 10**9) ) # create new generator dataframe @@ -1083,7 +1107,12 @@ def add_generator(self, bus, p_nom, generator_type, control="PQ", **kwargs): for col in new_df.columns: new_df[col] = pd.to_numeric(new_df[col], errors="ignore") - self.generators_df = self.generators_df.append(new_df) + self.generators_df = pd.concat( + [ + self.generators_df, + new_df, + ] + ) return generator_name def add_storage_unit(self, bus, p_nom, control="PQ", **kwargs): @@ -1121,14 +1150,14 @@ def add_storage_unit(self, bus, p_nom, control="PQ", **kwargs): grid_name = "LVGrid_" + str(int(bus_s.lv_grid_id)) else: grid_name = "MVGrid_" + 
str(int(bus_s.mv_grid_id)) - storage_id = len(self._grids[grid_name].storage_units_df) + storage_id = len(self._grids[grid_name].storage_units_df) + 1 storage_name = "StorageUnit_{}_{}".format(grid_name, storage_id) if storage_name in self.storage_units_df.index: storage_name = "StorageUnit_{}_{}".format(grid_name, storage_id + 1) while storage_name in self.storage_units_df.index: random.seed(a=storage_name) storage_name = "StorageUnit_{}_{}".format( - grid_name, random.randint(10 ** 8, 10 ** 9) + grid_name, random.randint(10**8, 10**9) ) # create new storage unit dataframe @@ -1148,7 +1177,12 @@ def add_storage_unit(self, bus, p_nom, control="PQ", **kwargs): for col in new_df.columns: new_df[col] = pd.to_numeric(new_df[col], errors="ignore") - self.storage_units_df = self.storage_units_df.append(new_df) + self.storage_units_df = pd.concat( + [ + self.storage_units_df, + new_df, + ] + ) return storage_name def add_line(self, bus0, bus1, length, **kwargs): @@ -1188,24 +1222,21 @@ def _get_line_data(): Line data from equipment_data. 
""" - if self.buses_df.loc[bus0, "v_nom"] < 1: - voltage_level = "lv" - else: - voltage_level = "mv" - + voltage_level = "lv" if self.buses_df.loc[bus0, "v_nom"] < 1 else "mv" # try to get cable data try: - line_data = self.equipment_data["{}_cables".format(voltage_level)].loc[ + line_data = self.equipment_data[f"{voltage_level}_cables"].loc[ type_info, : ] except KeyError: try: line_data = self.equipment_data[ - "{}_overhead_lines".format(voltage_level) + f"{voltage_level}_overhead_lines" ].loc[type_info, :] - except: + + except Exception: raise ValueError("Specified line type is not valid.") - except: + except Exception: raise return line_data @@ -1230,7 +1261,12 @@ def _get_line_data(): ] if not bus0_bus1.empty and bus1_bus0.empty: logging.debug("Line between bus0 {} and bus1 {} already exists.") - return bus1_bus0.append(bus0_bus1).index[0] + return pd.concat( + [ + bus1_bus0, + bus0_bus1, + ] + ).index[0] # unpack optional parameters x = kwargs.get("x", None) @@ -1264,7 +1300,7 @@ def _get_line_data(): while line_name in self.lines_df.index: random.seed(a=line_name) line_name = "Line_{}_{}_{}".format( - bus0, bus1, random.randint(10 ** 8, 10 ** 9) + bus0, bus1, random.randint(10**8, 10**9) ) # check if all necessary data is now available @@ -1289,7 +1325,12 @@ def _get_line_data(): }, index=[line_name], ) - self.lines_df = self.lines_df.append(new_line_df) + self.lines_df = pd.concat( + [ + self.lines_df, + new_line_df, + ] + ) return line_name def add_bus(self, bus_name, v_nom, **kwargs): @@ -1326,14 +1367,14 @@ def add_bus(self, bus_name, v_nom, **kwargs): # check uniqueness of provided bus name and otherwise change bus name while bus_name in self.buses_df.index: random.seed(a=bus_name) - bus_name = "Bus_{}".format(random.randint(10 ** 8, 10 ** 9)) + bus_name = f"Bus_{random.randint(10**8, 10**9)}" x = kwargs.get("x", None) y = kwargs.get("y", None) - lv_grid_id = kwargs.get("lv_grid_id", None) + lv_grid_id = kwargs.get("lv_grid_id", np.nan) in_building = 
kwargs.get("in_building", False) # check lv_grid_id - if v_nom < 1 and lv_grid_id is None: + if v_nom < 1 and np.isnan(lv_grid_id): raise ValueError("You need to specify an lv_grid_id for low-voltage buses.") new_bus_df = pd.DataFrame( data={ @@ -1346,13 +1387,20 @@ def add_bus(self, bus_name, v_nom, **kwargs): }, index=[bus_name], ) - self.buses_df = self.buses_df.append(new_bus_df) + self.buses_df = pd.concat( + [ + self.buses_df, + new_bus_df, + ] + ) return bus_name def remove_load(self, name): """ Removes load with given name from topology. + If no other elements are connected, line and bus are removed as well. + Parameters ---------- name : str @@ -1367,14 +1415,14 @@ def remove_load(self, name): if self._check_bus_for_removal(bus): line_name = self.get_connected_lines_from_bus(bus).index[0] self.remove_line(line_name) - logger.debug( - "Line {} removed together with load {}.".format(line_name, name) - ) + logger.debug(f"Line {line_name} removed together with load {name}.") def remove_generator(self, name): """ Removes generator with given name from topology. + If no other elements are connected, line and bus are removed as well. + Parameters ---------- name : str @@ -1392,15 +1440,15 @@ def remove_generator(self, name): line_name = self.get_connected_lines_from_bus(bus).index[0] self.remove_line(line_name) logger.debug( - "Line {} removed together with generator {}.".format( - line_name, name - ) + f"Line {line_name} removed together with generator {name}." ) def remove_storage_unit(self, name): """ Removes storage with given name from topology. + If no other elements are connected, line and bus are removed as well. + Parameters ---------- name : str @@ -1418,15 +1466,16 @@ def remove_storage_unit(self, name): line_name = self.get_connected_lines_from_bus(bus).index[0] self.remove_line(line_name) logger.debug( - "Line {} removed together with storage unit {}.".format( - line_name, name - ) + f"Line {line_name} removed together with storage unit {name}." 
) def remove_line(self, name): """ Removes line with given name from topology. + Line is only removed, if it does not result in isolated buses. A warning is + raised in that case. + Parameters ---------- name : str @@ -1434,9 +1483,11 @@ def remove_line(self, name): """ if not self._check_line_for_removal(name): - raise AssertionError( - "Removal of line {} would create isolated node.".format(name) + warnings.warn( + f"Removal of line {name} would create isolated node. Remove all " + "connected elements first to remove bus." ) + return # backup buses of line and check if buses can be removed as well bus0 = self.lines_df.at[name, "bus0"] @@ -1450,10 +1501,10 @@ def remove_line(self, name): # drop buses if no other elements are connected if remove_bus0: self.remove_bus(bus0) - logger.debug("Bus {} removed together with line {}".format(bus0, name)) + logger.debug(f"Bus {bus0} removed together with line {name}") if remove_bus1: self.remove_bus(bus1) - logger.debug("Bus {} removed together with line {}".format(bus1, name)) + logger.debug(f"Bus {bus1} removed together with line {name}") def remove_bus(self, name): """ @@ -1477,10 +1528,8 @@ def remove_bus(self, name): conn_comp_types = [k for k, v in conn_comp.items() if not v.empty] if len(conn_comp_types) > 0: warnings.warn( - "Bus {} is not isolated and therefore not removed. Remove all " - "connected elements ({}) first to remove bus.".format( - name, conn_comp_types - ) + f"Bus {name} is not isolated and therefore not removed. Remove all " + f"connected elements ({conn_comp_types}) first to remove bus." ) else: self._buses_df.drop(name, inplace=True) @@ -1553,15 +1602,14 @@ def change_line_type(self, lines, new_line_type): ] if grid_voltage != data_new_line.U_n: logging.debug( - "The line type of lines {} is changed to a type with " - "a different nominal voltage (nominal voltage of new " - "line type is {} kV while nominal voltage of the " - "medium voltage grid is {} kV). 
The nominal voltage " - "of the new line type is therefore set to the grids " - "nominal voltage.".format( - lines, data_new_line.U_n, grid_voltage - ) + f"The line type of lines {lines} is changed to a type with a " + f"different nominal voltage (nominal voltage of new line type " + f"is {data_new_line.U_n} kV while nominal voltage of the medium" + f" voltage grid is {grid_voltage} kV). The nominal voltage of " + f"the new line type is therefore set to the grids nominal " + f"voltage." ) + data_new_line.U_n = grid_voltage except KeyError: raise KeyError( @@ -1589,7 +1637,7 @@ def change_line_type(self, lines, new_line_type): np.sqrt(3) * data_new_line.U_n * data_new_line.I_max_th ) - def connect_to_mv(self, edisgo_object, comp_data, comp_type="Generator"): + def connect_to_mv(self, edisgo_object, comp_data, comp_type="generator"): """ Add and connect new generator or charging point to MV grid topology. @@ -1622,8 +1670,8 @@ def connect_to_mv(self, edisgo_object, comp_data, comp_type="Generator"): geolocation must be provided as :shapely:`Shapely Point object`. comp_type : str - Type of added component. Can be 'Generator' or 'ChargingPoint'. - Default: 'Generator'. + Type of added component. Can be 'generator' or 'charging_point'. + Default: 'generator'. Returns ------- @@ -1633,19 +1681,26 @@ def connect_to_mv(self, edisgo_object, comp_data, comp_type="Generator"): """ # ToDo connect charging points via transformer? 
+ if "p" not in comp_data.keys(): + comp_data["p"] = ( + comp_data["p_set"] + if "p_set" in comp_data.keys() + else comp_data["p_nom"] + ) + # create new bus for new component - if not type(comp_data["geom"]) is Point: + if type(comp_data["geom"]) != Point: geom = wkt_loads(comp_data["geom"]) else: geom = comp_data["geom"] - if comp_type == "Generator": + if comp_type == "generator": if comp_data["generator_id"] is not None: - bus = "Bus_Generator_{}".format(comp_data["generator_id"]) + bus = f'Bus_Generator_{comp_data["generator_id"]}' else: - bus = "Bus_Generator_{}".format(len(self.generators_df)) + bus = f"Bus_Generator_{len(self.generators_df)}" else: - bus = "Bus_ChargingPoint_{}".format(len(self.charging_points_df)) + bus = f"Bus_ChargingPoint_{len(self.charging_points_df)}" self.add_bus( bus_name=bus, @@ -1655,7 +1710,7 @@ def connect_to_mv(self, edisgo_object, comp_data, comp_type="Generator"): ) # add component to newly created bus - if comp_type == "Generator": + if comp_type == "generator": comp_name = self.add_generator(bus=bus, **comp_data) else: comp_name = self.add_load(bus=bus, type="charging_point", **comp_data) @@ -1673,12 +1728,9 @@ def connect_to_mv(self, edisgo_object, comp_data, comp_type="Generator"): ], ) # avoid very short lines by limiting line length to at least 1m - if line_length < 0.001: - line_length = 0.001 + line_length = max(line_length, 0.001) - line_type, num_parallel = select_cable( - edisgo_object, "mv", comp_data["p_nom"] - ) + line_type, num_parallel = select_cable(edisgo_object, "mv", comp_data["p"]) line_name = self.add_line( bus0=self.mv_grid.station.index[0], @@ -1694,8 +1746,6 @@ def connect_to_mv(self, edisgo_object, comp_data, comp_type="Generator"): line=self.lines_df.loc[line_name], ) - # == voltage level 5: component is connected to MV grid - # (next-neighbor) == elif comp_data["voltage_level"] == 5: # get branches within the predefined `connection_buffer_radius` @@ -1730,7 +1780,7 @@ def connect_to_mv(self, 
edisgo_object, comp_data, comp_type="Generator"): # do not allow connection to virtual busses if "virtual" not in dist_min_obj["repr"]: line_type, num_parallel = select_cable( - edisgo_object, "mv", comp_data["p_nom"] + edisgo_object, "mv", comp_data["p"] ) target_obj_result = self._connect_mv_bus_to_target_object( edisgo_object=edisgo_object, @@ -1746,18 +1796,18 @@ def connect_to_mv(self, edisgo_object, comp_data, comp_type="Generator"): if not comp_connected: logger.error( - "Component {} could not be connected. Try to " - "increase the parameter `conn_buffer_radius` in " - "config file `config_grid.cfg` to gain more possible " - "connection points.".format(comp_name) + f"Component {comp_name} could not be connected. Try to increase the" + f" parameter `conn_buffer_radius` in config file `config_grid.cfg` " + f"to gain more possible connection points." ) + return comp_name def connect_to_lv( self, edisgo_object, comp_data, - comp_type="Generator", + comp_type="generator", allowed_number_of_comp_per_bus=2, ): """ @@ -1818,8 +1868,8 @@ def connect_to_lv( `bus` that is assigned in this function, and may contain all other parameters of those methods. Additionally, the dictionary must contain the voltage level to - connect in in key 'voltage_level' and may contain the geolocation - in key 'geom' and the LV grid ID to connect the component in in key + connect in key 'voltage_level' and may contain the geolocation + in key 'geom' and the LV grid ID to connect the component in key 'mvlv_subst_id'. The voltage level must be provided as integer, with possible options being 6 (component is connected directly to the MV/LV substation) or 7 (component is connected somewhere in the @@ -1827,8 +1877,8 @@ def connect_to_lv( :shapely:`Shapely Point object` and the LV grid ID as integer. comp_type : str - Type of added component. Can be 'Generator' or 'ChargingPoint'. - Default: 'Generator'. + Type of added component. Can be 'generator' or 'charging_point'. + Default: 'generator'. 
allowed_number_of_comp_per_bus : int Specifies, how many generators respectively charging points are at most allowed to be placed at the same bus. Default: 2. @@ -1844,9 +1894,15 @@ def connect_to_lv( predefined seed to ensure reproducibility. """ - global add_func + if "p" not in comp_data.keys(): + comp_data["p"] = ( + comp_data["p_set"] + if "p_set" in comp_data.keys() + else comp_data["p_nom"] + ) + def _connect_to_station(): """ Connects new component to substation via an own bus. @@ -1854,15 +1910,15 @@ def _connect_to_station(): """ # add bus for new component - if comp_type == "Generator": + if comp_type == "generator": if comp_data["generator_id"] is not None: - b = "Bus_Generator_{}".format(comp_data["generator_id"]) + b = f'Bus_Generator_{comp_data["generator_id"]}' else: - b = "Bus_Generator_{}".format(len(self.generators_df)) + b = f"Bus_Generator_{len(self.generators_df)}" else: - b = "Bus_ChargingPoint_{}".format(len(self.charging_points_df)) + b = f"Bus_ChargingPoint_{len(self.charging_points_df)}" - if not type(comp_data["geom"]) is Point: + if not isinstance(comp_data["geom"], Point): geom = wkt_loads(comp_data["geom"]) else: geom = comp_data["geom"] @@ -1886,12 +1942,10 @@ def _connect_to_station(): ], ) # avoid very short lines by limiting line length to at least 1m - if line_length < 0.001: - line_length = 0.001 + line_length = max(line_length, 0.001) + # get suitable line type - line_type, num_parallel = select_cable( - edisgo_object, "lv", comp_data["p_nom"] - ) + line_type, num_parallel = select_cable(edisgo_object, "lv", comp_data["p"]) line_name = self.add_line( bus0=station_bus, bus1=b, @@ -1915,7 +1969,7 @@ def _choose_random_substation_id(): substation ID is provided or it does not exist. 
""" - if comp_type == "Generator": + if comp_type == "generator": random.seed(a=comp_data["generator_id"]) else: # ToDo: Seed shouldn't depend on number of charging points, but @@ -1927,13 +1981,13 @@ def _choose_random_substation_id(): # get list of LV grid IDs lv_grid_ids = [_.id for _ in self.mv_grid.lv_grids] - if comp_type == "Generator": + if comp_type == "generator": add_func = self.add_generator - elif comp_type == "ChargingPoint": + elif comp_type == "charging_point": add_func = self.add_load - comp_data["type"] = "charging_point" + comp_data["type"] = comp_type else: - logger.error("Component type {} is not a valid option.".format(comp_type)) + logger.error(f"Component type {comp_type} is not a valid option.") if comp_data["mvlv_subst_id"]: @@ -1944,9 +1998,7 @@ def _choose_random_substation_id(): if comp_data["mvlv_subst_id"] in lv_grid_ids: # get LV grid - lv_grid = self._grids[ - "LVGrid_{}".format(int(comp_data["mvlv_subst_id"])) - ] + lv_grid = self._grids[f"LVGrid_{int(comp_data['mvlv_subst_id'])}"] # if substation ID (= LV grid ID) is given but it does not match an # existing LV grid ID a random LV grid to connect in is chosen @@ -1967,7 +2019,7 @@ def _choose_random_substation_id(): lv_grid = _choose_random_substation_id() warnings.warn( "Component has no mvlv_subst_id. It is therefore allocated " - "to a random LV Grid ({}).".format(lv_grid.id) + f"to a random LV Grid ({lv_grid.id})." ) # v_level 6 -> connect to grid's LV station @@ -1980,8 +2032,8 @@ def _choose_random_substation_id(): ): comp_name = add_func(bus=lv_grid.station.index[0], **comp_data) logger.debug( - "Component {} has no geom entry and will be connected " - "to grid's LV station.".format(comp_name) + f"Component {comp_name} has no geom entry and will be connected " + "to grid's LV station." 
) else: comp_name = _connect_to_station() @@ -1992,8 +2044,8 @@ def _choose_random_substation_id(): # get valid buses to connect new component to lv_loads = lv_grid.loads_df - if comp_type == "Generator": - if comp_data["p_nom"] <= 0.030: + if comp_type == "generator": + if comp_data["p"] <= 0.030: tmp = lv_loads[lv_loads.sector == "residential"] target_buses = tmp.bus.values else: @@ -2017,13 +2069,13 @@ def _choose_random_substation_id(): # generate random list (unique elements) of possible target buses # to connect components to - if comp_type == "Generator": + if comp_type == "generator": random.seed(a=comp_data["generator_id"]) else: random.seed( a="{}_{}_{}".format( comp_data["sector"], - comp_data["p_nom"], + comp_data["p"], len(lv_grid.charging_points_df), ) ) @@ -2053,7 +2105,7 @@ def _choose_random_substation_id(): lv_bus = lv_buses_rnd.pop() # determine number of components of the same type at LV bus - if comp_type == "Generator": + if comp_type == "generator": comps_at_bus = self.generators_df[self.generators_df.bus == lv_bus] else: comps_at_bus = self.charging_points_df[ @@ -2179,6 +2231,7 @@ def _connect_mv_bus_to_target_object( length=line_length, kind=line_data.kind, type_info=line_data.type_info, + num_parallel=line_data.num_parallel, ) # if line connected to switch was split, write new line name to # switch data @@ -2207,6 +2260,7 @@ def _connect_mv_bus_to_target_object( length=line_length, kind=line_data.kind, type_info=line_data.type_info, + num_parallel=line_data.num_parallel, ) # if line connected to switch was split, write new line name to # switch data @@ -2301,6 +2355,36 @@ def to_graph(self): ) return graph + def to_geopandas(self, mode: str = "mv"): + """ + Returns components as :geopandas:`GeoDataFrame`\\ s + + Returns container with :geopandas:`GeoDataFrame`\\ s containing all + georeferenced components within the grid. + + Parameters + ---------- + mode : str + Return mode. If mode is "mv" the mv components are returned. 
If mode is "lv" + a generator with a container per lv grid is returned. Default: "mv" + + Returns + ------- + :class:`~.tools.geopandas_helper.GeoPandasGridContainer` or \ + list(:class:`~.tools.geopandas_helper.GeoPandasGridContainer`) + Data container with GeoDataFrames containing all georeferenced components + within the grid(s). + + """ + if mode == "mv": + return self.mv_grid.geopandas + elif mode == "lv": + raise NotImplementedError("LV Grids are not georeferenced yet.") + # for lv_grid in self.mv_grid.lv_grids: + # yield lv_grid.geopandas + else: + raise ValueError(f"{mode} is not valid. See docstring for more info.") + def to_csv(self, directory): """ Exports topology to csv files. @@ -2429,7 +2513,117 @@ def from_csv(self, directory, edisgo_obj): self._grids[str(lv_grid)] = lv_grid # Check data integrity - _validate_ding0_grid_import(edisgo_obj.topology) + self.check_integrity() + + def check_integrity(self): + """ + Check imported data integrity. + + Checks for duplicated labels and isolated components. 
+ + """ + # check for duplicate labels (of components) + duplicated_labels = [] + duplicated_comps = [] + + for comp in [ + "buses", + "generators", + "loads", + "transformers", + "lines", + "switches", + ]: + df = getattr(self, comp + "_df") + if any(df.index.duplicated()): + duplicated_comps.append(comp) + duplicated_labels.append(df.index[df.index.duplicated()].values) + + if duplicated_labels: + logger.warning( + "{labels} have duplicate entry in one of the following components' " + "dataframes: {comps}.".format( + labels=", ".join( + np.concatenate([list.tolist() for list in duplicated_labels]) + ), + comps=", ".join(duplicated_comps), + ) + ) + + # check for isolated or not defined buses + buses = [] + + for nodal_component in [ + "loads", + "generators", + "storage_units", + ]: + df = getattr(self, nodal_component + "_df") + missing = df.index[~df.bus.isin(self.buses_df.index)] + buses.append(df.bus.values) + + if len(missing) > 0: + logger.warning( + f"The following {nodal_component} have buses which are not defined:" + f" {', '.join(missing.values)}." + ) + + for branch_component in ["lines", "transformers"]: + df = getattr(self, branch_component + "_df") + + for attr in ["bus0", "bus1"]: + buses.append(df[attr].values) + missing = df.index[~df[attr].isin(self.buses_df.index)] + + if len(missing) > 0: + logger.warning( + f"The following {branch_component} have {attr} which are not " + f"defined: {', '.join(missing.values)}." + ) + + for attr in ["bus_open", "bus_closed"]: + missing = self.switches_df.index[ + ~self.switches_df[attr].isin(self.buses_df.index) + ] + buses.append(self.switches_df[attr].values) + + if len(missing) > 0: + logger.warning( + f"The following switches have {attr} which are not defined: " + f"{', '.join(missing.values)}." 
+ ) + + all_buses = np.unique(np.concatenate(buses, axis=None)) + missing = self.buses_df.index[~self.buses_df.index.isin(all_buses)] + if len(missing) > 0: + logger.warning( + f"The following buses are isolated: {', '.join(missing.values)}." + ) + + # check for subgraphs + subgraphs = list( + self.to_graph().subgraph(c) + for c in nx.connected_components(self.to_graph()) + ) + if len(subgraphs) > 1: + logger.warning("The network has isolated nodes or edges.") + + # check impedance + for branch_component in ["lines", "transformers"]: + if branch_component == "lines": + z = getattr(self, branch_component + "_df").apply( + lambda x: np.sqrt(np.square(x.r) + np.square(x.x)), axis=1 + ) + else: + z = getattr(self, branch_component + "_df").apply( + lambda x: np.sqrt(np.square(x.r_pu) + np.square(x.x_pu)), axis=1 + ) + if not z.empty and (z < 1e-6).any(): + logger.warning( + f"Very small values for impedance of {branch_component}: " + f"{z[z < 1e-6].index.values}. This might cause problems in the " + f"power flow." + ) def __repr__(self): - return "Network topology " + str(self.id) + return f"Network topology {self.id}" diff --git a/edisgo/opf/results/opf_expand_network.py b/edisgo/opf/results/opf_expand_network.py index a3111aa50..6a1c657b6 100644 --- a/edisgo/opf/results/opf_expand_network.py +++ b/edisgo/opf/results/opf_expand_network.py @@ -3,9 +3,7 @@ import numpy as np import pandas as pd -from edisgo.network.timeseries import add_storage_units_timeseries - -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) def expand_network(edisgo, tolerance=1e-6): @@ -25,11 +23,10 @@ def expand_network(edisgo, tolerance=1e-6): nep_factor = edisgo.opf_results.lines.nep.values.astype("float") - # Only round up numbers that are reasonably far away from the nearest - # Integer - # ToDo: fix! 
if there was more than 1 line before the optimization this ceil - # will overestimate the number of added lines (np.ceil(nep_factor*lines.num_parallel - tolerance)) - # this will give number of added lines + # Only round up numbers that are reasonably far away from the nearest Integer + # TODO: fix! if there was more than 1 line before the optimization this ceil will + # overestimate the number of added lines (np.ceil(nep_factor*lines.num_parallel - + # tolerance)) this will give number of added lines nep_factor = np.ceil(nep_factor - tolerance) # Get the names of all MV grid lines @@ -73,58 +70,7 @@ def grid_expansion_costs(opf_results, tolerance=1e-6): ) costs_cable = opf_results.pypsa.lines.loc[lines, "costs_cable"] * num_new_lines - earthworks = [1 if num_new_lines[l] > 0 else 0 for l in lines] - costs_earthwork = ( - opf_results.pypsa.lines.loc[lines, "costs_earthworks"] * earthworks - ) - - total_costs = costs_cable + costs_earthwork - extended_lines = total_costs[total_costs > 0].index - costs_df = pd.DataFrame( - data={ - "total_costs": total_costs.loc[extended_lines], - "type": ["line"] * len(extended_lines), - "length": opf_results.pypsa.lines.loc[extended_lines, "length"], - "quantity": num_new_lines.loc[extended_lines], - "voltage_level": ["mv"] * len(extended_lines), - }, - index=extended_lines, - ) - - return costs_df - - -def grid_expansion_costs(opf_results, tolerance=1e-6): - """ - Calculates grid expansion costs from OPF. - - As grid expansion is conducted continuously number of expanded lines is - determined by simply rounding up (including some tolerance). 
-
-    Parameters
-    ---------
-    opf_results : OPFResults class
-    tolerance : float
-
-    Returns
-    --------
-    float
-        Grid expansion costs determined by OPF
-
-    """
-    # ToDo maybe choose differenct default tolerance
-    lines = opf_results.lines.index
-
-    num_new_lines = (
-        np.ceil(
-            opf_results.lines.nep * opf_results.pypsa.lines.loc[lines, "num_parallel"]
-            - tolerance
-        )
-        - opf_results.pypsa.lines.loc[lines, "num_parallel"]
-    )
-    costs_cable = opf_results.pypsa.lines.loc[lines, "costs_cable"] * num_new_lines
-
-    earthworks = [1 if num_new_lines[l] > 0 else 0 for l in lines]
+    earthworks = [1 if num_new_lines[line] > 0 else 0 for line in lines]
     costs_earthwork = (
         opf_results.pypsa.lines.loc[lines, "costs_earthworks"] * earthworks
     )
@@ -187,57 +133,57 @@ def integrate_storage_units(
         reactive_power_ts = pd.DataFrame(
             0.0, columns=storage_ts.columns, index=storage_ts.index
         )
+    else:
+        storage_ts = None
+        reactive_power_ts = None
 
-    # ToDo adding timeseries will only work if timeseries.mode is None
-    # ToDo @Anya why is for mode manual kwarg called 'storage_units_reactive_power'
-    # and for mode None kwarg called 'timeseries_storage_units'
-    for st in edisgo.opf_results.storage_units.index:
+    # check if storage should be discarded (iterate over a copy as entries are removed)
+    add_storage_units = list(edisgo.opf_results.storage_units.index)
+    for st in list(add_storage_units):
         storage_cap = edisgo.opf_results.storage_units.at[st, "emax"]
-        if storage_cap >= min_storage_size and (storage_ts.loc[:, st] > 0.001).any():
-            if not as_load:
-                storage = edisgo.topology.add_storage_unit(
-                    bus=st, p_nom=storage_cap
-                )  # as C-rate is currently always 1
-            else:
-                storage = edisgo.topology.add_load(
-                    load_id=1,
-                    bus=st,
-                    p_nom=storage_cap,
-                    annual_consumption=0.0,
-                    sector="storage",
-                )
-            if timeseries:
-                ts_active = storage_ts.loc[:, [st]].rename(columns={st: storage})
-                ts_reactive = reactive_power_ts.loc[:, [st]].rename(
-                    columns={st: storage}
-                )
-                if not as_load:
-                    add_storage_units_timeseries(
-                        edisgo_obj=edisgo,
-                        storage_unit_names=storage,
-                        timeseries_storage_units=ts_active,
-                        timeseries_storage_units_reactive_power=ts_reactive,
-                    )
-                else:
-                    # ToDo change once fixed in timeseries
-                    edisgo.timeseries.loads_active_power = pd.concat(
-                        [edisgo.timeseries.loads_active_power, -ts_active],
-                        axis=1,
-                        sort=False,
-                    )
-                    edisgo.timeseries.loads_reactive_power = pd.concat(
-                        [edisgo.timeseries.loads_reactive_power, ts_reactive],
-                        axis=1,
-                        sort=False,
-                    )
-
-            added_storage_units.append(storage)
-        else:
+        if storage_cap < min_storage_size:
+            add_storage_units.remove(st)
             logger.info(
-                "Storage size of storage unit at bus {} is too small and "
-                "therefore discarded.".format(st)
+                f"Storage size of storage unit at bus {st} is too small and "
+                "the storage is therefore discarded."
             )
             storage_cap_discarded += storage_cap
+        elif timeseries:
+            if (storage_ts.loc[:, st] < 0.001).all():
+                add_storage_units.remove(st)
+                logger.info(
+                    f"Storage use of storage unit at bus {st} is too small and "
+                    "the storage is therefore discarded."
+ ) + storage_cap_discarded += storage_cap + + for st in add_storage_units: + storage_cap = edisgo.opf_results.storage_units.at[st, "emax"] + + if not as_load: + storage = edisgo.add_component( + comp_type="storage_unit", + add_ts=timeseries, + ts_active_power=storage_ts, + ts_reactive_power=reactive_power_ts, + bus=st, + p_nom=storage_cap, + ) # as C-rate is currently always 1 + else: + storage = edisgo.add_component( + comp_type="storage_unit", + add_ts=timeseries, + ts_active_power=storage_ts, + ts_reactive_power=reactive_power_ts, + bus=st, + p_set=storage_cap, + type="storage", + annual_consumption=0.0, + sector="storage", + ) + + added_storage_units.append(storage) + return added_storage_units, storage_cap_discarded @@ -375,12 +321,12 @@ def integrate_curtailment_as_load(edisgo, curtailment_per_node): for n in active_power_ts.columns: - if not n in curtailment_loads.bus: + if n not in curtailment_loads.bus: # add load component load = edisgo.topology.add_load( load_id=1, bus=n, - p_nom=curtailment_per_node.loc[:, n].max(), + p_set=curtailment_per_node.loc[:, n].max(), annual_consumption=0.0, sector="curtailment", ) diff --git a/edisgo/opf/results/opf_result_class.py b/edisgo/opf/results/opf_result_class.py index ac86ff7bf..1ecb4a1ff 100644 --- a/edisgo/opf/results/opf_result_class.py +++ b/edisgo/opf/results/opf_result_class.py @@ -8,7 +8,7 @@ preprocess_pypsa_opf_structure, ) -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) def read_from_json(edisgo_obj, path, mode="mv"): @@ -116,7 +116,8 @@ def set_solution_to_results(self, pypsa_net): # Bus Variables self.set_bus_variables(pypsa_net) # Generator Variables - # TODO Adjust for case that generators are fixed and no variables are returned from julia + # TODO: Adjust for case that generators are fixed and no variables are returned + # from julia self.set_gen_variables(pypsa_net) self.set_load_variables(pypsa_net) # Storage Variables @@ -134,7 +135,8 @@ def set_line_variables(self, 
pypsa_net): br_statics.index = pypsa_net.lines.index self.lines = br_statics - # time dependent variables: cm: squared current magnitude, p: active power flow, q: reactive power flow + # time dependent variables: cm: squared current magnitude, p: active power flow, + # q: reactive power flow ts = pypsa_net.snapshots.sort_values() cm_t = pd.DataFrame(index=ts, columns=pypsa_net.lines.index) p_t = pd.DataFrame(index=ts, columns=pypsa_net.lines.index) @@ -193,7 +195,7 @@ def set_gen_variables(self, pypsa_net): qg_t.loc[date_idx] = gen_t.qg self.generators_t.pg = pg_t self.generators_t.qg = qg_t - except: + except Exception: logger.warning("Error in writing OPF solutions for slack time series.") else: try: @@ -208,7 +210,7 @@ def set_gen_variables(self, pypsa_net): qg_t.loc[date_idx] = gen_t.qg self.generators_t.pg = pg_t self.generators_t.qg = qg_t - except: + except Exception: logger.warning( "Error in writing OPF solutions for generator time series." ) diff --git a/edisgo/opf/run_mp_opf.py b/edisgo/opf/run_mp_opf.py index ccd22f631..8806263ed 100644 --- a/edisgo/opf/run_mp_opf.py +++ b/edisgo/opf/run_mp_opf.py @@ -84,7 +84,8 @@ def run_mp_opf(edisgo_network, timesteps=None, storage_series=[], **kwargs): "time_elapsed": 1.0, # storage units are considered "storage_units": False, - # positioning of storage units, if empty list, all buses are potential positions of storage units and + # positioning of storage units, if empty list, all buses are potential positions + # of storage units and # capacity is optimized "storage_buses": [], # total storage capacity in the network @@ -98,7 +99,8 @@ def run_mp_opf(edisgo_network, timesteps=None, storage_series=[], **kwargs): # An overall allowance of curtailment is considered "curtailment_allowance": False, # Maximal allowed curtailment over entire time horizon, - # DEFAULT: "3percent"=> 3% of total RES generation in time horizon may be curtailed, else: Float + # DEFAULT: "3percent"=> 3% of total RES generation in time horizon 
may be + # curtailed, else: Float "curtailment_total": "3percent", "results_path": "opf_solutions" # path to where OPF results are stored @@ -112,7 +114,8 @@ def run_mp_opf(edisgo_network, timesteps=None, storage_series=[], **kwargs): logger.debug(scenario_data_dir) # solution_dir = os.path.join(opf_dir, "opf_solutions/") - # set path to edisgoOPF folder for scenario data and julia module relative to this file + # set path to edisgoOPF folder for scenario data and julia module relative to this + # file # abspath = os.path.dirname(os.path.abspath(__file__)) # opf_dir = os.path.join(abspath, "edisgoOPF/") # scenario_data_dir = os.path.join(opf_dir, "edisgo_scenario_data") @@ -162,15 +165,18 @@ def run_mp_opf(edisgo_network, timesteps=None, storage_series=[], **kwargs): logger.debug("preprocessing pypsa structure for opf") preprocess_pypsa_opf_structure(edisgo_network, pypsa_mv, hvmv_trafo=False) aggregate_fluct_generators(pypsa_mv) - # convert pypsa structure to network dictionary and create dictionaries for time series of loads and generators + # convert pypsa structure to network dictionary and create dictionaries for time + # series of loads and generators pm, load_data, gen_data = to_powermodels(pypsa_mv) storage_data = convert_storage_series(storage_series) - # Export eDisGo storage only for operation only as they would interfere with positioning + # Export eDisGo storage only for operation only as they would interfere with + # positioning if settings["storage_operation_only"]: add_storage_from_edisgo(edisgo_network, pypsa_mv, pm) - # dump json files for static network information, timeseries of loads and generators, and opf settings + # dump json files for static network information, timeseries of loads and + # generators, and opf settings with open( os.path.join(scenario_data_dir, "{}_static.json".format(pm["name"])), "w", diff --git a/edisgo/opf/timeseries_reduction.py b/edisgo/opf/timeseries_reduction.py index a7c2c7374..1af50f5d3 100644 --- 
a/edisgo/opf/timeseries_reduction.py +++ b/edisgo/opf/timeseries_reduction.py @@ -133,14 +133,14 @@ def get_steps_storage(edisgo_obj, window=5): nodes = pd.DataFrame(v) if "time_index" in nodes: for step in nodes["time_index"]: - if not step in crit_periods: + if step not in crit_periods: crit_periods.append(step) # Get periods with current violations crit_lines = check_tech_constraints.mv_line_load(edisgo_obj) if "time_index" in crit_lines: for step in crit_lines["time_index"]: - if not step in crit_periods: + if step not in crit_periods: crit_periods.append(step) reduced = [] diff --git a/edisgo/opf/util/scenario_settings.py b/edisgo/opf/util/scenario_settings.py index 3db4edf36..43bd46c19 100644 --- a/edisgo/opf/util/scenario_settings.py +++ b/edisgo/opf/util/scenario_settings.py @@ -1,13 +1,16 @@ def opf_settings(): opf_settings = { - # name of postmethod, right now this is just the scenario name and every scenario is handle in one problem setup - # the so-called postmethod - # for future extension define multiple postmethods in different julia files which will be call by the scenarioname + # name of postmethod, right now this is just the scenario name and every + # scenario is handle in one problem setup the so-called postmethod for future + # extension define multiple postmethods in different julia files which will be + # call by the scenarioname "scenario": "nep", - # objective function, DEFAULT "nep", future extension might include "generation costs" "storage costs" etc. + # objective function, DEFAULT "nep", future extension might include "generation + # costs" "storage costs" etc. 
"objective": "nep", - # chosen relaxation, DEFAULT: "none", options: "none", "soc", "soc_cr", "cr", relaxation are described in - # masterthesis "MULTIPERIOD OPTIMAL POWER FLOW PROBLEM IN DISTRIBUTION SYSTEM PLANNING" by Jaap Pedersen + # chosen relaxation, DEFAULT: "none", options: "none", "soc", "soc_cr", "cr", + # relaxation are described in masterthesis "MULTIPERIOD OPTIMAL POWER FLOW + # PROBLEM IN DISTRIBUTION SYSTEM PLANNING" by Jaap Pedersen "relaxation": "none", # Dictionary of linked time steps in the format {linked step => original step} "clusters": {}, @@ -17,15 +20,19 @@ def opf_settings(): "time_horizon": 2, # length of time step in hours "time_elapsed": 1.0, - # storage units are considered, DEFAULT:False, if true storage units will be located either at buses given in - # "storage_buses" or if "storage_buses"=[] all buses are considered as possible locations + # storage units are considered, DEFAULT:False, if true storage units will be + # located either at buses given in "storage_buses" or if "storage_buses"=[] all + # buses are considered as possible locations "storage_units": False, - # positioning of storage units, if empty list, all buses are potential positions of storage units and - # capacity is optimized, entries of list need to be type "int" + # positioning of storage units, if empty list, all buses are potential + # positions of storage units and capacity is optimized, entries of list need to + # be type "int" "storage_buses": [], - # Only optimize operation of storages exported from eDisGo. Do not optimize storage positioning. + # Only optimize operation of storages exported from eDisGo. Do not optimize + # storage positioning. 
"storage_operation_only": False, - # total storage capacity in the network, sizing of storages is a decision variable and will be found in optimization + # total storage capacity in the network, sizing of storages is a decision + # variable and will be found in optimization "total_storage_capacity": 0.0, # Requirements for curtailment in every time step is considered, DEFAULT: False "curtailment_requirement": False, diff --git a/edisgo/tools/__init__.py b/edisgo/tools/__init__.py index 6bca6aa4c..10191a703 100644 --- a/edisgo/tools/__init__.py +++ b/edisgo/tools/__init__.py @@ -16,7 +16,7 @@ def session_scope(): session = Session() try: yield session - except: + except Exception: session.rollback() raise finally: diff --git a/edisgo/tools/config.py b/edisgo/tools/config.py index 2e0a4f4bb..55c011781 100644 --- a/edisgo/tools/config.py +++ b/edisgo/tools/config.py @@ -27,11 +27,11 @@ import edisgo -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) try: import configparser as cp -except: +except Exception: # to be compatible with Python2.7 import ConfigParser as cp @@ -43,7 +43,7 @@ internal_config_file = os.path.join(package_path, "config", "config_system.cfg") try: cfg.read(internal_config_file) -except: +except Exception: logger.exception("Internal config {} file not found.".format(internal_config_file)) @@ -147,7 +147,7 @@ def _load_config(config_path=None): # try str -> float conversion try: config_dict[sec][subsec] = float(val) - except: + except Exception: pass # convert to time object @@ -172,12 +172,12 @@ def __getitem__(self, key1, key2=None): if key2 is None: try: return self._data[key1] - except: + except Exception: raise KeyError("Config does not contain section {}.".format(key1)) else: try: return self._data[key1][key2] - except: + except Exception: raise KeyError( "Config does not contain value for {} or " "section {}.".format(key2, key1) @@ -268,13 +268,13 @@ def get(section, key): pass try: return cfg.getfloat(section, key) - 
except: + except Exception: try: return cfg.getint(section, key) - except: + except Exception: try: return cfg.getboolean(section, key) - except: + except Exception: return cfg.get(section, key) diff --git a/edisgo/tools/edisgo_run.py b/edisgo/tools/edisgo_run.py index 2bdee4110..77e1f8aa9 100755 --- a/edisgo/tools/edisgo_run.py +++ b/edisgo/tools/edisgo_run.py @@ -3,7 +3,6 @@ import logging import multiprocessing as mp import os -import re import sys import multiprocess as mp2 @@ -110,7 +109,7 @@ def run_edisgo_basic( return ( None, pd.DataFrame(), - {"network": grid_district, "msg": str(e)}, + {"network": edisgo_grid, "msg": str(e)}, ) logging.info("Grid expansion for MV network {}".format(edisgo_grid.topology.id)) @@ -147,12 +146,12 @@ def run_edisgo_basic( except MaximumIterationError: grid_issues["network"] = edisgo_grid.network.id grid_issues["msg"] = str(edisgo_grid.network.results.unresolved_issues) - costs = pd.DataFrame() + costs = pd.DataFrame(dtype=float) logging.warning("Unresolved issues left after network expansion.") except Exception as e: grid_issues["network"] = edisgo_grid.network.id grid_issues["msg"] = repr(e) - costs = pd.DataFrame() + costs = pd.DataFrame(dtype=float) logging.exception() return edisgo_grid, costs, grid_issues @@ -236,7 +235,8 @@ def run_edisgo_pool( Ding0 network data file names run_args_opt : list eDisGo options, see :func:`run_edisgo_basic` and - :func:`run_edisgo_twice`, has to contain generator_scenario and analysis as entries + :func:`run_edisgo_twice`, has to contain generator_scenario and analysis as + entries workers: int Number of parallel process worker_lifetime : int @@ -501,7 +501,7 @@ def edisgo_run(): # get current time for output file names exec_time = pd.datetime.now().strftime("%Y-%m-%d_%H%M") - logger = setup_logging( + logger = setup_logging( # noqa: F841 logfilename="test.log", logfile_loglevel="debug", console_loglevel="info", @@ -529,7 +529,7 @@ def edisgo_run(): raise FileNotFoundError("Some of the 
Arguments for input files are missing.")
 
     # this is the serial version of the run system
-    run_func = run_edisgo_basic
+    run_func = run_edisgo_basic  # noqa: F841
 
     run_args_opt_no_scenario = [None]
     run_args_opt = [args.scenario]
@@ -547,7 +547,7 @@
 
     if not args.parallel:
         for ding0_filename in ding0_file_list:
-            grid_district = _get_griddistrict(ding0_filename)
+            grid_district = _get_griddistrict(ding0_filename)  # noqa: F821, F841
 
             run_args = [ding0_filename]
             run_args.extend(run_args_opt_no_scenario)
diff --git a/edisgo/tools/geo.py b/edisgo/tools/geo.py
index 4a2402176..46f60dcc6 100755
--- a/edisgo/tools/geo.py
+++ b/edisgo/tools/geo.py
@@ -9,7 +9,7 @@
 
 import logging
 
-logger = logging.getLogger("edisgo")
+logger = logging.getLogger(__name__)
 
 
 def proj2equidistant(srid):
diff --git a/edisgo/tools/geopandas_helper.py b/edisgo/tools/geopandas_helper.py
new file mode 100644
index 000000000..4f877a0c9
--- /dev/null
+++ b/edisgo/tools/geopandas_helper.py
@@ -0,0 +1,230 @@
+from __future__ import annotations
+
+import os
+
+from typing import TYPE_CHECKING
+
+if "READTHEDOCS" not in os.environ:
+    import geopandas as gpd
+
+    from shapely.geometry import LineString
+
+if TYPE_CHECKING:
+    from edisgo.network.grids import Grid
+
+COMPONENTS: list[str] = [
+    "generators_df",
+    "loads_df",
+    "storage_units_df",
+    "transformers_df",
+]
+
+
+class GeoPandasGridContainer:
+    """
+    Grid's geo data for all components with information about their geolocation.
+
+    Parameters
+    ----------
+    crs : str
+        Coordinate Reference System of the geometry objects.
+    grid_id : str or int
+        Grid identifier
+    grid : :class:`~.network.grids.Grid`
+        Matching grid object
+    buses_gdf : :geopandas:`GeoDataFrame`
+        GeoDataframe with all buses in the Grid. See
+        :attr:`~.network.topology.Topology.buses_df` for more information.
+    generators_gdf : :geopandas:`GeoDataFrame`
+        GeoDataframe with all generators in the Grid. See
+        :attr:`~.network.topology.Topology.generators_df` for more information.
+    loads_gdf : :geopandas:`GeoDataFrame`
+        GeoDataframe with all loads in the Grid. See
+        :attr:`~.network.topology.Topology.loads_df` for more information.
+    storage_units_gdf : :geopandas:`GeoDataFrame`
+        GeoDataframe with all storage units in the Grid. See
+        :attr:`~.network.topology.Topology.storage_units_df` for more information.
+    transformers_gdf : :geopandas:`GeoDataFrame`
+        GeoDataframe with all transformers in the Grid. See
+        :attr:`~.network.topology.Topology.transformers_df` for more information.
+    lines_gdf : :geopandas:`GeoDataFrame`
+        GeoDataframe with all lines in the Grid. See
+        :attr:`~.network.topology.Topology.lines_df` for more information.
+    """
+
+    def __init__(
+        self,
+        crs: str,
+        grid_id: str | int,
+        grid: Grid,
+        buses_gdf: gpd.GeoDataFrame,
+        generators_gdf: gpd.GeoDataFrame,
+        loads_gdf: gpd.GeoDataFrame,
+        storage_units_gdf: gpd.GeoDataFrame,
+        transformers_gdf: gpd.GeoDataFrame,
+        lines_gdf: gpd.GeoDataFrame,
+    ):
+        self.crs = crs
+        self.grid_id = grid_id
+        self.grid = grid
+        self.buses_gdf = buses_gdf
+        self.generators_gdf = generators_gdf
+        self.loads_gdf = loads_gdf
+        self.storage_units_gdf = storage_units_gdf
+        self.transformers_gdf = transformers_gdf
+        self.lines_gdf = lines_gdf
+
+    @property
+    def crs(self):
+        """The crs property."""
+        return self._crs
+
+    @crs.setter
+    def crs(self, crs_str):
+        self._crs = crs_str
+
+    @property
+    def grid_id(self):
+        """The grid_id property."""
+        return self._grid_id
+
+    @grid_id.setter
+    def grid_id(self, grid_id_val):
+        self._grid_id = grid_id_val
+
+    @property
+    def grid(self):
+        """The grid property."""
+        return self._grid
+
+    @grid.setter
+    def grid(self, grid_obj):
+        self._grid = grid_obj
+
+    @property
+    def buses_gdf(self):
+        """The buses_gdf property."""
+        return self._buses_gdf
+
+    @buses_gdf.setter
+    def buses_gdf(self, gdf):
+        self._buses_gdf = gdf
+
+    @property
+    def generators_gdf(self):
+        """The generators_gdf property."""
+        return self._generators_gdf
+
+    @generators_gdf.setter
+    def generators_gdf(self, gdf):
+        self._generators_gdf = gdf
+
+    @property
+    def loads_gdf(self):
+        """The loads_gdf property."""
+        return self._loads_gdf
+
+    @loads_gdf.setter
+    def loads_gdf(self, gdf):
+        self._loads_gdf = gdf
+
+    @property
+    def storage_units_gdf(self):
+        """The storage_units_gdf property."""
+        return self._storage_units_gdf
+
+    @storage_units_gdf.setter
+    def storage_units_gdf(self, gdf):
+        self._storage_units_gdf = gdf
+
+    @property
+    def transformers_gdf(self):
+        """The transformers_gdf property."""
+        return self._transformers_gdf
+
+    @transformers_gdf.setter
+    def transformers_gdf(self, gdf):
+        self._transformers_gdf = gdf
+
+    @property
+    def lines_gdf(self):
+        """The lines_gdf property."""
+        return self._lines_gdf
+
+    @lines_gdf.setter
+    def lines_gdf(self, gdf):
+        self._lines_gdf = gdf
+
+    def plot(self):
+        """
+        TODO: Implement plotting functions as needed
+        """
+        raise NotImplementedError
+
+
+def to_geopandas(grid_obj: Grid):
+    """
+    Translates all DataFrames with geolocations within a Grid class to GeoDataFrames
+
+    Parameters
+    ----------
+    grid_obj : :class:`~.network.grids.Grid`
+        Grid object to transform
+
+    Returns
+    -------
+    :class:`.GeoPandasGridContainer`
+        Data container with the grid's geo data for all components with information
+        about their geolocation.
+ + """ + # get srid id + srid = grid_obj._edisgo_obj.topology.grid_district["srid"] + + # convert buses_df + buses_df = grid_obj.buses_df + buses_df = buses_df.assign( + geometry=gpd.points_from_xy(buses_df.x, buses_df.y, crs=f"EPSG:{srid}") + ).drop(columns=["x", "y"]) + + buses_gdf = gpd.GeoDataFrame(buses_df, crs=f"EPSG:{srid}") + + # convert component DataFrames + components_dict = {} + + for component in COMPONENTS: + left_on = "bus1" if component == "transformers_df" else "bus" + + attr = getattr(grid_obj, component) + + components_dict[component.replace("_df", "_gdf")] = gpd.GeoDataFrame( + attr.merge( + buses_gdf[["geometry", "v_nom"]], left_on=left_on, right_index=True + ), + crs=f"EPSG:{srid}", + ) + + # convert lines_df + lines_df = grid_obj.lines_df + + geom_0 = lines_df.merge( + buses_gdf[["geometry"]], left_on="bus0", right_index=True + ).geometry + geom_1 = lines_df.merge( + buses_gdf[["geometry"]], left_on="bus1", right_index=True + ).geometry + + geometry = [ + LineString([point_0, point_1]) for point_0, point_1 in list(zip(geom_0, geom_1)) + ] + + lines_gdf = gpd.GeoDataFrame(lines_df.assign(geometry=geometry), crs=f"EPSG:{srid}") + + return GeoPandasGridContainer( + crs=f"EPSG:{srid}", + grid_id=grid_obj.id, + grid=grid_obj, + buses_gdf=buses_gdf, + lines_gdf=lines_gdf, + **components_dict, + ) diff --git a/edisgo/tools/plots.py b/edisgo/tools/plots.py index 8c81381b9..76502cc14 100644 --- a/edisgo/tools/plots.py +++ b/edisgo/tools/plots.py @@ -15,6 +15,7 @@ from dash.dependencies import Input, Output from jupyter_dash import JupyterDash from matplotlib import pyplot as plt +from networkx import Graph from pyproj import Transformer from pypsa import Network as PyPSANetwork @@ -29,7 +30,6 @@ from edisgo.network.grids import Grid if "READTHEDOCS" not in os.environ: - from egoio.db_tables.grid import EgoDpMvGriddistrict from egoio.db_tables.model_draft import EgoGridMvGriddistrict from geoalchemy2 import shape @@ -37,12 +37,12 @@ geopandas = True 
try: import geopandas as gpd - except: + except Exception: geopandas = False contextily = True try: import contextily as ctx - except: + except Exception: contextily = False logger = logging.getLogger(__name__) @@ -58,7 +58,8 @@ def histogram(data, **kwargs): Data to be plotted, e.g. voltage or current (`v_res` or `i_res` from :class:`network.results.Results`). Index of the dataframe must be a :pandas:`pandas.DatetimeIndex`. - timeindex : :pandas:`pandas.Timestamp` or list(:pandas:`pandas.Timestamp`) or None, optional + timeindex : :pandas:`pandas.Timestamp` or \ + list(:pandas:`pandas.Timestamp`) or None, optional Specifies time steps histogram is plotted for. If timeindex is None all time steps provided in `data` are used. Default: None. directory : :obj:`str` or None, optional @@ -128,7 +129,7 @@ def histogram(data, **kwargs): } try: fig_size = standard_sizes[fig_size] - except: + except Exception: fig_size = standard_sizes["a5landscape"] plot_data = data.loc[timeindex, :].T.stack() @@ -544,7 +545,7 @@ def nodes_by_costs(buses, grid_expansion_costs, edisgo_obj): try: bus_colors[bus] = costs_lv_stations.loc[bus, "total_costs"] bus_sizes[bus] = 100 - except: + except Exception: bus_colors[bus] = 0 bus_sizes[bus] = 0 # MVStation handeling @@ -552,7 +553,7 @@ def nodes_by_costs(buses, grid_expansion_costs, edisgo_obj): try: bus_colors[bus] = costs_mv_station.loc[bus, "total_costs"] bus_sizes[bus] = 100 - except: + except Exception: bus_colors[bus] = 0 bus_sizes[bus] = 0 else: @@ -748,16 +749,17 @@ def nodes_by_costs(buses, grid_expansion_costs, edisgo_obj): # storage_units if node_color == "expansion_costs": - ax.scatter( - pypsa_plot.buses.loc[ - edisgo_obj.topology.storage_units_df.loc[:, "bus"], "x" - ], - pypsa_plot.buses.loc[ - edisgo_obj.topology.storage_units_df.loc[:, "bus"], "y" - ], - c="orangered", - s=edisgo_obj.topology.storage_units_df.loc[:, "p_nom"] * 1000 / 3, - ) + if not edisgo_obj.topology.storage_units_df.empty: + ax.scatter( + 
pypsa_plot.buses.loc[ + edisgo_obj.topology.storage_units_df.loc[:, "bus"], "x" + ], + pypsa_plot.buses.loc[ + edisgo_obj.topology.storage_units_df.loc[:, "bus"], "y" + ], + c="orangered", + s=edisgo_obj.topology.storage_units_df.loc[:, "p_nom"] * 1000 / 3, + ) # add legend for storage size and line capacity if ( node_color == "storage_integration" or node_color == "expansion_costs" @@ -830,7 +832,7 @@ def nodes_by_costs(buses, grid_expansion_costs, edisgo_obj): # draw arrows on lines if arrows and timestep and line_color == "loading": path = ll[1].get_segments() - colors = cmap(ll[1].get_array() / 100) + # colors = cmap(ll[1].get_array() / 100) for i in range(len(path)): if edisgo_obj.lines_t.p0.loc[timestep, line_colors.index[i]] > 0: arrowprops = dict(arrowstyle="->", color="b") # colors[i]) @@ -953,8 +955,6 @@ def draw_plotly( if G is None: G = edisgo_obj.topology.mv_grid.graph - node_list = list(G.nodes()) - if hasattr(grid, "transformers_df"): node_root = grid.transformers_df.bus1.iat[0] x_root, y_root = G.nodes[node_root]["pos"] @@ -1127,7 +1127,7 @@ def draw_plotly( try: peak_load = edisgo_obj.topology.loads_df.loc[ edisgo_obj.topology.loads_df.bus == node - ].p_nom.sum() + ].p_set.sum() text += "
" + "peak_load = " + str(peak_load) p_nom = edisgo_obj.topology.generators_df.loc[ edisgo_obj.topology.generators_df.bus == node diff --git a/edisgo/tools/powermodels_io.py b/edisgo/tools/powermodels_io.py index c10d7bcbc..d8e4d503c 100644 --- a/edisgo/tools/powermodels_io.py +++ b/edisgo/tools/powermodels_io.py @@ -4,10 +4,10 @@ import pandas as pd import pypsa -from pypower.idx_brch import * -from pypower.idx_bus import * -from pypower.idx_cost import * -from pypower.idx_gen import * +from pypower.idx_brch import * # noqa: F403 +from pypower.idx_bus import * # noqa: F403 +from pypower.idx_cost import * # noqa: F403 +from pypower.idx_gen import * # noqa: F403 def to_powermodels(pypsa_net): @@ -47,11 +47,16 @@ def convert_storage_series(timeseries): return storage -# FIXME: Static storage data is exported from the eDisGo network rather than the PyPSA network as the capacity of network doesn't seem to be available there. For consistency with the rest of the conversion, it should be converted from PyPSA as well. -# TODO: This will (probably) not work if there are multiple storage units connected to the same bus. +# FIXME: Static storage data is exported from the eDisGo network rather than the PyPSA +# network as the capacity of network doesn't seem to be available there. For +# consistency with the rest of the conversion, it should be converted from PyPSA as +# well. +# TODO: This will (probably) not work if there are multiple storage units connected to +# the same bus. 
def add_storage_from_edisgo(edisgo_obj, psa_net, pm_dict): """ - Read static storage data (position and capacity) from eDisGo and export to Powermodels dict + Read static storage data (position and capacity) from eDisGo and export to + Powermodels dict """ # Drop values that are not available storage = pd.DataFrame(edisgo_obj.topology.storage_units_df[["bus", "p_nom"]]) @@ -129,7 +134,7 @@ def pypsa2ppc(psa_net): time_horizon ) ) - except IndexError as e: + except IndexError: print("No load timeseries. Create empty dicts for timeseries of load") load_dict = dict() try: @@ -139,7 +144,7 @@ def pypsa2ppc(psa_net): time_horizon ) ) - except IndexError as e: + except IndexError: print( "no generator timeseries Create empty dicts " "for timeseries of load and generation " @@ -178,18 +183,18 @@ def ppc2pm(ppc, psa_net): # pragma: no cover shunt_idx = 1 for row in ppc["bus"]: bus = dict() - idx = int(row[BUS_I]) + 1 + idx = int(row[BUS_I]) + 1 # noqa: F405 bus["index"] = idx bus["bus_i"] = idx - bus["zone"] = int(row[ZONE]) - bus["bus_type"] = int(row[BUS_TYPE]) - bus["vmax"] = row[VMAX] - bus["vmin"] = row[VMIN] - bus["va"] = row[VA] - bus["vm"] = row[VM] - bus["base_kv"] = row[BASE_KV] - pd = row[PD] - qd = row[QD] + bus["zone"] = int(row[ZONE]) # noqa: F405 + bus["bus_type"] = int(row[BUS_TYPE]) # noqa: F405 + bus["vmax"] = row[VMAX] # noqa: F405 + bus["vmin"] = row[VMIN] # noqa: F405 + bus["va"] = row[VA] # noqa: F405 + bus["vm"] = row[VM] # noqa: F405 + bus["base_kv"] = row[BASE_KV] # noqa: F405 + pd = row[PD] # noqa: F405 + qd = row[QD] # noqa: F405 if pd != 0 or qd != 0: pm["load"][str(load_idx)] = { "pd": pd, @@ -199,8 +204,8 @@ def ppc2pm(ppc, psa_net): # pragma: no cover "index": load_idx, } load_idx += 1 - bs = row[BS] - gs = row[GS] + bs = row[BS] # noqa: F405 + gs = row[GS] # noqa: F405 if pd != 0 or qd != 0: pm["shunt"][str(shunt_idx)] = { "gs": gs, @@ -217,40 +222,45 @@ def ppc2pm(ppc, psa_net): # pragma: no cover branch = dict() branch["index"] = idx 
branch["transformer"] = idx > n_lines - branch["br_r"] = row[BR_R].real - branch["br_x"] = row[BR_X].real - branch["g_fr"] = -row[BR_B].imag / 2.0 - branch["g_to"] = -row[BR_B].imag / 2.0 - branch["b_fr"] = row[BR_B].real / 2.0 - branch["b_to"] = row[BR_B].real / 2.0 - branch["rate_a"] = row[RATE_A].real if row[RATE_A] > 0 else row[RATE_B].real - branch["rate_b"] = row[RATE_B].real - branch["rate_c"] = row[RATE_C].real - branch["f_bus"] = int(row[F_BUS].real) + 1 - branch["t_bus"] = int(row[T_BUS].real) + 1 - branch["br_status"] = int(row[BR_STATUS].real) - branch["angmin"] = row[ANGMIN].real - branch["angmax"] = row[ANGMAX].real - branch["tap"] = row[TAP].real - branch["shift"] = math.radians(row[SHIFT].real) + branch["br_r"] = row[BR_R].real # noqa: F405 + branch["br_x"] = row[BR_X].real # noqa: F405 + branch["g_fr"] = -row[BR_B].imag / 2.0 # noqa: F405 + branch["g_to"] = -row[BR_B].imag / 2.0 # noqa: F405 + branch["b_fr"] = row[BR_B].real / 2.0 # noqa: F405 + branch["b_to"] = row[BR_B].real / 2.0 # noqa: F405 + branch["rate_a"] = ( + row[RATE_A].real if row[RATE_A] > 0 else row[RATE_B].real # noqa: F405 + ) + branch["rate_b"] = row[RATE_B].real # noqa: F405 + branch["rate_c"] = row[RATE_C].real # noqa: F405 + branch["f_bus"] = int(row[F_BUS].real) + 1 # noqa: F405 + branch["t_bus"] = int(row[T_BUS].real) + 1 # noqa: F405 + branch["br_status"] = int(row[BR_STATUS].real) # noqa: F405 + branch["angmin"] = row[ANGMIN].real # noqa: F405 + branch["angmax"] = row[ANGMAX].real # noqa: F405 + branch["tap"] = row[TAP].real # noqa: F405 + branch["shift"] = math.radians(row[SHIFT].real) # noqa: F405 pm["branch"][str(idx)] = branch for idx, row in enumerate(ppc["gen"], start=1): gen = dict() - gen["pg"] = row[PG] - gen["qg"] = row[QG] - gen["gen_bus"] = int(row[GEN_BUS]) + 1 - gen["vg"] = row[VG] - gen["qmax"] = row[QMAX] - gen["gen_status"] = int(row[GEN_STATUS]) - gen["qmin"] = row[QMIN] - gen["pmin"] = row[PMIN] - gen["pmax"] = row[PMAX] + gen["pg"] = row[PG] # noqa: F405 
+ gen["qg"] = row[QG] # noqa: F405 + gen["gen_bus"] = int(row[GEN_BUS]) + 1 # noqa: F405 + gen["vg"] = row[VG] # noqa: F405 + gen["qmax"] = row[QMAX] # noqa: F405 + gen["gen_status"] = int(row[GEN_STATUS]) # noqa: F405 + gen["qmin"] = row[QMIN] # noqa: F405 + gen["pmin"] = row[PMIN] # noqa: F405 + gen["pmax"] = row[PMAX] # noqa: F405 gen["index"] = idx pm["gen"][str(idx)] = gen - # TODO add attribute "fluctuating" to generators from psa_net, maybe move to ppc first - # is_fluctuating = [int("fluctuating" in index.lower()) for index in psa_net.generators.index] + # TODO add attribute "fluctuating" to generators from psa_net, maybe move to ppc + # first + # is_fluctuating = [ + # int("fluctuating" in index.lower()) for index in psa_net.generators.index + # ] # for idx, row in enumerate(is_fluctuating, start=1): # pm["gen"][str(idx)]["fluctuating"] = row @@ -265,14 +275,14 @@ def ppc2pm(ppc, psa_net): # pragma: no cover ppc["gencost"] = ppc["gencost"][: ppc["gen"].shape[0], :] for idx, row in enumerate(ppc["gencost"], start=1): gen = pm["gen"][str(idx)] - gen["model"] = int(row[MODEL]) + gen["model"] = int(row[MODEL]) # noqa: F405 if gen["model"] == 1: - gen["ncost"] = int(row[NCOST]) - gen["cost"] = row[COST : COST + gen["ncost"] * 2].tolist() + gen["ncost"] = int(row[NCOST]) # noqa: F405 + gen["cost"] = row[COST : COST + gen["ncost"] * 2].tolist() # noqa: F405 elif gen["model"] == 2: - gen["ncost"] = int(row[NCOST]) + gen["ncost"] = int(row[NCOST]) # noqa: F405 gen["cost"] = [0] * 3 - costs = row[COST:] + costs = row[COST:] # noqa: F405 if len(costs) > 3: print(costs) raise ValueError("Maximum quadratic cost function allowed") @@ -337,20 +347,21 @@ def _build_bus(psa_net, ppc): bus_cols = len(col_names) ppc["bus"] = np.zeros(shape=(n_bus, bus_cols), dtype=float) ppc["bus"][:, :bus_cols] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.05, 0.95]) - ppc["bus"][:, BUS_I] = np.arange(n_bus) + ppc["bus"][:, BUS_I] = np.arange(n_bus) # noqa: F405 bus_types = ["PQ", "PV", 
"Slack", "None"] bus_types_int = np.array( [bus_types.index(b_type) + 1 for b_type in psa_net.buses["control"].values], dtype=int, ) - ppc["bus"][:, BUS_TYPE] = bus_types_int - ppc["bus"][:, BASE_KV] = psa_net.buses["v_nom"].values - # for edisgo scenario voltage bounds defined for load and feedin case with 0.985<= v <= 1.05 - # bounds have to be at least in that range, only accept stronger bounds if given - ppc["bus"][:, VMAX] = [ + ppc["bus"][:, BUS_TYPE] = bus_types_int # noqa: F405 + ppc["bus"][:, BASE_KV] = psa_net.buses["v_nom"].values # noqa: F405 + # for edisgo scenario voltage bounds defined for load and feed-in case with + # 0.985<= v <= 1.05 bounds have to be at least in that range, only accept stronger + # bounds if given + ppc["bus"][:, VMAX] = [ # noqa: F405 min(val, 1.05) for val in psa_net.buses["v_mag_pu_max"].values ] - ppc["bus"][:, VMIN] = [ + ppc["bus"][:, VMIN] = [ # noqa: F405 max(val, 0.985) for val in psa_net.buses["v_mag_pu_min"].values ] return @@ -359,7 +370,8 @@ def _build_bus(psa_net, ppc): def _build_gen(psa_net, ppc): n_gen = psa_net.generators.shape[0] gen_cols = 21 - # "bus, p_set, q_set, q_max, q_min, v_set_pu, mva_base, status, p_nom, p_min, Pc1, Pc2, Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf + # "bus, p_set, q_set, q_max, q_min, v_set_pu, mva_base, status, p_nom, p_min, Pc1, + # Pc2, Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf ppc["gen"] = np.zeros(shape=(n_gen, gen_cols), dtype=float) # get bus indices for generators bus_indices = np.array( @@ -373,26 +385,26 @@ def _build_gen(psa_net, ppc): n_gen, len(np.unique(bus_indices)) ) ) - ppc["gen"][:, GEN_BUS] = bus_indices + ppc["gen"][:, GEN_BUS] = bus_indices # noqa: F405 # adjust bus types - bus_types = ["PQ", "PV", "Slack", "None"] - gen_types = np.array( - [ - bus_types.index(gen_type) + 1 - for gen_type in psa_net.generators["control"].values - ], - dtype=int, - ) + # bus_types = ["PQ", "PV", "Slack", "None"] + # 
gen_types = np.array( + # [ + # bus_types.index(gen_type) + 1 + # for gen_type in psa_net.generators["control"].values + # ], + # dtype=int, + # ) # ppc["bus"][bus_indices,BUS_TYPE] = gen_types # set setpoint of pg and qg - ppc["gen"][:, PG] = psa_net.generators["p_set"].values - ppc["gen"][:, QG] = psa_net.generators["q_set"].values + ppc["gen"][:, PG] = psa_net.generators["p_set"].values # noqa: F405 + ppc["gen"][:, QG] = psa_net.generators["q_set"].values # noqa: F405 - ppc["gen"][:, MBASE] = 1.0 - ppc["gen"][:, GEN_STATUS] = 1.0 + ppc["gen"][:, MBASE] = 1.0 # noqa: F405 + ppc["gen"][:, GEN_STATUS] = 1.0 # noqa: F405 - ppc["gen"][:, PMAX] = psa_net.generators["p_nom"].values - ppc["gen"][:, PMIN] = 0 + ppc["gen"][:, PMAX] = psa_net.generators["p_nom"].values # noqa: F405 + ppc["gen"][:, PMIN] = 0 # noqa: F405 # TODO SET QMAX AND QMIN! e.g.: cos(phi) value from config # ppc["gen"][:,QMAX] = 0 # ppc["gen"][:, QMIN] = 0 @@ -403,23 +415,42 @@ def _build_gen(psa_net, ppc): cost_cols = 7 ppc["gencost"] = np.zeros(shape=(n_gen, cost_cols), dtype=float) # polynomial cost function - ppc["gencost"][:, MODEL] = POLYNOMIAL - ppc["gencost"][:, STARTUP] = psa_net.generators["start_up_cost"].values - ppc["gencost"][:, SHUTDOWN] = psa_net.generators["shut_down_cost"].values + ppc["gencost"][:, MODEL] = POLYNOMIAL # noqa: F405 + ppc["gencost"][:, STARTUP] = psa_net.generators[ # noqa: F405 + "start_up_cost" + ].values + ppc["gencost"][:, SHUTDOWN] = psa_net.generators[ # noqa: F405 + "shut_down_cost" + ].values # quadratic cost function has 3 cost coefficients - ppc["gencost"][:, NCOST] = 3 - ppc["gencost"][:, COST] = 0.0 - ppc["gencost"][:, COST + 1] = psa_net.generators["marginal_cost"].values - ppc["gencost"][:, COST + 2] = 0.0 + ppc["gencost"][:, NCOST] = 3 # noqa: F405 + ppc["gencost"][:, COST] = 0.0 # noqa: F405 + ppc["gencost"][:, COST + 1] = psa_net.generators[ # noqa: F405 + "marginal_cost" + ].values + ppc["gencost"][:, COST + 2] = 0.0 # noqa: F405 return def 
_build_branch(psa_net, ppc): n_branch = len(psa_net.lines.index) print("build {} lines".format(n_branch)) - col_names = "fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax".split( - ", " - ) + col_names = [ + "fbus", + "tbus", + "r", + "x", + "b", + "rateA", + "rateB", + "rateC", + "ratio", + "angle", + "status", + "angmin", + "angmax", + ] + branch_cols = len(col_names) ppc["branch"] = np.zeros(shape=(n_branch, branch_cols), dtype=float) from_bus = np.array( @@ -428,20 +459,20 @@ def _build_branch(psa_net, ppc): to_bus = np.array( [psa_net.buses.index.get_loc(bus_name) for bus_name in psa_net.lines["bus1"]] ) - ppc["branch"][:, F_BUS] = from_bus - ppc["branch"][:, T_BUS] = to_bus - - ppc["branch"][:, BR_R] = psa_net.lines["r_pu"].values - ppc["branch"][:, BR_X] = psa_net.lines["x_pu"].values - ppc["branch"][:, BR_B] = psa_net.lines["b_pu"].values - ppc["branch"][:, RATE_A] = psa_net.lines["s_nom"].values - ppc["branch"][:, RATE_B] = 250 # Default values - ppc["branch"][:, RATE_C] = 250 # Default values - ppc["branch"][:, TAP] = 0.0 - ppc["branch"][:, SHIFT] = 0.0 - ppc["branch"][:, BR_STATUS] = 1.0 - ppc["branch"][:, ANGMIN] = -360 - ppc["branch"][:, ANGMAX] = 360 + ppc["branch"][:, F_BUS] = from_bus # noqa: F405 + ppc["branch"][:, T_BUS] = to_bus # noqa: F405 + + ppc["branch"][:, BR_R] = psa_net.lines["r_pu"].values # noqa: F405 + ppc["branch"][:, BR_X] = psa_net.lines["x_pu"].values # noqa: F405 + ppc["branch"][:, BR_B] = psa_net.lines["b_pu"].values # noqa: F405 + ppc["branch"][:, RATE_A] = psa_net.lines["s_nom"].values # noqa: F405 + ppc["branch"][:, RATE_B] = 250 # Default values # noqa: F405 + ppc["branch"][:, RATE_C] = 250 # Default values # noqa: F405 + ppc["branch"][:, TAP] = 0.0 # noqa: F405 + ppc["branch"][:, SHIFT] = 0.0 # noqa: F405 + ppc["branch"][:, BR_STATUS] = 1.0 # noqa: F405 + ppc["branch"][:, ANGMIN] = -360 # noqa: F405 + ppc["branch"][:, ANGMAX] = 360 # noqa: F405 # TODO BRANCHCOSTS! 
# check which branch costs are given in psa_net, ncost = sum( @@ -451,9 +482,10 @@ def _build_branch(psa_net, ppc): if ncost == 0: print("no branch costs are given in pypsa network") elif ncost == 1: - if not "costs_cable" in psa_net.lines.columns: + if "costs_cable" not in psa_net.lines.columns: print( - "costs for cables not in pypsa network, not possible to define cost function for network expansion" + "costs for cables not in pypsa network, not possible to define cost" + "function for network expansion" ) else: ppc["branchcost"] = np.zeros(shape=(n_branch, 2), dtype=float) @@ -471,9 +503,21 @@ def _build_branch(psa_net, ppc): def _build_transformers(psa_net, ppc): n_transformers = len(psa_net.transformers.index) print("appending {} transformers".format(n_transformers)) - col_names = "fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax".split( - ", " - ) + col_names = [ + "fbus", + "tbus", + "r", + "x", + "b", + "rateA", + "rateB", + "rateC", + "ratio", + "angle", + "status", + "angmin", + "angmax", + ] transformers = np.zeros(shape=(n_transformers, len(col_names)), dtype=float) from_bus = np.array( @@ -488,20 +532,20 @@ def _build_transformers(psa_net, ppc): for bus_name in psa_net.transformers["bus1"] ] ) - transformers[:, F_BUS] = from_bus - transformers[:, T_BUS] = to_bus - - transformers[:, BR_R] = psa_net.transformers["r_pu"].values - transformers[:, BR_X] = psa_net.transformers["x_pu"].values - transformers[:, BR_B] = psa_net.transformers["b_pu"].values - transformers[:, RATE_A] = psa_net.transformers["s_nom"].values - transformers[:, RATE_B] = 250 # Default values - transformers[:, RATE_C] = 250 # Default values - transformers[:, TAP] = psa_net.transformers["tap_ratio"].values - transformers[:, SHIFT] = psa_net.transformers["phase_shift"].values - transformers[:, BR_STATUS] = 1.0 - transformers[:, ANGMIN] = -360 - transformers[:, ANGMAX] = 360 + transformers[:, F_BUS] = from_bus # noqa: F405 + transformers[:, T_BUS] = to_bus # 
noqa: F405 + + transformers[:, BR_R] = psa_net.transformers["r_pu"].values # noqa: F405 + transformers[:, BR_X] = psa_net.transformers["x_pu"].values # noqa: F405 + transformers[:, BR_B] = psa_net.transformers["b_pu"].values # noqa: F405 + transformers[:, RATE_A] = psa_net.transformers["s_nom"].values # noqa: F405 + transformers[:, RATE_B] = 250 # Default values # noqa: F405 + transformers[:, RATE_C] = 250 # Default values # noqa: F405 + transformers[:, TAP] = psa_net.transformers["tap_ratio"].values # noqa: F405 + transformers[:, SHIFT] = psa_net.transformers["phase_shift"].values # noqa: F405 + transformers[:, BR_STATUS] = 1.0 # noqa: F405 + transformers[:, ANGMIN] = -360 # noqa: F405 + transformers[:, ANGMAX] = 360 # noqa: F405 ppc["branch"] = np.append(ppc["branch"], transformers, axis=0) # add trafo costs to branch cost with same shape @@ -530,7 +574,7 @@ def _build_load(psa_net, ppc): ) ) - ## USE LOAD DATA FROM psa_net.loads as static network data + # USE LOAD DATA FROM psa_net.loads as static network data # set bool if loads contains a timeseries # istime = len(psa_net.loads_t["p_set"].values[0]) != 0 # istime = False @@ -538,15 +582,17 @@ def _build_load(psa_net, ppc): for (load_idx, bus_idx) in enumerate(load_buses): # if istime: - # # if timeseries take maximal value of load_bus for static information of the network + # # if timeseries take maximal value of load_bus for static information of + # # the network # p_d = max(psa_net.loads_t["p_set"].values[:,load_idx]) # q_d = max(psa_net.loads_t["q_set"].values[:,load_idx]) # else: p_d = psa_net.loads["p_set"].values[load_idx] q_d = psa_net.loads["q_set"].values[load_idx] - # increase demand at bus_idx by p_d and q_d from load_idx, as multiple loads can be attached to single bus - ppc["bus"][bus_idx, PD] += p_d - ppc["bus"][bus_idx, QD] += q_d + # increase demand at bus_idx by p_d and q_d from load_idx, as multiple loads + # can be attached to single bus + ppc["bus"][bus_idx, PD] += p_d # noqa: F405 + 
ppc["bus"][bus_idx, QD] += q_d # noqa: F405 return @@ -591,8 +637,10 @@ def _build_generator_dict(psa_net, ppc): generator_dict = {"gen_data": dict()} time_horizon = len(psa_net.generators_t["p_set"]) generator_dict["time_horizon"] = time_horizon - # buses_with_gens = [psa_net.generators.loc[busname]["bus"] for busname in psa_net.generators_t["p_set"].columns] - # gen_buses = np.array([psa_net.buses.index.get_loc(bus_name) for bus_name in buses_with_gens]) + # buses_with_gens = [psa_net.generators.loc[busname]["bus"] for busname in + # psa_net.generators_t["p_set"].columns] + # gen_buses = np.array([psa_net.buses.index.get_loc(bus_name) for bus_name in + # buses_with_gens]) gen_buses = [ psa_net.buses.index.get_loc(bus_name) for bus_name in psa_net.generators["bus"] ] diff --git a/edisgo/tools/preprocess_pypsa_opf_structure.py b/edisgo/tools/preprocess_pypsa_opf_structure.py index 5bb53b15e..d004cc9c4 100644 --- a/edisgo/tools/preprocess_pypsa_opf_structure.py +++ b/edisgo/tools/preprocess_pypsa_opf_structure.py @@ -47,9 +47,8 @@ def preprocess_pypsa_opf_structure(edisgo_grid, psa_network, hvmv_trafo=False): # check for nan value if is_fluct != is_fluct: print( - "value of fluctuating for slack generator is {}, it is changed to zero".format( - is_fluct - ) + f"value of fluctuating for slack generator is {is_fluct}, it is changed to" + f" zero" ) psa_network.generators.fluctuating.loc[gen_slack_loc] = False psa_network.generators.p_nom.loc[gen_slack_loc] = False @@ -82,7 +81,12 @@ def preprocess_pypsa_opf_structure(edisgo_grid, psa_network, hvmv_trafo=False): slack_bus_hv.v_nom = trafo["v_nom_0"] slack_bus_hv.control = "Slack" - buses_df = slack_bus_hv.append(psa_network.buses) + buses_df = pd.concat( + [ + slack_bus_hv, + psa_network.buses, + ] + ) psa_network.buses = buses_df # Move Generator_slack to new slack bus @@ -175,10 +179,21 @@ def aggregate_fluct_generators(psa_network): }, index=[gen_name], ) - gen_aggr_df_all = gen_aggr_df_all.append(gen_aggr_df) - # 
drop aggregated generators and add new generator to generator dataframe + gen_aggr_df_all = pd.concat( + [ + gen_aggr_df_all, + gen_aggr_df, + ] + ) + # drop aggregated generators and add new generator to generator + # dataframe gen_df = gen_df.drop(gens_to_aggr.index) - gen_df = gen_df.append(gen_aggr_df) + gen_df = pd.concat( + [ + gen_df, + gen_aggr_df, + ] + ) # gens = gens.drop(gens_to_aggr.index) # sum timeseries for aggregated generators @@ -205,5 +220,6 @@ def aggregate_fluct_generators(psa_network): # write aggregated generator dataframe on pypsa network psa_network.generators = gen_df - # write aggregated timeseries into psa_network.generators_t as pypsa.descriptors.Dict() + # write aggregated timeseries into psa_network.generators_t as + # pypsa.descriptors.Dict() psa_network.generators_t = Dict(gen_t_dict) diff --git a/edisgo/tools/tools.py b/edisgo/tools/tools.py index 8a2751a18..d7603c8a9 100644 --- a/edisgo/tools/tools.py +++ b/edisgo/tools/tools.py @@ -14,17 +14,12 @@ if "READTHEDOCS" not in os.environ: + import geopandas as gpd + from egoio.db_tables import climate - from egoio.tools.db import connection from shapely.geometry.multipolygon import MultiPolygon from shapely.wkt import loads as wkt_loads - geopandas = True - try: - import geopandas as gpd - except: - geopandas = False - def select_worstcase_snapshots(edisgo_obj): """ @@ -77,7 +72,8 @@ def calculate_relative_line_load(edisgo_obj, lines=None, timesteps=None): Line names/representatives of lines to calculate line loading for. If None, line loading is calculated for all lines in the network. Default: None. - timesteps : :pandas:`pandas.Timestamp` or list(:pandas:`pandas.Timestamp`) or None, optional + timesteps : :pandas:`pandas.Timestamp` or \ + list(:pandas:`pandas.Timestamp`) or None, optional Specifies time steps to calculate line loading for. If timesteps is None, all time steps power flow analysis was conducted for are used. Default: None. 
@@ -394,7 +390,7 @@ def get_path_length_to_station(edisgo_obj): return edisgo_obj.topology.buses_df.path_length_to_station -def assign_voltage_level_to_component(edisgo_obj, df): +def assign_voltage_level_to_component(df, buses_df): """ Adds column with specification of voltage level component is in. @@ -405,10 +401,13 @@ def assign_voltage_level_to_component(edisgo_obj, df): Parameters ---------- - edisgo_obj : :class:`~.EDisGo` df : :pandas:`pandas.DataFrame` Dataframe with component names in the index. Only required column is column 'bus', giving the name of the bus the component is connected to. + buses_df : :pandas:`pandas.DataFrame` + Dataframe with bus information. Bus names are in the index. Only required column + is column 'v_nom', giving the nominal voltage of the voltage level the + bus is in. Returns -------- @@ -419,7 +418,7 @@ def assign_voltage_level_to_component(edisgo_obj, df): """ df["voltage_level"] = df.apply( - lambda _: "lv" if edisgo_obj.topology.buses_df.at[_.bus, "v_nom"] < 1 else "mv", + lambda _: "lv" if buses_df.at[_.bus, "v_nom"] < 1 else "mv", axis=1, ) return df diff --git a/examples/edisgo_simple_example.ipynb b/examples/edisgo_simple_example.ipynb index 6296ce351..4e4a943c0 100755 --- a/examples/edisgo_simple_example.ipynb +++ b/examples/edisgo_simple_example.ipynb @@ -229,7 +229,8 @@ } ], "source": [ - "edisgo = EDisGo(ding0_grid=ding0_grid, worst_case_analysis=worst_case_analysis)" + "edisgo = EDisGo(ding0_grid=ding0_grid)\n", + "edisgo.set_time_series_worst_case_analysis()" ] }, { @@ -2659,7 +2660,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -2673,7 +2674,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.11" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/examples/example_grid_reinforcement.py b/examples/example_grid_reinforcement.py index bb6948474..ae08a8588 
100644 --- a/examples/example_grid_reinforcement.py +++ b/examples/example_grid_reinforcement.py @@ -28,13 +28,12 @@ import logging import os -import pandas as pd import requests from edisgo import EDisGo from edisgo.network.results import Results -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -70,23 +69,24 @@ def run_example(): scenario = "nep2035" # Set up worst-case scenario - edisgo = EDisGo(ding0_grid=dingo_grid_path, worst_case_analysis="worst-case") + edisgo = EDisGo(ding0_grid=dingo_grid_path) + edisgo.set_time_series_worst_case_analysis() # Reinforce ding0 grid to obtain a stable status quo grid logging.info("Conduct grid reinforcement to obtain stable status quo grid.") # Overwrite config parameters for allowed voltage deviations in # initial topology reinforcement to better represent currently used limits edisgo.config["grid_expansion_allowed_voltage_deviations"] = { - "feedin_case_lower": 0.9, + "feed-in_case_lower": 0.9, "load_case_upper": 1.1, "hv_mv_trafo_offset": 0.04, "hv_mv_trafo_control_deviation": 0.0, "mv_load_case_max_v_deviation": 0.055, - "mv_feedin_case_max_v_deviation": 0.02, + "mv_feed-in_case_max_v_deviation": 0.02, "lv_load_case_max_v_deviation": 0.065, - "lv_feedin_case_max_v_deviation": 0.03, + "lv_feed-in_case_max_v_deviation": 0.03, "mv_lv_station_load_case_max_v_deviation": 0.02, - "mv_lv_station_feedin_case_max_v_deviation": 0.01, + "mv_lv_station_feed-in_case_max_v_deviation": 0.01, } # Conduct reinforcement edisgo.reinforce() @@ -100,6 +100,7 @@ def run_example(): # Get data on generators in NEP scenario and connect generators to the grid edisgo.import_generators(generator_scenario=scenario) + edisgo.set_time_series_worst_case_analysis() # Conduct topology reinforcement edisgo.reinforce() diff --git a/examples/plot_example.ipynb b/examples/plot_example.ipynb index 2e8a5494a..f8f768096 100755 --- a/examples/plot_example.ipynb +++ b/examples/plot_example.ipynb @@ -7,9 
+7,9 @@ "outputs": [], "source": [ "__copyright__ = \"Reiner Lemoine Institut gGmbH\"\n", - "__license__ = \"GNU Affero General Public License Version 3 (AGPL-3.0)\"\n", - "__url__ = \"https://github.com/openego/eDisGo/blob/master/LICENSE\"\n", - "__author__ = \"mltja\"" + "__license__ = \"GNU Affero General Public License Version 3 (AGPL-3.0)\"\n", + "__url__ = \"https://github.com/openego/eDisGo/blob/master/LICENSE\"\n", + "__author__ = \"mltja\"" ] }, { @@ -81,28 +81,36 @@ "outputs": [], "source": [ "def download_ding0_example_grid():\n", - " \n", + "\n", " # create directories to save ding0 example grid into\n", " ding0_example_grid_path = os.path.join(\n", - " os.path.expanduser(\"~\"),\n", - " \".edisgo\",\n", - " \"ding0_test_network\")\n", - " os.makedirs(\n", - " ding0_example_grid_path,\n", - " exist_ok=True)\n", + " os.path.expanduser(\"~\"), \".edisgo\", \"ding0_test_network\"\n", + " )\n", + " os.makedirs(ding0_example_grid_path, exist_ok=True)\n", "\n", " # download files\n", " filenames = [\n", - " \"buses\", \"generators\", \"lines\", \"loads\", \"network\",\n", - " \"switches\", \"transformers\", \"transformers_hvmv\"]\n", + " \"buses\",\n", + " \"generators\",\n", + " \"lines\",\n", + " \"loads\",\n", + " \"network\",\n", + " \"switches\",\n", + " \"transformers\",\n", + " \"transformers_hvmv\",\n", + " ]\n", "\n", " for file in filenames:\n", " req = requests.get(\n", - " \"https://raw.githubusercontent.com/openego/eDisGo/dev/tests/ding0_test_network_2/{}.csv\".format(file))\n", + " \"https://raw.githubusercontent.com/openego/eDisGo/dev/tests/ding0_test_network_2/{}.csv\".format(\n", + " file\n", + " )\n", + " )\n", " filename = os.path.join(ding0_example_grid_path, \"{}.csv\".format(file))\n", " with open(filename, \"wb\") as fout:\n", " fout.write(req.content)\n", "\n", + "\n", "download_ding0_example_grid()" ] }, @@ -112,10 +120,7 @@ "metadata": {}, "outputs": [], "source": [ - "ding0_grid = os.path.join(\n", - " os.path.expanduser(\"~\"),\n", 
- " \".edisgo\",\n", - " \"ding0_test_network\")" + "ding0_grid = os.path.join(os.path.expanduser(\"~\"), \".edisgo\", \"ding0_test_network\")" ] }, { @@ -124,7 +129,7 @@ "metadata": {}, "outputs": [], "source": [ - "worst_case_analysis = 'worst-case'" + "worst_case_analysis = \"worst-case\"" ] }, { @@ -7476,13 +7481,11 @@ "edisgo_obj = edisgo_root\n", "grid = edisgo_obj.topology.mv_grid\n", "G = grid.graph\n", - "mode_lines ='relative_loading'\n", - "mode_nodes ='voltage_deviation'\n", - "fig = draw_plotly(edisgo_obj,\n", - " G,\n", - " line_color=mode_lines,\n", - " node_color=mode_nodes,\n", - " grid=grid)\n", + "mode_lines = \"relative_loading\"\n", + "mode_nodes = \"voltage_deviation\"\n", + "fig = draw_plotly(\n", + " edisgo_obj, G, line_color=mode_lines, node_color=mode_nodes, grid=grid\n", + ")\n", "fig.show()" ] }, @@ -14572,13 +14575,11 @@ "source": [ "edisgo_obj = edisgo_root\n", "G = edisgo_obj.topology.mv_grid.graph\n", - "mode_lines ='loading'\n", - "mode_nodes ='voltage_deviation'\n", - "fig = draw_plotly(edisgo_obj,\n", - " G,\n", - " line_color=mode_lines,\n", - " node_color=mode_nodes,\n", - " grid=False)\n", + "mode_lines = \"loading\"\n", + "mode_nodes = \"voltage_deviation\"\n", + "fig = draw_plotly(\n", + " edisgo_obj, G, line_color=mode_lines, node_color=mode_nodes, grid=False\n", + ")\n", "fig.show()" ] }, @@ -21826,13 +21827,11 @@ "source": [ "edisgo_obj = edisgo_reinforced\n", "G = edisgo_obj.topology.mv_grid.graph\n", - "mode_lines ='reinforce'\n", - "mode_nodes ='adjecencies'\n", - "fig = draw_plotly(edisgo_obj,\n", - " G,\n", - " line_color=mode_lines,\n", - " node_color=mode_nodes,\n", - " grid=False)\n", + "mode_lines = \"reinforce\"\n", + "mode_nodes = \"adjecencies\"\n", + "fig = draw_plotly(\n", + " edisgo_obj, G, line_color=mode_lines, node_color=mode_nodes, grid=False\n", + ")\n", "fig.show()" ] }, @@ -21932,7 +21931,9 @@ } ], "source": [ - "app = dash_plot(edisgo_objects={\"edisgo_obj_1\": edisgo_root, \"edisgo_obj_2\": 
edisgo_reinforced})\n", + "app = dash_plot(\n", + " edisgo_objects={\"edisgo_obj_1\": edisgo_root, \"edisgo_obj_2\": edisgo_reinforced}\n", + ")\n", "app.run_server(mode=\"inline\")" ] }, @@ -21972,7 +21973,13 @@ } ], "source": [ - "app = dash_plot(edisgo_objects={\"edisgo_obj_1\": edisgo_root, \"edisgo_obj_2\": edisgo_reinforced, \"edisgo_obj_3\": edisgo_copy})\n", + "app = dash_plot(\n", + " edisgo_objects={\n", + " \"edisgo_obj_1\": edisgo_root,\n", + " \"edisgo_obj_2\": edisgo_reinforced,\n", + " \"edisgo_obj_3\": edisgo_copy,\n", + " }\n", + ")\n", "app.run_server(mode=\"inline\")" ] }, diff --git a/rtd_requirements.txt b/rtd_requirements.txt index a3d7b2740..68026475e 100644 --- a/rtd_requirements.txt +++ b/rtd_requirements.txt @@ -1,8 +1,7 @@ -sphinx_rtd_theme demandlib networkx >= 2.5.0 geopy >= 2.0.0 -pandas >= 1.2.0, < 1.3.0 +pandas >= 1.2.0 pyproj >= 3.0.0 pypsa >=0.17.0 pyomo >= 6.0 @@ -10,5 +9,11 @@ multiprocess workalendar egoio >= 0.4.7 matplotlib >= 3.3.0 +plotly +dash == 2.0.0 +werkzeug==2.0.3 +jupyter_dash pypower sklearn +sphinx_rtd_theme +sphinx-autodoc-typehints diff --git a/setup.py b/setup.py index 60df2034e..971fdfc63 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,6 @@ import sys from setuptools import find_packages, setup -from setuptools.command.install import install if sys.version_info[:2] < (3, 7): error = ( @@ -35,7 +34,7 @@ def read(fname): "demandlib", "networkx >= 2.5.0", "geopy >= 2.0.0", - "pandas >= 1.2.0, < 1.3.0", + "pandas >= 1.2.0", "geopandas >= 0.9.0", "pyproj >= 3.0.0", "shapely >= 1.7.0", @@ -53,17 +52,25 @@ def read(fname): "pygeos", ] -geo_plot_requirements = ["contextily", "descartes", "plotly", "dash==2.0.0"] +geo_plot_requirements = [ + "contextily", + "descartes", + "plotly", + "dash==2.0.0", + "werkzeug==2.0.3", +] examples_requirements = [ "jupyter", "jupyterlab", "plotly", "dash==2.0.0", "jupyter_dash", + "werkzeug==2.0.3", ] dev_requirements = [ "pytest", "sphinx_rtd_theme", + "sphinx-autodoc-typehints", 
"pre-commit", "black", "isort", diff --git a/tests/flex_opt/test_check_tech_constraints.py b/tests/flex_opt/test_check_tech_constraints.py index 329c854e7..348657d0f 100644 --- a/tests/flex_opt/test_check_tech_constraints.py +++ b/tests/flex_opt/test_check_tech_constraints.py @@ -11,10 +11,8 @@ class TestCheckTechConstraints: @classmethod def setup_class(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + self.edisgo.set_time_series_worst_case_analysis() self.timesteps = self.edisgo.timeseries.timeindex @pytest.fixture(autouse=True) @@ -34,10 +32,10 @@ def test_mv_line_load(self): # check relative overload of one line assert np.isclose( df.at["Line_10005", "max_rel_overload"], - self.edisgo.results.i_res.at[self.timesteps[0], "Line_10005"] + self.edisgo.results.i_res.at[self.timesteps[3], "Line_10005"] / (7.274613391789284 / 20 / sqrt(3)), ) - assert df.at["Line_10005", "time_index"] == self.timesteps[0] + assert df.at["Line_10005", "time_index"] == self.timesteps[3] def test_lv_line_load(self): # implicitly checks function _line_load @@ -48,18 +46,18 @@ def test_lv_line_load(self): # check relative overload of one line assert np.isclose( df.at["Line_50000002", "max_rel_overload"], - self.edisgo.results.i_res.at[self.timesteps[1], "Line_50000002"] + self.edisgo.results.i_res.at[self.timesteps[0], "Line_50000002"] / (0.08521689973238901 / 0.4 / sqrt(3)), ) - assert df.at["Line_50000002", "time_index"] == self.timesteps[1] + assert df.at["Line_50000002", "time_index"] == self.timesteps[0] def test_hv_mv_station_load(self): # implicitly checks function _station_load - # create over-load problem in both time steps with higher over-load - # in load case + # create over-load problem with highest over-load in first time step (as it is + # a load case) self.edisgo.results.pfa_slack = pd.DataFrame( - data={"p": [30, 25], "q": [30, 25]}, 
index=self.timesteps + data={"p": [30, 25, 30, 20], "q": [30, 25, 30, 20]}, index=self.timesteps ) df = check_tech_constraints.hv_mv_station_load(self.edisgo) @@ -68,9 +66,9 @@ def test_hv_mv_station_load(self): # check missing transformer capacity assert np.isclose( df.at["MVGrid_1", "s_missing"], - (sqrt(1250) - 20) / 0.5, + (np.hypot(30, 30) - 20) / 0.5, ) - assert df.at["MVGrid_1", "time_index"] == self.timesteps[1] + assert df.at["MVGrid_1", "time_index"] == self.timesteps[0] def test_mv_lv_station_load(self): # implicitly checks function _station_load @@ -84,41 +82,41 @@ def test_mv_lv_station_load(self): self.edisgo.results.s_res.at[self.timesteps[1], "LVStation_1_transformer_1"] - 0.16, ) - assert df.at["LVGrid_1", "time_index"] == self.timesteps[1] + assert df.at["LVGrid_1", "time_index"] == self.timesteps[0] def test_lines_allowed_load(self): # check for MV df = check_tech_constraints.lines_allowed_load(self.edisgo, "mv") # check shape of dataframe - assert (2, 30) == df.shape + assert (4, 30) == df.shape # check in feed-in case assert np.isclose( - df.at[self.timesteps[0], "Line_10005"], - 7.274613391789284 / 20 / sqrt(3), + df.at[self.timesteps[2], "Line_10005"], + 7.27461339178928 / 20 / sqrt(3), ) # check in load case (line in cycle as well as stub) assert np.isclose( - df.at[self.timesteps[1], "Line_10005"], + df.at[self.timesteps[0], "Line_10005"], 7.274613391789284 / 20 / sqrt(3) * 0.5, ) assert np.isclose( - df.at[self.timesteps[1], "Line_10024"], + df.at[self.timesteps[0], "Line_10024"], 7.27461339178928 / 20 / sqrt(3), ) # check for LV df = check_tech_constraints.lines_allowed_load(self.edisgo, "lv") # check shape of dataframe - assert (2, 99) == df.shape + assert (4, 99) == df.shape # check in feed-in case assert np.isclose( - df.at[self.timesteps[0], "Line_50000002"], + df.at[self.timesteps[2], "Line_50000002"], 0.08521689973238901 / 0.4 / sqrt(3), ) # check in load case assert np.isclose( - df.at[self.timesteps[1], "Line_50000002"], + 
df.at[self.timesteps[0], "Line_50000002"], 0.08521689973238901 / 0.4 / sqrt(3), ) @@ -175,12 +173,12 @@ def test_mv_voltage_deviation(self): # check under- and overvoltage deviation values assert list(voltage_issues["MVGrid_1"].index.values) == [ "Bus_Generator_1", - "Bus_GeneratorFluctuating_2", "Bus_GeneratorFluctuating_3", + "Bus_GeneratorFluctuating_2", ] assert np.isclose( voltage_issues["MVGrid_1"].at["Bus_GeneratorFluctuating_2", "v_diff_max"], - 0.06, + 0.01, ) assert ( voltage_issues["MVGrid_1"].at["Bus_Generator_1", "time_index"] @@ -215,10 +213,10 @@ def test_lv_voltage_deviation(self): # check with voltage_levels="lv" and mode=None # create one voltage issue in LVGrid_6 self.edisgo.results.v_res.at[ - self.timesteps[0], "BusBar_MVGrid_1_LVGrid_6_LV" + self.timesteps[2], "BusBar_MVGrid_1_LVGrid_6_LV" ] = 1.14 self.edisgo.results.v_res.at[ - self.timesteps[0], "Bus_BranchTee_LVGrid_6_1" + self.timesteps[2], "Bus_BranchTee_LVGrid_6_1" ] = 1.18 voltage_issues = check_tech_constraints.lv_voltage_deviation( self.edisgo, voltage_levels="lv" @@ -231,7 +229,7 @@ def test_lv_voltage_deviation(self): ) # create second voltage issue in LVGrid_6, greater than first issue self.edisgo.results.v_res.at[ - self.timesteps[0], "Bus_BranchTee_LVGrid_6_2" + self.timesteps[2], "Bus_BranchTee_LVGrid_6_2" ] = 1.19 voltage_issues = check_tech_constraints.lv_voltage_deviation( self.edisgo, voltage_levels="lv" @@ -274,10 +272,10 @@ def test__mv_allowed_voltage_limits(self): v_limits_lower, ) = check_tech_constraints._mv_allowed_voltage_limits(self.edisgo, "mv") - assert 1.05 == v_limits_upper.loc[self.timesteps[0]] - assert 1.10 == v_limits_upper.loc[self.timesteps[1]] - assert 0.90 == v_limits_lower.loc[self.timesteps[0]] - assert 0.985 == v_limits_lower.loc[self.timesteps[1]] + assert 1.05 == v_limits_upper.loc[self.timesteps[2]] + assert 1.10 == v_limits_upper.loc[self.timesteps[0]] + assert 0.90 == v_limits_lower.loc[self.timesteps[2]] + assert 0.985 == 
v_limits_lower.loc[self.timesteps[0]] # run function with voltage_levels="mv_lv" ( @@ -285,10 +283,10 @@ def test__mv_allowed_voltage_limits(self): v_limits_lower, ) = check_tech_constraints._mv_allowed_voltage_limits(self.edisgo, "mv_lv") + assert 1.10 == v_limits_upper.loc[self.timesteps[3]] assert 1.10 == v_limits_upper.loc[self.timesteps[0]] - assert 1.10 == v_limits_upper.loc[self.timesteps[1]] + assert 0.90 == v_limits_lower.loc[self.timesteps[3]] assert 0.90 == v_limits_lower.loc[self.timesteps[0]] - assert 0.90 == v_limits_lower.loc[self.timesteps[1]] def test__lv_allowed_voltage_limits(self): @@ -296,10 +294,10 @@ def test__lv_allowed_voltage_limits(self): lv_grid = self.edisgo.topology._grids["LVGrid_1"] # set voltage at stations' secondary side to known value self.edisgo.results._v_res.loc[ - self.timesteps[0], "BusBar_MVGrid_1_LVGrid_1_LV" + self.timesteps[2], "BusBar_MVGrid_1_LVGrid_1_LV" ] = 1.05 self.edisgo.results._v_res.loc[ - self.timesteps[1], "BusBar_MVGrid_1_LVGrid_1_LV" + self.timesteps[0], "BusBar_MVGrid_1_LVGrid_1_LV" ] = 0.98 # run function with mode=None @@ -310,14 +308,14 @@ def test__lv_allowed_voltage_limits(self): self.edisgo, lv_grid, mode=None ) - assert 1.085 == v_limits_upper.loc[self.timesteps[0]] - assert 1.10 == v_limits_upper.loc[self.timesteps[1]] - assert 0.90 == v_limits_lower.loc[self.timesteps[0]] - assert 0.915 == v_limits_lower.loc[self.timesteps[1]] + assert 1.085 == v_limits_upper.loc[self.timesteps[2]] + assert 1.10 == v_limits_upper.loc[self.timesteps[0]] + assert 0.90 == v_limits_lower.loc[self.timesteps[2]] + assert 0.915 == v_limits_lower.loc[self.timesteps[0]] # set voltage at stations' primary side to known value self.edisgo.results._v_res.loc[ - self.timesteps[0], "BusBar_MVGrid_1_LVGrid_1_MV" + self.timesteps[3], "BusBar_MVGrid_1_LVGrid_1_MV" ] = 1.03 self.edisgo.results._v_res.loc[ self.timesteps[1], "BusBar_MVGrid_1_LVGrid_1_MV" @@ -331,9 +329,9 @@ def test__lv_allowed_voltage_limits(self): self.edisgo, 
lv_grid, mode="stations" ) - assert 1.045 == v_limits_upper.loc[self.timesteps[0]] + assert 1.045 == v_limits_upper.loc[self.timesteps[3]] assert 1.10 == v_limits_upper.loc[self.timesteps[1]] - assert 0.90 == v_limits_lower.loc[self.timesteps[0]] + assert 0.90 == v_limits_lower.loc[self.timesteps[3]] assert 0.97 == v_limits_lower.loc[self.timesteps[1]] def test_voltage_diff(self): @@ -349,8 +347,8 @@ def test_voltage_diff(self): ) # check shapes of under- and overvoltage dataframes - assert (2, 2) == uv_violations.shape - assert (1, 2) == ov_violations.shape + assert (2, 4) == uv_violations.shape + assert (1, 4) == ov_violations.shape # check under- and overvoltage deviation values assert np.isclose(uv_violations.at["Bus_Generator_1", self.timesteps[1]], 0.02) assert np.isclose( diff --git a/tests/flex_opt/test_costs.py b/tests/flex_opt/test_costs.py index 3774c1b81..f5dea1b0a 100644 --- a/tests/flex_opt/test_costs.py +++ b/tests/flex_opt/test_costs.py @@ -9,10 +9,8 @@ class TestCosts: @classmethod def setup_class(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + self.edisgo.set_time_series_worst_case_analysis() self.edisgo.analyze() def test_costs(self): @@ -21,8 +19,11 @@ def test_costs(self): "MVStation_1_transformer_1" ] hv_mv_trafo.name = "MVStation_1_transformer_reinforced_2" - self.edisgo.topology.transformers_hvmv_df = ( - self.edisgo.topology.transformers_hvmv_df.append(hv_mv_trafo) + self.edisgo.topology.transformers_hvmv_df = pd.concat( + [ + self.edisgo.topology.transformers_hvmv_df, + hv_mv_trafo.to_frame().T, + ] ) mv_lv_trafo = self.edisgo.topology.transformers_df.loc[ "LVStation_1_transformer_1" @@ -31,8 +32,11 @@ def test_costs(self): self.edisgo.topology.transformers_df.drop( "LVStation_1_transformer_1", inplace=True ) - self.edisgo.topology.transformers_df = ( - 
self.edisgo.topology.transformers_df.append(mv_lv_trafo) + self.edisgo.topology.transformers_df = pd.concat( + [ + self.edisgo.topology.transformers_df, + mv_lv_trafo.to_frame().T, + ] ) self.edisgo.results.equipment_changes = pd.DataFrame( diff --git a/tests/flex_opt/test_q_control.py b/tests/flex_opt/test_q_control.py new file mode 100644 index 000000000..bba452920 --- /dev/null +++ b/tests/flex_opt/test_q_control.py @@ -0,0 +1,221 @@ +import numpy as np +import pandas as pd + +from edisgo.flex_opt import q_control +from edisgo.tools.config import Config + + +class TestQControl: + def test_get_q_sign_generator(self): + assert q_control.get_q_sign_generator("Inductive") == -1 + assert q_control.get_q_sign_generator("capacitive") == 1 + + def test_get_q_sign_load(self): + assert q_control.get_q_sign_load("inductive") == 1 + assert q_control.get_q_sign_load("Capacitive") == -1 + + def test_fixed_cosphi(self): + + timeindex = pd.date_range("1/1/1970", periods=2, freq="H") + active_power_ts = pd.DataFrame( + data={ + "comp_mv_1": [0.5, 1.5], + "comp_mv_2": [2.5, 3.5], + "comp_lv_1": [0.1, 0.0], + "comp_lv_2": [0.15, 0.07], + }, + index=timeindex, + ) + q_sign = pd.Series( + [-1.0, 1.0, 1.0, -1], + index=["comp_mv_1", "comp_mv_2", "comp_lv_1", "comp_lv_2"], + ) + power_factor = pd.Series( + [0.9, 0.95, 1.0, 0.9], + index=["comp_mv_1", "comp_mv_2", "comp_lv_1", "comp_lv_2"], + ) + + # test with q_sign as Series and power_factor as float + reactive_power_ts = q_control.fixed_cosphi( + active_power_ts, + q_sign=q_sign, + power_factor=0.9, + ) + + assert reactive_power_ts.shape == (2, 4) + assert np.isclose( + reactive_power_ts.loc[:, ["comp_mv_1", "comp_lv_2"]].values, + active_power_ts.loc[:, ["comp_mv_1", "comp_lv_2"]].values * -0.484322, + ).all() + assert np.isclose( + reactive_power_ts.loc[:, "comp_lv_1"].values, + active_power_ts.loc[:, "comp_lv_1"].values * 0.484322, + ).all() + + # test with q_sign as int and power_factor as Series + reactive_power_ts = 
q_control.fixed_cosphi( + active_power_ts, + q_sign=1, + power_factor=power_factor, + ) + + assert reactive_power_ts.shape == (2, 4) + assert np.isclose( + reactive_power_ts.loc[:, ["comp_mv_1", "comp_lv_2"]].values, + active_power_ts.loc[:, ["comp_mv_1", "comp_lv_2"]].values * 0.484322, + ).all() + assert np.isclose( + reactive_power_ts.loc[:, "comp_lv_1"].values, + [0.0, 0.0], + ).all() + assert np.isclose( + reactive_power_ts.loc[:, "comp_mv_2"].values, + active_power_ts.loc[:, "comp_mv_2"].values * 0.328684, + ).all() + + # test with q_sign as int and power_factor as float + reactive_power_ts = q_control.fixed_cosphi( + active_power_ts, + q_sign=1, + power_factor=0.95, + ) + + assert reactive_power_ts.shape == (2, 4) + assert np.isclose( + reactive_power_ts.loc[ + :, ["comp_mv_1", "comp_mv_2", "comp_lv_1", "comp_lv_2"] + ].values, + active_power_ts.loc[ + :, ["comp_mv_1", "comp_mv_2", "comp_lv_1", "comp_lv_2"] + ].values + * 0.328684, + ).all() + + def test__fixed_cosphi_default_power_factor( + self, + ): + df = pd.DataFrame( + data={"voltage_level": ["mv", "lv", "lv"]}, + index=["comp_mv_1", "comp_lv_1", "comp_lv_2"], + ) + config = Config() + + # test for component_type="generators" + pf = q_control._fixed_cosphi_default_power_factor( + comp_df=df, component_type="generators", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [0.9, 0.95, 0.95], + ).all() + + # test for component_type="loads" + pf = q_control._fixed_cosphi_default_power_factor( + comp_df=df, component_type="loads", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [0.9, 0.95, 0.95], + ).all() + + # test for component_type="charging_points" + pf = q_control._fixed_cosphi_default_power_factor( + comp_df=df, component_type="charging_points", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", 
"comp_lv_2"]].values, + [1.0, 1.0, 1.0], + ).all() + + # test for component_type="heat_pumps" + pf = q_control._fixed_cosphi_default_power_factor( + comp_df=df, component_type="heat_pumps", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [1.0, 1.0, 1.0], + ).all() + + # test for component_type="storage_units" + pf = q_control._fixed_cosphi_default_power_factor( + comp_df=df, component_type="storage_units", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [0.9, 0.95, 0.95], + ).all() + + def test__fixed_cosphi_default_reactive_power_sign( + self, + ): + + df = pd.DataFrame( + data={"voltage_level": ["mv", "lv", "lv"]}, + index=["comp_mv_1", "comp_lv_1", "comp_lv_2"], + ) + config = Config() + + # test for component_type="generators" + pf = q_control._fixed_cosphi_default_reactive_power_sign( + comp_df=df, component_type="generators", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [-1.0, -1.0, -1.0], + ).all() + + # test for component_type="loads" + pf = q_control._fixed_cosphi_default_reactive_power_sign( + comp_df=df, component_type="loads", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [1.0, 1.0, 1.0], + ).all() + + # test for component_type="charging_points" + pf = q_control._fixed_cosphi_default_reactive_power_sign( + comp_df=df, component_type="charging_points", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [1.0, 1.0, 1.0], + ).all() + + # test for component_type="heat_pumps" + pf = q_control._fixed_cosphi_default_reactive_power_sign( + comp_df=df, component_type="heat_pumps", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + 
pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [1.0, 1.0, 1.0], + ).all() + + # test for component_type="storage_units" + pf = q_control._fixed_cosphi_default_reactive_power_sign( + comp_df=df, component_type="storage_units", configs=config + ) + + assert pf.shape == (3,) + assert np.isclose( + pf.loc[["comp_mv_1", "comp_lv_1", "comp_lv_2"]].values, + [-1.0, -1.0, -1.0], + ).all() diff --git a/tests/flex_opt/test_reinforce_grid.py b/tests/flex_opt/test_reinforce_grid.py new file mode 100644 index 000000000..461655ecf --- /dev/null +++ b/tests/flex_opt/test_reinforce_grid.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +from numpy.testing import assert_array_equal +from pandas.testing import assert_frame_equal + +from edisgo import EDisGo +from edisgo.flex_opt.reinforce_grid import reinforce_grid + + +class TestReinforceGrid: + @classmethod + def setup_class(cls): + cls.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + + cls.edisgo.set_time_series_worst_case_analysis() + + def test_reinforce_grid(self): + modes = [None, "mv", "mvlv", "lv"] + + results_dict = { + mode: reinforce_grid(edisgo=self.edisgo, copy_grid=True, mode=mode) + for mode in modes + } + + for mode, result in results_dict.items(): + if mode is None: + target = ["mv/lv", "mv", "lv"] + elif mode == "mv": + target = ["mv"] + elif mode == "mvlv": + target = ["mv", "mv/lv"] + else: + target = ["mv/lv", "lv"] + + assert_array_equal( + np.sort(target), + np.sort(result.grid_expansion_costs.voltage_level.unique()), + ) + + for comparison_mode, comparison_result in results_dict.items(): + if mode != comparison_mode: + with pytest.raises(AssertionError): + assert_frame_equal( + result.equipment_changes, + comparison_result.equipment_changes, + ) diff --git a/tests/flex_opt/test_reinforce_measures.py b/tests/flex_opt/test_reinforce_measures.py index 140d10e03..e5b5cff65 100644 --- a/tests/flex_opt/test_reinforce_measures.py +++ b/tests/flex_opt/test_reinforce_measures.py @@ -8,13 
+8,12 @@ class TestReinforceMeasures: @classmethod - def setup_class(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) - self.edisgo.analyze() - self.timesteps = pd.date_range("1/1/1970", periods=2, freq="H") + def setup_class(cls): + cls.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + + cls.edisgo.set_time_series_worst_case_analysis() + cls.edisgo.analyze() + cls.timesteps = pd.date_range("1/1/1970", periods=2, freq="H") def test_reinforce_mv_lv_station_overloading(self): # implicitly checks function _station_overloading diff --git a/tests/io/test_ding0_import.py b/tests/io/test_ding0_import.py index 1e15ea5b7..7e130458b 100644 --- a/tests/io/test_ding0_import.py +++ b/tests/io/test_ding0_import.py @@ -42,127 +42,6 @@ def test_path_error(self): with pytest.raises(AssertionError, match=msg): ding0_import.import_ding0_grid("wrong_directory", self.topology) - def test_validate_ding0_grid_import(self): - """Test of validation of grids.""" - comps_dict = { - "buses": "BusBar_MVGrid_1_LVGrid_2_MV", - "generators": "GeneratorFluctuating_14", - "loads": "Load_residential_LVGrid_3_2", - "transformers": "LVStation_5_transformer_1", - "lines": "Line_10014", - "switches": "circuit_breaker_1", - } - # check duplicate node - for comp, name in comps_dict.items(): - new_comp = getattr(self.topology, "_{}_df".format(comp)).loc[name] - comps = getattr(self.topology, "_{}_df".format(comp)) - setattr(self.topology, "_{}_df".format(comp), comps.append(new_comp)) - try: - ding0_import._validate_ding0_grid_import(self.topology) - raise Exception( - "Appending components {} in check duplicate " - "did not work properly.".format(comp) - ) - except ValueError as e: - assert e.args[ - 0 - ] == "{} have duplicate entry in one of the components dataframes.".format( - name - ) - # reset dataframe - setattr(self.topology, "_{}_df".format(comp), comps) - ding0_import._validate_ding0_grid_import(self.topology) - - # 
check not connected generator and load - for nodal_component in ["loads", "generators"]: - comps = getattr(self.topology, "_{}_df".format(nodal_component)) - new_comp = comps.loc[comps_dict[nodal_component]] - new_comp.name = "new_nodal_component" - new_comp.bus = "Non_existent_bus_" + nodal_component - setattr( - self.topology, - "_{}_df".format(nodal_component), - comps.append(new_comp), - ) - try: - ding0_import._validate_ding0_grid_import(self.topology) - raise Exception( - "Appending components {} did not work " - "properly.".format(nodal_component) - ) - except ValueError as e: - assert e.args[ - 0 - ] == "The following {} have buses which are not defined: {}.".format( - nodal_component, new_comp.name - ) - # reset dataframe - setattr(self.topology, "_{}_df".format(nodal_component), comps) - ding0_import._validate_ding0_grid_import(self.topology) - - # check branch components - i = 0 - for branch_component in ["lines", "transformers"]: - comps = getattr(self.topology, "_{}_df".format(branch_component)) - new_comp = comps.loc[comps_dict[branch_component]] - new_comp.name = "new_branch_component" - setattr( - new_comp, - "bus" + str(i), - "Non_existent_bus_" + branch_component, - ) - setattr( - self.topology, - "_{}_df".format(branch_component), - comps.append(new_comp), - ) - try: - ding0_import._validate_ding0_grid_import(self.topology) - raise Exception( - "Appending components {} did not work " - "properly.".format(branch_component) - ) - except ValueError as e: - assert e.args[ - 0 - ] == "The following {} have bus{} which are not defined: {}.".format( - branch_component, i, new_comp.name - ) - # reset dataframe - setattr(self.topology, "_{}_df".format(branch_component), comps) - ding0_import._validate_ding0_grid_import(self.topology) - i += 1 - - # check switches - comps = self.topology.switches_df - for attr in ["bus_open", "bus_closed"]: - new_comp = comps.loc[comps_dict["switches"]] - new_comp.name = "new_switch" - new_comps = comps.append(new_comp) - 
new_comps.at[new_comp.name, attr] = "Non_existent_" + attr - self.topology.switches_df = new_comps - try: - ding0_import._validate_ding0_grid_import(self.topology) - raise Exception("Appending components switches did not work properly.") - except ValueError as e: - assert e.args[ - 0 - ] == "The following switches have {} which are not defined: {}.".format( - attr, new_comp.name - ) - self.topology.switches_df = comps - ding0_import._validate_ding0_grid_import(self.topology) - - # check isolated node - bus = self.topology.buses_df.loc[comps_dict["buses"]] - bus.name = "New_bus" - self.topology.buses_df = self.topology.buses_df.append(bus) - try: - ding0_import._validate_ding0_grid_import(self.topology) - raise Exception("Appending components buses did not work properly.") - except ValueError as e: - assert e.args[0] == "The following buses are isolated: {}.".format(bus.name) - def test_transformer_buses(self): assert ( self.topology.buses_df.loc[self.topology.transformers_df.bus1].v_nom.values diff --git a/tests/io/test_generators_import.py b/tests/io/test_generators_import.py index ad0ebafd2..21b5bf7f8 100644 --- a/tests/io/test_generators_import.py +++ b/tests/io/test_generators_import.py @@ -6,7 +6,6 @@ from edisgo import EDisGo from edisgo.io import generators_import as generators_import -from edisgo.network.grids import LVGrid class TestGeneratorsImport: @@ -19,10 +18,8 @@ class TestGeneratorsImport: @pytest.yield_fixture(autouse=True) def setup_class(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + self.edisgo.set_time_series_worst_case_analysis() def test_update_grids(self): @@ -259,9 +256,9 @@ def test_oedb_without_timeseries(self): edisgo = EDisGo( ding0_grid=pytest.ding0_test_network_2_path, - worst_case_analysis="worst-case", generator_scenario="nep2035", ) + edisgo.set_time_series_worst_case_analysis() # check number of 
generators assert len(edisgo.topology.generators_df) == 18 + 1618 @@ -271,16 +268,15 @@ def test_oedb_without_timeseries(self): @pytest.mark.slow def test_oedb_with_worst_case_timeseries(self): - edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_2_path, - worst_case_analysis="worst-case", - ) + edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_2_path) + edisgo.set_time_series_worst_case_analysis() gens_before = edisgo.topology.generators_df.copy() gens_ts_active_before = edisgo.timeseries.generators_active_power.copy() gens_ts_reactive_before = edisgo.timeseries.generators_reactive_power.copy() edisgo.import_generators("nep2035") + edisgo.set_time_series_worst_case_analysis() # check number of generators assert len(edisgo.topology.generators_df) == 18 + 1618 @@ -392,14 +388,14 @@ def test_oedb_with_worst_case_timeseries(self): edisgo.timeseries.generators_active_power.loc[:, new_gen.name] / new_gen.p_nom ).tolist(), - [1, 0], + [0.0, 0.0, 1.0, 1.0], ).all() assert np.isclose( ( edisgo.timeseries.generators_reactive_power.loc[:, new_gen.name] / new_gen.p_nom ).tolist(), - [-np.tan(np.arccos(0.95)), 0], + [0.0, 0.0, -np.tan(np.arccos(0.95)), -np.tan(np.arccos(0.95))], ).all() @pytest.mark.slow @@ -415,17 +411,26 @@ def test_oedb_with_timeseries_by_technology(self): ) edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_2_path, - timeseries_generation_dispatchable=ts_gen_dispatchable, - timeseries_generation_fluctuating=ts_gen_fluctuating, - timeseries_load="demandlib", + ding0_grid=pytest.ding0_test_network_2_path, timeindex=timeindex ) + edisgo.set_time_series_active_power_predefined( + fluctuating_generators_ts=ts_gen_fluctuating, + dispatchable_generators_ts=ts_gen_dispatchable, + conventional_loads_ts="demandlib", + ) + edisgo.set_time_series_reactive_power_control() gens_before = edisgo.topology.generators_df.copy() gens_ts_active_before = edisgo.timeseries.generators_active_power.copy() gens_ts_reactive_before = 
edisgo.timeseries.generators_reactive_power.copy() edisgo.import_generators("nep2035") + edisgo.set_time_series_active_power_predefined( + fluctuating_generators_ts=ts_gen_fluctuating, + dispatchable_generators_ts=ts_gen_dispatchable, + conventional_loads_ts="demandlib", + ) + edisgo.set_time_series_reactive_power_control() # check number of generators assert len(edisgo.topology.generators_df) == 18 + 1618 diff --git a/tests/io/test_pypsa_io.py b/tests/io/test_pypsa_io.py index 1194a7996..85d777ba8 100644 --- a/tests/io/test_pypsa_io.py +++ b/tests/io/test_pypsa_io.py @@ -5,11 +5,21 @@ from pandas.util.testing import assert_frame_equal from edisgo import EDisGo -from edisgo.io.pypsa_io import _append_lv_components, process_pfa_results, set_seed +from edisgo.io import pypsa_io from edisgo.network.results import Results class TestPypsaIO: + def test_to_pypsa(self): + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + self.edisgo.set_time_series_worst_case_analysis() + timeindex = self.edisgo.timeseries.timeindex + pypsa_network = pypsa_io.to_pypsa(self.edisgo, timesteps=timeindex) + slack_df = pypsa_network.generators[pypsa_network.generators.control == "Slack"] + assert len(slack_df) == 1 + assert slack_df.bus.values[0] == "Bus_MVStation_1" + # ToDo: Check further things + def test_append_lv_components(self): lv_components = { "Load": pd.DataFrame(), @@ -18,15 +28,15 @@ def test_append_lv_components(self): } comps = pd.DataFrame({"bus": []}) # check if returns when comps is empty - _append_lv_components("Unkown", comps, lv_components, "TestGrid") + pypsa_io._append_lv_components("Unkown", comps, lv_components, "TestGrid") # check exceptions for wrong input parameters - comps = pd.DataFrame({"bus": ["bus1"]}, index=["dummy"]) + comps = pd.DataFrame({"bus": ["bus1"], "p_set": [0.1]}, index=["dummy"]) msg = "Component type not defined." 
with pytest.raises(ValueError, match=msg): - _append_lv_components("Unkown", comps, lv_components, "TestGrid") + pypsa_io._append_lv_components("Unkown", comps, lv_components, "TestGrid") msg = "Aggregation type for loads invalid." with pytest.raises(ValueError, match=msg): - _append_lv_components( + pypsa_io._append_lv_components( "Load", comps, lv_components, @@ -35,7 +45,7 @@ def test_append_lv_components(self): ) msg = "Aggregation type for generators invalid." with pytest.raises(ValueError, match=msg): - _append_lv_components( + pypsa_io._append_lv_components( "Generator", comps, lv_components, @@ -44,7 +54,7 @@ def test_append_lv_components(self): ) msg = "Aggregation type for storages invalid." with pytest.raises(ValueError, match=msg): - _append_lv_components( + pypsa_io._append_lv_components( "StorageUnit", comps, lv_components, @@ -71,7 +81,7 @@ def test_append_lv_components(self): ], ) # check not aggregated generators - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Generator", gens, lv_components, @@ -90,7 +100,7 @@ def test_append_lv_components(self): ).all() # check aggregation of generators by type lv_components["Generator"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Generator", gens, lv_components, @@ -112,7 +122,7 @@ def test_append_lv_components(self): assert (lv_components["Generator"].fluctuating == [False, True, True]).all() # check if only one type is existing lv_components["Generator"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Generator", gens.loc[gens.type == "solar"], lv_components, @@ -127,7 +137,7 @@ def test_append_lv_components(self): assert (lv_components["Generator"].fluctuating == [True]).all() # check aggregation of generators by fluctuating or dispatchable lv_components["Generator"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( 
"Generator", gens, lv_components, @@ -151,7 +161,7 @@ def test_append_lv_components(self): assert (lv_components["Generator"].fluctuating == [True, False]).all() # check if only dispatchable gens are given lv_components["Generator"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Generator", gens.loc[gens.type == "gas"], lv_components, @@ -166,7 +176,7 @@ def test_append_lv_components(self): assert (lv_components["Generator"].fluctuating == [False]).all() # check if only fluctuating gens are given lv_components["Generator"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Generator", gens.drop(gens.loc[gens.type == "gas"].index), lv_components, @@ -184,7 +194,7 @@ def test_append_lv_components(self): assert (lv_components["Generator"].fluctuating == [True]).all() # check aggregation of all generators lv_components["Generator"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Generator", gens, lv_components, @@ -206,7 +216,7 @@ def test_append_lv_components(self): assert (lv_components["Generator"].fluctuating == ["Mixed"]).all() # check only fluctuating lv_components["Generator"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Generator", gens.drop(gens.loc[gens.type == "gas"].index), lv_components, @@ -228,7 +238,7 @@ def test_append_lv_components(self): assert (lv_components["Generator"].fluctuating == [True]).all() # check only dispatchable lv_components["Generator"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Generator", gens.loc[gens.type == "gas"], lv_components, @@ -250,7 +260,7 @@ def test_append_lv_components(self): loads = pd.DataFrame( { "bus": ["LVStation"] * 6, - "p_nom": [0.05, 0.23, 0.04, 0.2, 0.1, 0.4], + "p_set": [0.05, 0.23, 0.04, 0.2, 0.1, 0.4], "sector": [ "retail", "agricultural", 
@@ -270,17 +280,17 @@ def test_append_lv_components(self): ], ) # check not aggregated loads - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Load", loads, lv_components, "TestGrid", aggregate_loads=None ) assert len(aggr_dict) == 0 assert len(lv_components["Load"]) == 6 - assert (loads.p_nom.values == lv_components["Load"].p_set.values).all() + assert (loads.p_set.values == lv_components["Load"].p_set.values).all() assert (lv_components["Load"].bus == "LVStation").all() assert (lv_components["Load"].index == loads.index).all() # check aggregate loads by sector lv_components["Load"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Load", loads, lv_components, @@ -308,7 +318,7 @@ def test_append_lv_components(self): assert np.isclose(lv_components["Load"].p_set, [0.63, 0.1, 0.29]).all() # check if only one sector exists lv_components["Load"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Load", loads.loc[loads.sector == "industrial"], lv_components, @@ -323,7 +333,7 @@ def test_append_lv_components(self): assert np.isclose(lv_components["Load"].p_set, 0.1).all() # check aggregation of all loads lv_components["Load"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "Load", loads, lv_components, "TestGrid", aggregate_loads="all" ) assert len(aggr_dict) == 1 @@ -349,7 +359,7 @@ def test_append_lv_components(self): index=["Storage_1", "Storage_2"], ) # check appending without aggregation - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "StorageUnit", storages, lv_components, @@ -365,7 +375,7 @@ def test_append_lv_components(self): ).all() # check aggregration of all storages lv_components["StorageUnit"] = pd.DataFrame() - aggr_dict = _append_lv_components( + aggr_dict = pypsa_io._append_lv_components( "StorageUnit", storages, lv_components, @@ 
-383,10 +393,8 @@ def test_get_generators_timeseries_with_aggregated_elements(self): pass def test_set_seed(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + self.edisgo.set_time_series_worst_case_analysis() timeindex = self.edisgo.timeseries.timeindex # test with missing busses @@ -395,7 +403,7 @@ def test_set_seed(self): self.edisgo.analyze(timesteps=timeindex[0], mode="mv") # create pypsa network for first time step and all busses pypsa_network = self.edisgo.to_pypsa(timesteps=timeindex[0]) - set_seed(self.edisgo, pypsa_network) + pypsa_io.set_seed(self.edisgo, pypsa_network) # check that for LV busses default values are used and for MV busses # results from previous power flow @@ -407,24 +415,18 @@ def test_set_seed(self): pypsa_network.buses_t.v_mag_pu.loc[timeindex[0], mv_bus] == self.edisgo.results.pfa_v_mag_pu_seed.loc[timeindex[0], mv_bus] ) - assert np.isclose( - pypsa_network.buses_t.v_mag_pu.loc[timeindex[0], mv_bus], 1.00657 - ) assert ( pypsa_network.buses_t.v_ang.loc[timeindex[0], mv_bus] == self.edisgo.results.pfa_v_ang_seed.loc[timeindex[0], mv_bus] ) - assert np.isclose( - pypsa_network.buses_t.v_ang.loc[timeindex[0], mv_bus], 0.0195367 - ) # run power flow to check if it converges pypsa_network.pf(use_seed=True) # write results to edisgo object - process_pfa_results(self.edisgo, pypsa_network, timeindex) + pypsa_io.process_pfa_results(self.edisgo, pypsa_network, timeindex) # test with missing time steps pypsa_network = self.edisgo.to_pypsa() - set_seed(self.edisgo, pypsa_network) + pypsa_io.set_seed(self.edisgo, pypsa_network) # check that second time step default values are used and for first # time steps results from previous power flow @@ -461,7 +463,7 @@ def test_set_seed(self): self.edisgo.analyze(timesteps=timeindex[0]) self.edisgo.analyze(timesteps=timeindex[1]) pypsa_network = self.edisgo.to_pypsa() - 
set_seed(self.edisgo, pypsa_network) + pypsa_io.set_seed(self.edisgo, pypsa_network) # check that for both time steps results from previous power flow # analyses are used diff --git a/tests/io/test_timeseries_import.py b/tests/io/test_timeseries_import.py index d2fab8276..22dda37a7 100644 --- a/tests/io/test_timeseries_import.py +++ b/tests/io/test_timeseries_import.py @@ -38,9 +38,7 @@ def test_feedin_oedb(self): def test_import_load_timeseries(self): timeindex = pd.date_range("1/1/2018", periods=8760, freq="H") - load = timeseries_import.load_time_series_demandlib( - self.config, timeindex[0].year - ) + load = timeseries_import.load_time_series_demandlib(self.config, timeindex) assert ( load.columns == ["retail", "residential", "agricultural", "industrial"] ).all() diff --git a/tests/network/test_components.py b/tests/network/test_components.py index 3256a7ff3..fd09e8238 100644 --- a/tests/network/test_components.py +++ b/tests/network/test_components.py @@ -2,7 +2,7 @@ import pytest from edisgo import EDisGo -from edisgo.network.components import Generator, Load, Storage, Switch +from edisgo.network.components import Generator, Load, Switch class TestComponents: @@ -10,10 +10,8 @@ class TestComponents: @classmethod def setup_class(self): - self.edisgo_obj = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) + self.edisgo_obj = EDisGo(ding0_grid=pytest.ding0_test_network_path) + self.edisgo_obj.set_time_series_worst_case_analysis() def test_load_class(self): """Test Load class getter, setter, methods""" @@ -22,7 +20,7 @@ def test_load_class(self): # test getter assert load.id == "Load_agricultural_LVGrid_1_1" - assert load.p_nom == 0.0523 + assert load.p_set == 0.0523 assert load.annual_consumption == 238 assert load.sector == "agricultural" assert load.bus == "Bus_BranchTee_LVGrid_1_2" @@ -33,8 +31,8 @@ def test_load_class(self): assert isinstance(load.reactive_power_timeseries, pd.Series) # test setter - load.p_nom = 
0.06 - assert load.p_nom == 0.06 + load.p_set = 0.06 + assert load.p_set == 0.06 load.annual_consumption = 4 assert load.annual_consumption == 4 load.sector = "residential" @@ -46,7 +44,8 @@ def test_load_class(self): msg = "Given bus ID does not exist." with pytest.raises(AttributeError, match=msg): load.bus = "None" - # ToDo add test for active_power_timeseries and reactive_power_timeseries once implemented + # TODO: add test for active_power_timeseries and reactive_power_timeseries once + # implemented def test_generator_class(self): """Test Generator class getter, setter, methods""" @@ -83,7 +82,8 @@ def test_generator_class(self): msg = "Given bus ID does not exist." with pytest.raises(AttributeError, match=msg): gen.bus = "None" - # ToDo add test for active_power_timeseries and reactive_power_timeseries once implemented + # TODO: add test for active_power_timeseries and reactive_power_timeseries once + # implemented def test_switch_class(self): """Test Switch class""" diff --git a/tests/network/test_grids.py b/tests/network/test_grids.py index a46b6c8b3..d7006d3a0 100644 --- a/tests/network/test_grids.py +++ b/tests/network/test_grids.py @@ -46,7 +46,7 @@ def test_mv_grid(self): assert sorted(mv_grid.weather_cells) == [1122074, 1122075] assert mv_grid.peak_generation_capacity == 19.025 assert mv_grid.peak_generation_capacity_per_technology["solar"] == 4.6 - assert mv_grid.p_nom == 0.0 + assert mv_grid.p_set == 0.0 def test_lv_grid(self): """Test LVGrid class getter, setter, methods""" @@ -79,5 +79,5 @@ def test_lv_grid(self): assert sorted(lv_grid.weather_cells) == [] assert lv_grid.peak_generation_capacity == 0 assert lv_grid.peak_generation_capacity_per_technology.empty - assert lv_grid.p_nom == 0.054627 - assert lv_grid.p_nom_per_sector["agricultural"] == 0.051 + assert lv_grid.p_set == 0.054627 + assert lv_grid.p_set_per_sector["agricultural"] == 0.051 diff --git a/tests/network/test_timeseries.py b/tests/network/test_timeseries.py index 
466dc2329..961c94674 100644 --- a/tests/network/test_timeseries.py +++ b/tests/network/test_timeseries.py @@ -1,3 +1,4 @@ +import logging import os import shutil @@ -9,1515 +10,2428 @@ from pandas.util.testing import assert_frame_equal, assert_series_equal -from edisgo.io import ding0_import +from edisgo import EDisGo from edisgo.network import timeseries -from edisgo.network.topology import Topology -from edisgo.tools.config import Config +from edisgo.tools.tools import assign_voltage_level_to_component class TestTimeSeries: - def test_timeindex(self): - timeseries_obj = timeseries.TimeSeries() - # test single time step - ind = pd.Timestamp("1/1/1970") - timeseries_obj.timeindex = ind - assert timeseries_obj.timeindex == pd.DatetimeIndex([ind]) - # test list of time steps - ind = ["1/1/1970", "1/2/1970"] - timeseries_obj.timeindex = ind - assert timeseries_obj.timeindex.equals(pd.DatetimeIndex(ind)) - # test DatetimeIndex - ind = ["1/1/1970", "1/2/1970"] - timeseries_obj.timeindex = pd.DatetimeIndex(ind) - assert timeseries_obj.timeindex.equals(pd.DatetimeIndex(ind)) + @pytest.fixture(autouse=True) + def setup_class(self): + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + + def test_timeseries_getters(self, caplog): + index_2 = pd.date_range("1/1/2018", periods=2, freq="H") + index_3 = pd.date_range("1/1/2018", periods=3, freq="H") + timeseries = pd.DataFrame(index=index_2, columns=["Comp_1"], data=[1.3, 2]) + self.edisgo.timeseries.timeindex = index_3 + for attribute in self.edisgo.timeseries._attributes: + assert_frame_equal( + getattr(self.edisgo.timeseries, attribute), pd.DataFrame(index=index_3) + ) + setattr(self.edisgo.timeseries, attribute, timeseries) + with caplog.at_level(logging.WARNING): + assert_frame_equal( + getattr(self.edisgo.timeseries, attribute), + pd.DataFrame(index=index_3), + ) + assert ( + "Timeindex and {} have deviating indices. 
" + "Empty dataframe will be returned.".format(attribute) in caplog.text + ) - def test_to_csv(self): - timeindex = pd.date_range("1/1/2018", periods=2, freq="H") - timeseries_obj = timeseries.TimeSeries(timeindex=timeindex) + def test_set_active_power_manual(self): # create dummy time series - loads_active_power = pd.DataFrame( - {"load1": [1.4, 2.3], "load2": [2.4, 1.3]}, index=timeindex - ) - timeseries_obj.loads_active_power = loads_active_power - generators_reactive_power = pd.DataFrame( - {"gen1": [1.4, 2.3], "gen2": [2.4, 1.3]}, index=timeindex + index_2 = pd.date_range("1/1/2018", periods=2, freq="H") + index_3 = pd.date_range("1/1/2018", periods=3, freq="H") + dummy_ts_1 = pd.Series([1.4, 2.3], index=index_2) + dummy_ts_2 = pd.Series([1.4, 2.3, 1.5], index=index_3) + # set TimeSeries timeindex + self.edisgo.timeseries.timeindex = index_2 + + # test only existing components without prior time series being set + self.edisgo.timeseries.set_active_power_manual( + edisgo_object=self.edisgo, + ts_generators=pd.DataFrame({"GeneratorFluctuating_8": dummy_ts_1}), + ts_loads=pd.DataFrame( + { + "Load_residential_LVGrid_8_6": dummy_ts_2, + "Load_residential_LVGrid_7_2": dummy_ts_2, + } + ), + ts_storage_units=pd.DataFrame({"Storage_1": dummy_ts_2}), ) - timeseries_obj.generators_reactive_power = generators_reactive_power - - # test with default values - dir = os.path.join(os.getcwd(), "timeseries_csv") - timeseries_obj.to_csv(dir) - - files_in_timeseries_dir = os.listdir(dir) - assert len(files_in_timeseries_dir) == 2 - assert "loads_active_power.csv" in files_in_timeseries_dir - assert "generators_reactive_power.csv" in files_in_timeseries_dir - - shutil.rmtree(dir) - - # test with reduce memory True - timeseries_obj.to_csv(dir, reduce_memory=True) - - assert timeseries_obj.loads_active_power.load1.dtype == "float32" - - shutil.rmtree(dir, ignore_errors=True) - - def test_from_csv(self): - timeindex = pd.date_range("1/1/2018", periods=2, freq="H") - timeseries_obj = 
timeseries.TimeSeries(timeindex=timeindex) + assert self.edisgo.timeseries.generators_active_power.shape == (2, 1) + assert ( + self.edisgo.timeseries.generators_active_power.loc[ + :, "GeneratorFluctuating_8" + ] + == dummy_ts_1 + ).all() + assert self.edisgo.timeseries.loads_active_power.shape == (2, 2) + assert ( + self.edisgo.timeseries._loads_active_power.loc[ + :, "Load_residential_LVGrid_8_6" + ] + == dummy_ts_2 + ).all() + assert ( + self.edisgo.timeseries.loads_active_power.loc[ + :, "Load_residential_LVGrid_7_2" + ] + == dummy_ts_2.loc[index_2] + ).all() + assert self.edisgo.timeseries.storage_units_active_power.shape == (2, 1) + assert ( + self.edisgo.timeseries._storage_units_active_power.loc[:, "Storage_1"] + == dummy_ts_2 + ).all() + assert ( + self.edisgo.timeseries.storage_units_active_power.loc[:, "Storage_1"] + == dummy_ts_2.loc[index_2] + ).all() - # create dummy time series - loads_active_power = pd.DataFrame( - {"load1": [1.4, 2.3], "load2": [2.4, 1.3]}, index=timeindex - ) - timeseries_obj.loads_active_power = loads_active_power - generators_reactive_power = pd.DataFrame( - {"gen1": [1.4, 2.3], "gen2": [2.4, 1.3]}, index=timeindex + # test overwriting and adding time series + self.edisgo.timeseries.set_active_power_manual( + edisgo_object=self.edisgo, + ts_generators=pd.DataFrame( + { + "GeneratorFluctuating_8": dummy_ts_2, + "GeneratorFluctuating_17": dummy_ts_2, + } + ), + ts_loads=pd.DataFrame( + { + "Load_residential_LVGrid_8_6": dummy_ts_1, + "Load_residential_LVGrid_1_4": dummy_ts_1, + } + ), + ts_storage_units=pd.DataFrame({"Storage_1": dummy_ts_1}), ) - timeseries_obj.generators_reactive_power = generators_reactive_power - - # write to csv - dir = os.path.join(os.getcwd(), "timeseries_csv") - timeseries_obj.to_csv(dir) - - # reset TimeSeries - timeseries_obj = timeseries.TimeSeries() - - timeseries_obj.from_csv(dir) + assert self.edisgo.timeseries.generators_active_power.shape == (2, 2) + assert ( + 
self.edisgo.timeseries.generators_active_power.loc[ + :, "GeneratorFluctuating_8" + ] + == dummy_ts_2.loc[index_2] + ).all() + assert ( + self.edisgo.timeseries._generators_active_power.loc[ + :, "GeneratorFluctuating_17" + ] + == dummy_ts_2 + ).all() + assert ( + self.edisgo.timeseries.generators_active_power.loc[ + :, "GeneratorFluctuating_17" + ] + == dummy_ts_2.loc[index_2] + ).all() + assert self.edisgo.timeseries.loads_active_power.shape == (2, 3) + assert ( + self.edisgo.timeseries._loads_active_power.loc[ + :, "Load_residential_LVGrid_8_6" + ] + == dummy_ts_1 + ).all() + assert ( + self.edisgo.timeseries._loads_active_power.loc[ + :, "Load_residential_LVGrid_1_4" + ] + == dummy_ts_1 + ).all() + assert ( + self.edisgo.timeseries.loads_active_power.loc[ + :, "Load_residential_LVGrid_7_2" + ] + == dummy_ts_2.loc[index_2] + ).all() + assert self.edisgo.timeseries.storage_units_active_power.shape == (2, 1) + assert ( + self.edisgo.timeseries._storage_units_active_power.loc[:, "Storage_1"] + == dummy_ts_1 + ).all() + assert ( + self.edisgo.timeseries.storage_units_active_power.loc[:, "Storage_1"] + == dummy_ts_1 + ).all() - pd.testing.assert_frame_equal( - timeseries_obj.loads_active_power, - loads_active_power, - check_freq=False, + # test non-existent components + self.edisgo.timeseries.set_active_power_manual( + edisgo_object=self.edisgo, + ts_generators=pd.DataFrame( + {"Dummy_gen_1": dummy_ts_2, "GeneratorFluctuating_27": dummy_ts_2} + ), + ts_loads=pd.DataFrame( + {"Dummy_load_1": dummy_ts_1, "Load_agricultural_LVGrid_1_3": dummy_ts_1} + ), + ts_storage_units=pd.DataFrame({"Dummy_storage_1": dummy_ts_1}), ) - pd.testing.assert_frame_equal( - timeseries_obj.generators_reactive_power, - generators_reactive_power, - check_freq=False, + assert self.edisgo.timeseries.generators_active_power.shape == (2, 3) + assert ( + "Dummy_gen_1" not in self.edisgo.timeseries.generators_active_power.columns + ) + assert ( + self.edisgo.timeseries._generators_active_power.loc[ 
+ :, "GeneratorFluctuating_27" + ] + == dummy_ts_2 + ).all() + assert self.edisgo.timeseries.loads_active_power.shape == (2, 4) + assert "Dummy_load_1" not in self.edisgo.timeseries.loads_active_power.columns + assert ( + self.edisgo.timeseries.loads_active_power.loc[ + :, "Load_agricultural_LVGrid_1_3" + ] + == dummy_ts_1 + ).all() + assert self.edisgo.timeseries.storage_units_active_power.shape == (2, 1) + assert ( + "Dummy_storage_1" + not in self.edisgo.timeseries.storage_units_active_power.columns ) - shutil.rmtree(dir) - - -class Test_get_component_timeseries: - @classmethod - def setup_class(self): - self.topology = Topology() - self.timeseries = timeseries.TimeSeries() - self.config = Config() - ding0_import.import_ding0_grid(pytest.ding0_test_network_path, self) - - def test_timeseries_imported(self): - # test storage ts - storage_1 = self.topology.add_storage_unit("Bus_MVStation_1", 0.3) - storage_2 = self.topology.add_storage_unit("Bus_GeneratorFluctuating_2", 0.45) - storage_3 = self.topology.add_storage_unit("Bus_BranchTee_LVGrid_1_10", 0.05) - - timeindex = pd.date_range("1/1/2011", periods=8760, freq="H") - ts_gen_dispatchable = pd.DataFrame({"other": [0.775] * 8760}, index=timeindex) - # test error raising in case of missing ts for dispatchable gens - msg = 'Your input for "timeseries_generation_dispatchable" is not valid.' - with pytest.raises(ValueError, match=msg): - timeseries.get_component_timeseries( - edisgo_obj=self, timeseries_generation_fluctuating="oedb" - ) - # test error raising in case of missing ts for loads - msg = "Your input for 'timeseries_load' is not valid." - with pytest.raises(ValueError, match=msg): - timeseries.get_component_timeseries( - edisgo_obj=self, - timeseries_generation_fluctuating="oedb", - timeseries_generation_dispatchable=ts_gen_dispatchable, - ) - - msg = "No timeseries for storage units provided." 
- with pytest.raises(ValueError, match=msg): - timeseries.get_component_timeseries( - edisgo_obj=self, - timeseries_generation_fluctuating="oedb", - timeseries_generation_dispatchable=ts_gen_dispatchable, - timeseries_load="demandlib", - ) - - msg = ( - "Columns or indices of inserted storage timeseries do not match " - "topology and timeindex." - ) - with pytest.raises(ValueError, match=msg): - timeseries.get_component_timeseries( - edisgo_obj=self, - timeseries_generation_fluctuating="oedb", - timeseries_generation_dispatchable=ts_gen_dispatchable, - timeseries_load="demandlib", - timeseries_storage_units=pd.DataFrame(), - ) - - storage_ts = pd.concat( - [self.topology.storage_units_df.p_nom] * 8760, - axis=1, - keys=timeindex, - ).T - timeseries.get_component_timeseries( - edisgo_obj=self, - timeseries_generation_fluctuating="oedb", - timeseries_generation_dispatchable=ts_gen_dispatchable, - timeseries_load="demandlib", - timeseries_storage_units=storage_ts, - ) - - # Todo: test with inserted reactive generation and/or reactive load - - # remove storages - self.topology.remove_storage_unit(storage_1) - self.topology.remove_storage_unit(storage_2) - self.topology.remove_storage_unit(storage_3) - - def test_worst_case(self): - """Test creation of worst case time series""" - # test storage ts - storage_1 = self.topology.add_storage_unit("Bus_MVStation_1", 0.3) - # storage_2 = self.topology.add_storage_unit( - # 'Bus_GeneratorFluctuating_2', 0.45) - storage_3 = self.topology.add_storage_unit("Bus_BranchTee_LVGrid_1_10", 0.05) - - timeseries.get_component_timeseries(edisgo_obj=self, mode="worst-case") - - # check type - assert isinstance(self.timeseries.generators_active_power, pd.DataFrame) - assert isinstance(self.timeseries.generators_reactive_power, pd.DataFrame) - assert isinstance(self.timeseries.loads_active_power, pd.DataFrame) - assert isinstance(self.timeseries.loads_reactive_power, pd.DataFrame) - assert 
isinstance(self.timeseries.storage_units_active_power, pd.DataFrame) - assert isinstance(self.timeseries.storage_units_reactive_power, pd.DataFrame) + def test_set_reactive_power_manual(self): - # check shape - number_of_timesteps = len(self.timeseries.timeindex) - number_of_cols = len(self.topology.generators_df.index) - assert self.timeseries.generators_active_power.shape == ( - number_of_timesteps, - number_of_cols, - ) - assert self.timeseries.generators_reactive_power.shape == ( - number_of_timesteps, - number_of_cols, + # create dummy time series + index_2 = pd.date_range("1/1/2018", periods=2, freq="H") + index_3 = pd.date_range("1/1/2018", periods=3, freq="H") + dummy_ts_1 = pd.Series([1.4, 2.3], index=index_2) + dummy_ts_2 = pd.Series([1.4, 2.3, 1.5], index=index_3) + # set TimeSeries timeindex + self.edisgo.timeseries.timeindex = index_2 + + # test only existing components without prior time series being set + self.edisgo.timeseries.set_reactive_power_manual( + edisgo_object=self.edisgo, + ts_generators=pd.DataFrame({"GeneratorFluctuating_8": dummy_ts_1}), + ts_loads=pd.DataFrame( + { + "Load_residential_LVGrid_8_6": dummy_ts_2, + "Load_residential_LVGrid_7_2": dummy_ts_2, + } + ), + ts_storage_units=pd.DataFrame({"Storage_1": dummy_ts_2}), ) - number_of_cols = len(self.topology.loads_df.index) - assert self.timeseries.loads_active_power.shape == ( - number_of_timesteps, - number_of_cols, + assert self.edisgo.timeseries.generators_reactive_power.shape == (2, 1) + assert ( + self.edisgo.timeseries.generators_reactive_power.loc[ + :, "GeneratorFluctuating_8" + ] + == dummy_ts_1 + ).all() + assert self.edisgo.timeseries.loads_reactive_power.shape == (2, 2) + assert ( + self.edisgo.timeseries._loads_reactive_power.loc[ + :, "Load_residential_LVGrid_8_6" + ] + == dummy_ts_2 + ).all() + assert ( + self.edisgo.timeseries.loads_reactive_power.loc[ + :, "Load_residential_LVGrid_7_2" + ] + == dummy_ts_2.loc[index_2] + ).all() + assert 
self.edisgo.timeseries.storage_units_reactive_power.shape == (2, 1) + assert ( + self.edisgo.timeseries._storage_units_reactive_power.loc[:, "Storage_1"] + == dummy_ts_2 + ).all() + assert ( + self.edisgo.timeseries.storage_units_reactive_power.loc[:, "Storage_1"] + == dummy_ts_2.loc[index_2] + ).all() + + # test overwriting and adding time series + self.edisgo.timeseries.set_reactive_power_manual( + edisgo_object=self.edisgo, + ts_generators=pd.DataFrame( + { + "GeneratorFluctuating_8": dummy_ts_2, + "GeneratorFluctuating_17": dummy_ts_2, + } + ), + ts_loads=pd.DataFrame( + { + "Load_residential_LVGrid_8_6": dummy_ts_1, + "Load_residential_LVGrid_1_4": dummy_ts_1, + } + ), + ts_storage_units=pd.DataFrame({"Storage_1": dummy_ts_1}), ) - assert self.timeseries.loads_reactive_power.shape == ( - number_of_timesteps, - number_of_cols, + assert self.edisgo.timeseries.generators_reactive_power.shape == (2, 2) + assert ( + self.edisgo.timeseries.generators_reactive_power.loc[ + :, "GeneratorFluctuating_8" + ] + == dummy_ts_2.loc[index_2] + ).all() + assert ( + self.edisgo.timeseries._generators_reactive_power.loc[ + :, "GeneratorFluctuating_17" + ] + == dummy_ts_2 + ).all() + assert ( + self.edisgo.timeseries.generators_reactive_power.loc[ + :, "GeneratorFluctuating_17" + ] + == dummy_ts_2.loc[index_2] + ).all() + assert self.edisgo.timeseries.loads_reactive_power.shape == (2, 3) + assert ( + self.edisgo.timeseries._loads_reactive_power.loc[ + :, "Load_residential_LVGrid_8_6" + ] + == dummy_ts_1 + ).all() + assert ( + self.edisgo.timeseries._loads_reactive_power.loc[ + :, "Load_residential_LVGrid_1_4" + ] + == dummy_ts_1 + ).all() + assert ( + self.edisgo.timeseries.loads_reactive_power.loc[ + :, "Load_residential_LVGrid_7_2" + ] + == dummy_ts_2.loc[index_2] + ).all() + assert self.edisgo.timeseries.storage_units_reactive_power.shape == (2, 1) + assert ( + self.edisgo.timeseries._storage_units_reactive_power.loc[:, "Storage_1"] + == dummy_ts_1 + ).all() + assert ( + 
self.edisgo.timeseries.storage_units_reactive_power.loc[:, "Storage_1"] + == dummy_ts_1 + ).all() + + # test non-existent components + self.edisgo.timeseries.set_reactive_power_manual( + edisgo_object=self.edisgo, + ts_generators=pd.DataFrame( + {"Dummy_gen_1": dummy_ts_2, "GeneratorFluctuating_27": dummy_ts_2} + ), + ts_loads=pd.DataFrame( + {"Dummy_load_1": dummy_ts_1, "Load_agricultural_LVGrid_1_3": dummy_ts_1} + ), + ts_storage_units=pd.DataFrame({"Dummy_storage_1": dummy_ts_1}), ) - number_of_cols = len(self.topology.storage_units_df.index) - assert self.timeseries.storage_units_active_power.shape == ( - number_of_timesteps, - number_of_cols, + assert self.edisgo.timeseries.generators_reactive_power.shape == (2, 3) + assert ( + "Dummy_gen_1" + not in self.edisgo.timeseries.generators_reactive_power.columns ) - assert self.timeseries.storage_units_reactive_power.shape == ( - number_of_timesteps, - number_of_cols, + assert ( + self.edisgo.timeseries._generators_reactive_power.loc[ + :, "GeneratorFluctuating_27" + ] + == dummy_ts_2 + ).all() + assert self.edisgo.timeseries.loads_reactive_power.shape == (2, 4) + assert "Dummy_load_1" not in self.edisgo.timeseries.loads_reactive_power.columns + assert ( + self.edisgo.timeseries.loads_reactive_power.loc[ + :, "Load_agricultural_LVGrid_1_3" + ] + == dummy_ts_1 + ).all() + assert self.edisgo.timeseries.storage_units_reactive_power.shape == (2, 1) + assert ( + "Dummy_storage_1" + not in self.edisgo.timeseries.storage_units_reactive_power.columns ) - # value - gen = "Generator_1" # gas, mv - exp = pd.Series( - data=[1 * 0.775, 0 * 0.775], - name=gen, - index=self.timeseries.timeindex, - ) - assert_series_equal(self.timeseries.generators_active_power.loc[:, gen], exp) - pf = -tan(acos(0.9)) - assert_series_equal( - self.timeseries.generators_reactive_power.loc[:, gen], exp * pf - ) + def test_set_worst_case(self): - gen = "GeneratorFluctuating_2" # wind, mv - exp = pd.Series( - data=[1 * 2.3, 0 * 2.3], name=gen, 
index=self.timeseries.timeindex - ) - assert_series_equal(self.timeseries.generators_active_power.loc[:, gen], exp) - pf = -tan(acos(0.9)) - assert_series_equal( - self.timeseries.generators_reactive_power.loc[:, gen], exp * pf - ) + # test - check if right functions are called for all components - gen = "GeneratorFluctuating_3" # solar, mv - exp = pd.Series( - data=[0.85 * 2.67, 0 * 2.67], - name=gen, - index=self.timeseries.timeindex, - ) - assert_series_equal(self.timeseries.generators_active_power.loc[:, gen], exp) - pf = -tan(acos(0.9)) - assert_series_equal( - self.timeseries.generators_reactive_power.loc[:, gen], exp * pf - ) + # change load types to have charging point, heat pump and load without set + # type in the network + self.edisgo.topology._loads_df.loc[ + "Load_residential_LVGrid_1_4", ["type", "sector"] + ] = ("charging_point", "hpc") + self.edisgo.topology._loads_df.at[ + "Load_retail_MVGrid_1_Load_aggregated_retail_MVGrid_1_1", "type" + ] = "heat_pump" + self.edisgo.topology._loads_df.at["Load_agricultural_LVGrid_8_1", "type"] = None - gen = "GeneratorFluctuating_20" # solar, lv - exp = pd.Series( - data=[0.85 * 0.005, 0 * 0.005], - name=gen, - index=self.timeseries.timeindex, - ) - assert_series_equal(self.timeseries.generators_active_power.loc[:, gen], exp) - pf = -tan(acos(0.95)) - assert_series_equal( - self.timeseries.generators_reactive_power.loc[:, gen], exp * pf + self.edisgo.timeseries.set_worst_case( + self.edisgo, cases=["feed-in_case", "load_case"] ) - load = "Load_retail_MVGrid_1_Load_aggregated_retail_MVGrid_1_1" # retail, mv + timeindex = pd.date_range("1/1/1970", periods=4, freq="H") + # check generator + comp = "Generator_1" # gas, mv + p_nom = 0.775 exp = pd.Series( - data=[0.1 * 0.31, 1.0 * 0.31], - name=load, - index=self.timeseries.timeindex, + data=[1.0 * p_nom, 1.0 * p_nom, 0.0, 0.0], + name=comp, + index=timeindex, ) assert_series_equal( - self.timeseries.loads_active_power.loc[:, load], + 
self.edisgo.timeseries.generators_active_power.loc[:, comp], exp, - check_exact=False, check_dtype=False, ) - pf = tan(acos(0.95)) + pf = -tan(acos(0.9)) assert_series_equal( - self.timeseries.loads_reactive_power.loc[:, load], + self.edisgo.timeseries.generators_reactive_power.loc[:, comp], exp * pf, - check_exact=False, check_dtype=False, ) - - load = "Load_agricultural_LVGrid_1_2" # agricultural, lv + # check conventional load + comp = "Load_agricultural_LVGrid_1_1" # lv + p_set = 0.0523 exp = pd.Series( - data=[0.1 * 0.0523, 1.0 * 0.0523], - name=load, - index=self.timeseries.timeindex, + data=[0.15 * p_set, 0.1 * p_set, 1.0 * p_set, 1.0 * p_set], + name=comp, + index=timeindex, ) assert_series_equal( - self.timeseries.loads_active_power.loc[:, load], + self.edisgo.timeseries.loads_active_power.loc[:, comp], exp, - check_exact=False, check_dtype=False, ) pf = tan(acos(0.95)) assert_series_equal( - self.timeseries.loads_reactive_power.loc[:, load], + self.edisgo.timeseries.loads_reactive_power.loc[:, comp], exp * pf, - check_exact=False, check_dtype=False, ) - - load = "Load_residential_LVGrid_3_3" # residential, lv + # check charging point + comp = "Load_residential_LVGrid_1_4" # lv, hpc + p_set = 0.001397 exp = pd.Series( - data=[0.1 * 0.001209, 1.0 * 0.001209], - name=load, - index=self.timeseries.timeindex, + data=[0.15 * p_set, 0.0 * p_set, 1.0 * p_set, 1.0 * p_set], + name=comp, + index=timeindex, ) assert_series_equal( - self.timeseries.loads_active_power.loc[:, load], + self.edisgo.timeseries.loads_active_power.loc[:, comp], exp, - check_exact=False, check_dtype=False, ) - pf = tan(acos(0.95)) + pf = tan(acos(1.0)) assert_series_equal( - self.timeseries.loads_reactive_power.loc[:, load], + self.edisgo.timeseries.loads_reactive_power.loc[:, comp], exp * pf, - check_exact=False, check_dtype=False, ) - - storage = storage_1 # storage, mv + # check heat pump + comp = "Load_retail_MVGrid_1_Load_aggregated_retail_MVGrid_1_1" # mv + p_set = 0.31 exp = 
pd.Series( - data=[1 * 0.3, -1 * 0.3], - name=storage, - index=self.timeseries.timeindex, + data=[0.0 * p_set, 0.0 * p_set, 0.8 * p_set, 1.0 * p_set], + name=comp, + index=timeindex, ) - assert_series_equal( - self.timeseries.storage_units_active_power.loc[:, storage], + self.edisgo.timeseries.loads_active_power.loc[:, comp], exp, - check_exact=False, check_dtype=False, ) - pf = -tan(acos(0.9)) + pf = tan(acos(1.0)) assert_series_equal( - self.timeseries.storage_units_reactive_power.loc[:, storage], + self.edisgo.timeseries.loads_reactive_power.loc[:, comp], exp * pf, - check_exact=False, check_dtype=False, ) - - storage = storage_3 # storage, lv + # check load without type specification + comp = "Load_agricultural_LVGrid_8_1" # lv + p_set = 0.0478 exp = pd.Series( - data=[1 * 0.05, -1 * 0.05], - name=storage, - index=self.timeseries.timeindex, + data=[0.15 * p_set, 0.1 * p_set, 1.0 * p_set, 1.0 * p_set], + name=comp, + index=timeindex, ) - assert_series_equal( - self.timeseries.storage_units_active_power.loc[:, storage], + self.edisgo.timeseries.loads_active_power.loc[:, comp], exp, - check_exact=False, check_dtype=False, ) - pf = -tan(acos(0.95)) + pf = tan(acos(0.95)) assert_series_equal( - self.timeseries.storage_units_reactive_power.loc[:, storage], + self.edisgo.timeseries.loads_reactive_power.loc[:, comp], exp * pf, - check_exact=False, check_dtype=False, ) - - # remove storages - self.topology.remove_storage_unit(storage_1) - # self.topology.remove_storage_unit(storage_2) - self.topology.remove_storage_unit(storage_3) - - # test for only feed-in case - timeseries.get_component_timeseries(edisgo_obj=self, mode="worst-case-feedin") - - # value - gen = "Generator_1" # gas, mv - exp = pd.Series(data=[1 * 0.775], name=gen, index=self.timeseries.timeindex) - assert_series_equal(self.timeseries.generators_active_power.loc[:, gen], exp) - pf = -tan(acos(0.9)) - assert_series_equal( - self.timeseries.generators_reactive_power.loc[:, gen], exp * pf - ) - load = 
"Load_retail_LVGrid_9_14" # industrial, lv + # check storage + comp = "Storage_1" + p_nom = 0.4 exp = pd.Series( - data=[0.1 * 0.001222], name=load, index=self.timeseries.timeindex + data=[1.0 * p_nom, 1.0 * p_nom, -1.0 * p_nom, -1.0 * p_nom], + name=comp, + index=timeindex, ) assert_series_equal( - self.timeseries.loads_active_power.loc[:, load], + self.edisgo.timeseries.storage_units_active_power.loc[:, comp], exp, - check_exact=False, check_dtype=False, ) - pf = tan(acos(0.95)) + pf = -tan(acos(0.9)) assert_series_equal( - self.timeseries.loads_reactive_power.loc[:, load], + self.edisgo.timeseries.storage_units_reactive_power.loc[:, comp], exp * pf, - check_exact=False, check_dtype=False, ) - # test for only load case - timeseries.get_component_timeseries(edisgo_obj=self, mode="worst-case-load") + assert self.edisgo.timeseries.generators_active_power.shape == ( + 4, + len(self.edisgo.topology.generators_df), + ) + assert self.edisgo.timeseries.generators_reactive_power.shape == ( + 4, + len(self.edisgo.topology.generators_df), + ) + assert self.edisgo.timeseries.loads_active_power.shape == ( + 4, + len(self.edisgo.topology.loads_df), + ) + assert self.edisgo.timeseries.loads_reactive_power.shape == ( + 4, + len(self.edisgo.topology.loads_df), + ) + assert self.edisgo.timeseries.storage_units_active_power.shape == ( + 4, + len(self.edisgo.topology.storage_units_df), + ) + assert self.edisgo.timeseries.storage_units_reactive_power.shape == ( + 4, + len(self.edisgo.topology.storage_units_df), + ) - gen = "Generator_1" # gas, mv - exp = pd.Series(data=[0 * 0.775], name=gen, index=self.timeseries.timeindex) - assert_series_equal(self.timeseries.generators_active_power.loc[:, gen], exp) - pf = -tan(acos(0.9)) - assert_series_equal( - self.timeseries.generators_reactive_power.loc[:, gen], exp * pf + # ############################################################################# + # test with components that do not exist and setting only one case + 
self.edisgo.timeseries.set_worst_case( + self.edisgo, + cases=["load_case"], + generators_names=["genX", "GeneratorFluctuating_8"], + loads_names=[], + storage_units_names=[], ) - load = "Load_retail_LVGrid_9_14" # industrial, lv + + comp = "GeneratorFluctuating_8" exp = pd.Series( - data=[1.0 * 0.001222], name=load, index=self.timeseries.timeindex + data=[np.nan, np.nan, 0.0, 0.0], + name=comp, + index=timeindex, ) assert_series_equal( - self.timeseries.loads_active_power.loc[:, load], + self.edisgo.timeseries.generators_active_power.loc[:, comp], exp, - check_exact=False, check_dtype=False, ) - pf = tan(acos(0.95)) + pf = -tan(acos(0.9)) assert_series_equal( - self.timeseries.loads_reactive_power.loc[:, load], + self.edisgo.timeseries.generators_reactive_power.loc[:, comp], exp * pf, - check_exact=False, check_dtype=False, ) - - # test error raising in case of missing load/generator parameter - - gen = "GeneratorFluctuating_14" - val_pre = self.topology._generators_df.at[gen, "bus"] - self.topology._generators_df.at[gen, "bus"] = None - with pytest.raises(AttributeError, match=gen): - timeseries._worst_case_generation(self, modes=None) - self.topology._generators_df.at[gen, "bus"] = val_pre - gen = "GeneratorFluctuating_24" - val_pre = self.topology._generators_df.at[gen, "p_nom"] - self.topology._generators_df.at[gen, "p_nom"] = None - with pytest.raises(AttributeError, match=gen): - timeseries._worst_case_generation(self, modes=None) - self.topology._generators_df.at[gen, "p_nom"] = val_pre - load = "Load_agricultural_LVGrid_1_1" - val_pre = self.topology._loads_df.at[load, "p_nom"] - self.topology._loads_df.at[load, "p_nom"] = None - with pytest.raises(AttributeError, match=load): - timeseries._worst_case_load(self, modes=None) - self.topology._loads_df.at[load, "p_nom"] = val_pre - - # test no other generators - - def test_add_loads_timeseries(self): - """Test method add_loads_timeseries""" - p_nom = 2.3 - annual_consumption = 3.4 - num_loads = 
len(self.topology.loads_df) - # add single load for which timeseries is added - # test worst-case - timeseries.get_component_timeseries(edisgo_obj=self, mode="worst-case") - load_name = self.topology.add_load( - load_id=4, - bus="Bus_MVStation_1", - p_nom=p_nom, - annual_consumption=annual_consumption, - sector="retail", - ) - timeseries.add_loads_timeseries(self, load_name) - active_power_new_load = self.timeseries.loads_active_power.loc[ - :, ["Load_MVGrid_1_retail_4"] - ] - timeindex = pd.date_range("1/1/1970", periods=2, freq="H") - assert self.timeseries.loads_active_power.shape == (2, num_loads + 1) - assert self.timeseries.loads_reactive_power.shape == (2, num_loads + 1) - assert (active_power_new_load.index == timeindex).all() - assert np.isclose( - active_power_new_load.loc[timeindex[0], load_name], (0.15 * p_nom) - ) - assert np.isclose(active_power_new_load.loc[timeindex[1], load_name], p_nom) - self.topology.remove_load(load_name) - - # test manual - timeindex = pd.date_range("1/1/2018", periods=24, freq="H") - ( - generators_active_power, - generators_reactive_power, - loads_active_power, - loads_reactive_power, - storage_units_active_power, - storage_units_reactive_power, - ) = self.create_random_timeseries_for_topology(timeindex) - - timeseries.get_component_timeseries( - edisgo_obj=self, - mode="manual", - timeindex=timeindex, - loads_active_power=loads_active_power, - loads_reactive_power=loads_reactive_power, - generators_active_power=generators_active_power, - generators_reactive_power=generators_reactive_power, - storage_units_active_power=storage_units_active_power, - storage_units_reactive_power=storage_units_reactive_power, - ) - - load_name = self.topology.add_load( - load_id=4, - bus="Bus_MVStation_1", - p_nom=p_nom, - annual_consumption=annual_consumption, - sector="retail", - ) - new_load_active_power = pd.DataFrame( - index=timeindex, - columns=[load_name], - data=([p_nom] * len(timeindex)), + assert 
self.edisgo.timeseries.generators_active_power.shape == ( + 4, + len(self.edisgo.topology.generators_df), ) - new_load_reactive_power = pd.DataFrame( - index=timeindex, - columns=[load_name], - data=([p_nom * 0.5] * len(timeindex)), - ) - timeseries.add_loads_timeseries( - self, - load_name, - loads_active_power=new_load_active_power, - loads_reactive_power=new_load_reactive_power, - ) - active_power = self.timeseries.loads_active_power[load_name] - reactive_power = self.timeseries.loads_reactive_power[load_name] - assert (active_power.values == p_nom).all() - assert (reactive_power.values == p_nom * 0.5).all() - assert self.timeseries.loads_active_power.shape == (24, num_loads + 1) - assert self.timeseries.loads_reactive_power.shape == ( - 24, - num_loads + 1, - ) - - self.topology.remove_load(load_name) - - # test import timeseries from dbs - timeindex = pd.date_range("1/1/2011", periods=24, freq="H") - ts_gen_dispatchable = pd.DataFrame({"other": [0.775] * 24}, index=timeindex) - storage_units_active_power.index = timeindex - timeseries.get_component_timeseries( - timeindex=timeindex, - edisgo_obj=self, - timeseries_generation_fluctuating="oedb", - timeseries_generation_dispatchable=ts_gen_dispatchable, - timeseries_load="demandlib", - timeseries_storage_units=storage_units_active_power, - ) - - load_name = self.topology.add_load( - load_id=4, - bus="Bus_MVStation_1", - p_nom=p_nom, - annual_consumption=annual_consumption, - sector="retail", - ) - timeseries.add_loads_timeseries(self, load_name) - active_power = self.timeseries.loads_active_power[load_name] - reactive_power = self.timeseries.loads_reactive_power[load_name] - assert np.isclose( - active_power.iloc[4], (4.150392788534633e-05 * annual_consumption) + assert self.edisgo.timeseries.generators_reactive_power.shape == ( + 4, + len(self.edisgo.topology.generators_df), ) - assert np.isclose( - reactive_power.iloc[13], - (7.937985538711569e-05 * annual_consumption * tan(acos(0.9))), - ) - - assert 
self.timeseries.loads_active_power.shape == (24, num_loads + 1) - assert self.timeseries.loads_reactive_power.shape == ( - 24, - num_loads + 1, - ) - self.topology.remove_load(load_name) - # Todo: add more than one load - - def test_add_generators_timeseries(self): - """Test add_generators_timeseries method""" - # TEST WORST-CASE - timeseries.get_component_timeseries(edisgo_obj=self, mode="worst-case") - num_gens = len(self.topology.generators_df) - timeindex = pd.date_range("1/1/1970", periods=2, freq="H") - # add single generator - p_nom = 1.7 - gen_name = self.topology.add_generator( - generator_id=5, - p_nom=p_nom, - bus="Bus_BranchTee_LVGrid_1_7", - generator_type="solar", - ) - timeseries.add_generators_timeseries(self, gen_name) - assert self.timeseries.generators_active_power.shape == ( - 2, - num_gens + 1, - ) - assert self.timeseries.generators_reactive_power.shape == ( - 2, - num_gens + 1, - ) - assert (self.timeseries.generators_active_power.index == timeindex).all() - assert ( - self.timeseries.generators_active_power.loc[timeindex, gen_name].values - == [0.85 * p_nom, 0] - ).all() - assert np.isclose( - np.array( - self.timeseries.generators_reactive_power.loc[ - timeindex, gen_name - ].values, - dtype=float, - ), - [-tan(acos(0.95)) * 0.85 * p_nom, 0], - ).all() - # add multiple generators and check - p_nom2 = 1.3 - gen_name2 = self.topology.add_generator( - generator_id=2, - p_nom=p_nom2, - bus="Bus_Generator_1", - generator_type="gas", - ) - p_nom3 = 2.4 - gen_name3 = self.topology.add_generator( - generator_id=6, - p_nom=p_nom3, - bus="Bus_BranchTee_LVGrid_1_14", - generator_type="hydro", - ) - timeseries.add_generators_timeseries(self, [gen_name2, gen_name3]) - # check expected values - assert self.timeseries.generators_active_power.shape == ( - 2, - num_gens + 3, - ) - assert self.timeseries.generators_reactive_power.shape == ( - 2, - num_gens + 3, + assert self.edisgo.timeseries.loads_active_power.shape == ( + 4, + 
len(self.edisgo.topology.loads_df), ) - assert np.isclose( - np.array( - self.timeseries.generators_active_power.loc[ - timeindex, [gen_name2, gen_name3] - ].values, - dtype=float, - ), - [[p_nom2, p_nom3], [0, 0]], - ).all() - assert np.isclose( - np.array( - self.timeseries.generators_reactive_power.loc[ - timeindex, [gen_name2, gen_name3] - ].values, - dtype=float, - ), - [[-p_nom2 * tan(acos(0.9)), -p_nom3 * tan(acos(0.95))], [0, 0]], - ).all() - # remove added generators - self.topology.remove_generator(gen_name) - self.topology.remove_generator(gen_name2) - self.topology.remove_generator(gen_name3) - # TEST MANUAL - timeindex = pd.date_range("1/1/2018", periods=24, freq="H") - ( - generators_active_power, - generators_reactive_power, - loads_active_power, - loads_reactive_power, - storage_units_active_power, - storage_units_reactive_power, - ) = self.create_random_timeseries_for_topology(timeindex) - - timeseries.get_component_timeseries( - edisgo_obj=self, - mode="manual", - timeindex=timeindex, - loads_active_power=loads_active_power, - loads_reactive_power=loads_reactive_power, - generators_active_power=generators_active_power, - generators_reactive_power=generators_reactive_power, - storage_units_active_power=storage_units_active_power, - storage_units_reactive_power=storage_units_reactive_power, - ) - # add single mv solar generator - gen_name = self.topology.add_generator( - generator_id=5, - p_nom=p_nom, - bus="Bus_BranchTee_LVGrid_1_7", - generator_type="solar", - ) - new_gen_active_power = pd.DataFrame( - index=timeindex, - columns=[gen_name], - data=([p_nom * 0.97] * len(timeindex)), + assert self.edisgo.timeseries.loads_reactive_power.shape == ( + 4, + len(self.edisgo.topology.loads_df), ) - new_gen_reactive_power = pd.DataFrame( - index=timeindex, - columns=[gen_name], - data=([p_nom * 0.5] * len(timeindex)), + assert self.edisgo.timeseries.storage_units_active_power.shape == ( + 4, + len(self.edisgo.topology.storage_units_df), ) - 
timeseries.add_generators_timeseries( - self, - gen_name, - generators_active_power=new_gen_active_power, - generators_reactive_power=new_gen_reactive_power, + assert self.edisgo.timeseries.storage_units_reactive_power.shape == ( + 4, + len(self.edisgo.topology.storage_units_df), ) - # check expected values - assert self.timeseries.generators_active_power.shape == ( - 24, - num_gens + 1, + + # ############################################################################# + # test reset of time series - set other time series before and only set + # worst case time series for other components + timeindex = pd.date_range("1/1/2018", periods=2, freq="H") + self.edisgo.timeseries.timeindex = timeindex + self.edisgo.timeseries._generators_active_power = pd.DataFrame( + {"Generator_1": [1.4, 2.3]}, index=timeindex ) - assert self.timeseries.generators_reactive_power.shape == ( - 24, - num_gens + 1, + self.edisgo.timeseries.set_worst_case( + self.edisgo, + cases=["load_case"], + generators_names=["GeneratorFluctuating_8"], ) - assert (self.timeseries.generators_active_power.index == timeindex).all() assert ( - self.timeseries.generators_active_power.loc[timeindex, gen_name].values - == 0.97 * p_nom - ).all() - assert np.isclose( - self.timeseries.generators_reactive_power.loc[timeindex, gen_name], - p_nom * 0.5, - ).all() - # add multiple generators and check - p_nom2 = 1.3 - gen_name2 = self.topology.add_generator( - generator_id=2, - p_nom=p_nom2, - bus="Bus_Generator_1", - generator_type="gas", - ) - p_nom3 = 2.4 - gen_name3 = self.topology.add_generator( - generator_id=6, - p_nom=p_nom3, - bus="Bus_BranchTee_LVGrid_1_14", - generator_type="hydro", - ) - new_gens_active_power = pd.DataFrame( - index=timeindex, - columns=[gen_name2, gen_name3], - data=( - np.array([[p_nom2 * 0.97], [p_nom3 * 0.98]]) - .repeat(len(timeindex), axis=1) - .T - ), + "GeneratorFluctuating_8" + in self.edisgo.timeseries.generators_active_power.columns ) - new_gens_reactive_power = pd.DataFrame( 
- index=timeindex, - columns=[gen_name2, gen_name3], - data=( - np.array([[p_nom2 * 0.5], [p_nom3 * 0.4]]) - .repeat(len(timeindex), axis=1) - .T - ), - ) - timeseries.add_generators_timeseries( - self, - [gen_name2, gen_name3], - generators_active_power=new_gens_active_power, - generators_reactive_power=new_gens_reactive_power, + assert self.edisgo.timeseries.generators_active_power.shape == (2, 1) + assert self.edisgo.timeseries.generators_reactive_power.shape == (2, 1) + + # ############################################################################# + # test setting other case now to see if time index is set correctly + self.edisgo.timeseries.set_worst_case( + self.edisgo, + cases=["feed-in_case"], + generators_names=["GeneratorFluctuating_8"], ) - # check expected values - assert self.timeseries.generators_active_power.shape == ( - 24, - num_gens + 3, + assert self.edisgo.timeseries.generators_active_power.shape == (4, 1) + assert self.edisgo.timeseries.generators_reactive_power.shape == (4, 1) + exp = pd.Series( + data=pd.date_range("1/1/1970", periods=4, freq="H"), + index=[ + "load_case_mv", + "load_case_lv", + "feed-in_case_mv", + "feed-in_case_lv", + ], ) - assert self.timeseries.generators_reactive_power.shape == ( - 24, - num_gens + 3, + assert_series_equal( + self.edisgo.timeseries.timeindex_worst_cases, exp, check_dtype=False ) - assert np.isclose( - self.timeseries.generators_active_power.loc[ - timeindex, [gen_name2, gen_name3] - ].values, - [p_nom2 * 0.97, p_nom3 * 0.98], - ).all() - assert np.isclose( - self.timeseries.generators_reactive_power.loc[ - timeindex, [gen_name2, gen_name3] - ].values, - [p_nom2 * 0.5, p_nom3 * 0.4], + assert ( + self.edisgo.timeseries.timeindex + == self.edisgo.timeseries.timeindex_worst_cases.values ).all() - # remove added generators - self.topology.remove_generator(gen_name) - self.topology.remove_generator(gen_name2) - self.topology.remove_generator(gen_name3) - # TEST TIMESERIES IMPORT - # test import timeseries 
from dbs - timeindex = pd.date_range("1/1/2011", periods=24, freq="H") - ts_gen_dispatchable = pd.DataFrame({"other": [0.775] * 24}, index=timeindex) - storage_units_active_power.index = timeindex - timeseries.get_component_timeseries( - timeindex=timeindex, - edisgo_obj=self, - timeseries_generation_fluctuating="oedb", - timeseries_generation_dispatchable=ts_gen_dispatchable, - timeseries_load="demandlib", - timeseries_storage_units=storage_units_active_power, - ) - - # add single mv solar generator - gen_name = self.topology.add_generator( - generator_id=5, - p_nom=p_nom, - bus="Bus_BranchTee_LVGrid_1_7", - generator_type="solar", - weather_cell_id=1122075, - ) - timeseries.add_generators_timeseries(self, gen_name) - assert self.timeseries.generators_active_power.shape == ( - 24, - num_gens + 1, - ) - assert self.timeseries.generators_reactive_power.shape == ( - 24, - num_gens + 1, - ) - # Todo: check values - - # add multiple generators and check - p_nom2 = 1.3 - gen_name2 = self.topology.add_generator( - generator_id=2, - p_nom=p_nom2, - bus="Bus_Generator_1", - generator_type="gas", - ) - p_nom3 = 2.4 - gen_name3 = self.topology.add_generator( - generator_id=6, - p_nom=p_nom3, - bus="Bus_BranchTee_LVGrid_1_14", - generator_type="hydro", - ) - new_gens_active_power = pd.DataFrame( - index=timeindex, - columns=[gen_name2, gen_name3], - data=( - np.array([[p_nom2 * 0.97], [p_nom3 * 0.98]]) - .repeat(len(timeindex), axis=1) - .T - ), - ) - timeseries.add_generators_timeseries(self, [gen_name2, gen_name3]) - assert self.timeseries.generators_active_power.shape == ( - 24, - num_gens + 3, + + def test_worst_case_generators(self): + + # ######### check both feed-in and load case + df = assign_voltage_level_to_component( + self.edisgo.topology.generators_df, self.edisgo.topology.buses_df + ) + p_ts, q_ts = self.edisgo.timeseries._worst_case_generators( + cases=["feed-in_case", "load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + number_of_cols = 
len(df.index) + assert p_ts.shape == (4, number_of_cols) + assert q_ts.shape == (4, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv", "load_case_mv", "load_case_lv"] + comp = "Generator_1" # gas, mv + p_nom = 0.775 + exp = pd.Series( + data=[1.0 * p_nom, 1.0 * p_nom, 0.0, 0.0], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + comp = "GeneratorFluctuating_2" # wind, mv + p_nom = 2.3 + exp = pd.Series( + data=[1.0 * p_nom, 1.0 * p_nom, 0.0, 0.0], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + comp = "GeneratorFluctuating_3" # solar, mv + p_nom = 2.67 + exp = pd.Series( + data=[0.85 * p_nom, 0.85 * p_nom, 0.0, 0.0], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + comp = "GeneratorFluctuating_20" # solar, lv + p_nom = 0.005 + exp = pd.Series( + data=[0.85 * p_nom, 0.85 * p_nom, 0.0, 0.0], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.95)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["Generator_1", "type"] + == "fixed_cosphi" + ) + + # ########### test for only feed-in case + p_ts, q_ts = self.edisgo.timeseries._worst_case_generators( + cases=["feed-in_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["feed-in_case_mv", 
"feed-in_case_lv"] + comp = "GeneratorFluctuating_2" # wind, mv + p_nom = 2.3 + exp = pd.Series( + data=[1.0 * p_nom, 1.0 * p_nom], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at[ + "GeneratorFluctuating_2", "type" + ] + == "fixed_cosphi" + ) + + # ########### test for only load case + p_ts, q_ts = self.edisgo.timeseries._worst_case_generators( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["load_case_mv", "load_case_lv"] + comp = "GeneratorFluctuating_20" # solar, lv + exp = pd.Series( + data=[0.0, 0.0], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.95)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at[ + "GeneratorFluctuating_20", "type" + ] + == "fixed_cosphi" + ) + + # ########## test error raising in case of missing load/generator parameter + + comp = "GeneratorFluctuating_14" + df.at[comp, "type"] = None + with pytest.raises(AttributeError): + self.edisgo.timeseries._worst_case_generators( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + def test_worst_case_conventional_load(self): + + # connect one load to MV + self.edisgo.topology._loads_df.at[ + "Load_agricultural_LVGrid_1_1", "bus" + ] = "Bus_BranchTee_MVGrid_1_2" + + # ######### check both feed-in and load case + df = assign_voltage_level_to_component( + self.edisgo.topology.loads_df, 
self.edisgo.topology.buses_df + ) + p_ts, q_ts = self.edisgo.timeseries._worst_case_conventional_load( + cases=["feed-in_case", "load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + number_of_cols = len(df.index) + assert p_ts.shape == (4, number_of_cols) + assert q_ts.shape == (4, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv", "load_case_mv", "load_case_lv"] + comp = "Load_agricultural_LVGrid_1_1" # mv + p_set = 0.0523 + exp = pd.Series( + data=[0.15 * p_set, 0.1 * p_set, 1.0 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + comp = "Load_agricultural_LVGrid_8_1" # lv + p_set = 0.0478 + exp = pd.Series( + data=[0.15 * p_set, 0.1 * p_set, 1.0 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(0.95)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at[ + "Load_agricultural_LVGrid_8_1", "type" + ] + == "fixed_cosphi" + ) + + # ########### test for only feed-in case + p_ts, q_ts = self.edisgo.timeseries._worst_case_conventional_load( + cases=["feed-in_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv"] + comp = "Load_agricultural_LVGrid_8_1" # lv + p_set = 0.0478 + exp = pd.Series( + data=[0.15 * p_set, 0.1 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(0.95)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check 
TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at[ + "Load_agricultural_LVGrid_8_1", "type" + ] + == "fixed_cosphi" + ) + + # ########### test for only load case + p_ts, q_ts = self.edisgo.timeseries._worst_case_conventional_load( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["load_case_mv", "load_case_lv"] + comp = "Load_agricultural_LVGrid_1_1" # mv + p_set = 0.0523 + exp = pd.Series( + data=[1.0 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at[ + "Load_agricultural_LVGrid_1_1", "type" + ] + == "fixed_cosphi" + ) + + # ########## test error raising in case of missing load/generator parameter + + comp = "Load_agricultural_LVGrid_1_1" + df.at[comp, "voltage_level"] = None + with pytest.raises(AttributeError): + self.edisgo.timeseries._worst_case_conventional_load( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + def test_worst_case_charging_points(self): + # add charging points to MV and LV + df_cp = pd.DataFrame( + { + "bus": [ + "Bus_BranchTee_MVGrid_1_2", + "Bus_BranchTee_MVGrid_1_2", + "Bus_BranchTee_LVGrid_1_5", + "Bus_BranchTee_LVGrid_1_5", + ], + "p_set": [0.1, 0.2, 0.3, 0.4], + "type": [ + "charging_point", + "charging_point", + "charging_point", + "charging_point", + ], + "sector": ["hpc", "public", "home", "work"], + }, + index=["CP1", "CP2", "CP3", "CP4"], + ) + + # ######### check both feed-in and load case + df = assign_voltage_level_to_component(df_cp, 
self.edisgo.topology.buses_df) + p_ts, q_ts = self.edisgo.timeseries._worst_case_charging_points( + cases=["feed-in_case", "load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + number_of_cols = len(df.index) + assert p_ts.shape == (4, number_of_cols) + assert q_ts.shape == (4, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv", "load_case_mv", "load_case_lv"] + comp = "CP1" # mv, hpc + p_set = 0.1 + exp = pd.Series( + data=[0.15 * p_set, 0.0 * p_set, 1.0 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + comp = "CP2" # mv, public + p_set = 0.2 + exp = pd.Series( + data=[0.15 * p_set, 0.0 * p_set, 1.0 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + comp = "CP3" # lv, home + p_set = 0.3 + exp = pd.Series( + data=[0.15 * p_set, 0.0 * p_set, 0.2 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + comp = "CP4" # lv, work + p_set = 0.4 + exp = pd.Series( + data=[0.15 * p_set, 0.0 * p_set, 0.2 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["CP4", "type"] + == "fixed_cosphi" + ) + + # ########### test for only feed-in case + p_ts, q_ts = self.edisgo.timeseries._worst_case_charging_points( + 
cases=["feed-in_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv"] + comp = "CP3" # lv, home + p_set = 0.3 + exp = pd.Series( + data=[0.15 * p_set, 0.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["CP3", "type"] + == "fixed_cosphi" + ) + + # ########### test for only load case + p_ts, q_ts = self.edisgo.timeseries._worst_case_charging_points( + cases=["load_case"], df=df, configs=self.edisgo.config ) - assert self.timeseries.generators_reactive_power.shape == ( - 24, - num_gens + 3, + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["load_case_mv", "load_case_lv"] + comp = "CP2" # mv, public + p_set = 0.2 + exp = pd.Series( + data=[1.0 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["CP2", "type"] + == "fixed_cosphi" + ) + + # ########## test error raising in case of missing load/generator parameter + + comp = "CP2" + df.at[comp, "voltage_level"] = None + with pytest.raises(AttributeError): + self.edisgo.timeseries._worst_case_charging_points( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + def test_worst_case_heat_pumps(self): + # add heat pumps to MV and LV + df_hp 
= pd.DataFrame( + { + "bus": ["Bus_BranchTee_MVGrid_1_2", "Bus_BranchTee_LVGrid_1_5"], + "p_set": [0.1, 0.2], + "type": ["heat_pump", "heat_pump"], + }, + index=["HP1", "HP2"], + ) + + # ######### check both feed-in and load case + df = assign_voltage_level_to_component(df_hp, self.edisgo.topology.buses_df) + p_ts, q_ts = self.edisgo.timeseries._worst_case_heat_pumps( + cases=["feed-in_case", "load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + number_of_cols = len(df.index) + assert p_ts.shape == (4, number_of_cols) + assert q_ts.shape == (4, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv", "load_case_mv", "load_case_lv"] + comp = "HP1" # mv + p_set = 0.1 + exp = pd.Series( + data=[0.0 * p_set, 0.0 * p_set, 0.8 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + comp = "HP2" # lv + p_set = 0.2 + exp = pd.Series( + data=[0.0 * p_set, 0.0 * p_set, 0.8 * p_set, 1.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["HP1", "type"] + == "fixed_cosphi" + ) + + # ########### test for only feed-in case + p_ts, q_ts = self.edisgo.timeseries._worst_case_heat_pumps( + cases=["feed-in_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv"] + comp = "HP2" # lv + p_set = 0.2 + exp = pd.Series( + data=[0.0 * p_set, 0.0 * p_set], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, 
comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["HP2", "type"] + == "fixed_cosphi" + ) + + # ########### test for only load case + p_ts, q_ts = self.edisgo.timeseries._worst_case_heat_pumps( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["load_case_mv", "load_case_lv"] + comp = "HP1" # mv + p_set = 0.1 + exp = pd.Series( + data=[0.8 * p_set, 1.0 * p_set], + name=comp, + index=index, ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = tan(acos(1.0)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["HP1", "type"] + == "fixed_cosphi" + ) + + # ########## test error raising in case of missing load/generator parameter + + comp = "HP1" + df.at[comp, "voltage_level"] = None + with pytest.raises(AttributeError): + self.edisgo.timeseries._worst_case_heat_pumps( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + def test_worst_case_storage_units(self): + + # ######### check both feed-in and load case + df = assign_voltage_level_to_component( + self.edisgo.topology.storage_units_df, self.edisgo.topology.buses_df + ) + p_ts, q_ts = self.edisgo.timeseries._worst_case_storage_units( + cases=["feed-in_case", "load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + number_of_cols = len(df.index) + assert p_ts.shape == (4, number_of_cols) + assert q_ts.shape == (4, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv", "load_case_mv", 
"load_case_lv"] + comp = "Storage_1" + p_nom = 0.4 + exp = pd.Series( + data=[1.0 * p_nom, 1.0 * p_nom, -1.0 * p_nom, -1.0 * p_nom], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["Storage_1", "type"] + == "fixed_cosphi" + ) + + # ########### test for only feed-in case + p_ts, q_ts = self.edisgo.timeseries._worst_case_storage_units( + cases=["feed-in_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["feed-in_case_mv", "feed-in_case_lv"] + comp = "Storage_1" + p_nom = 0.4 + exp = pd.Series( + data=[1.0 * p_nom, 1.0 * p_nom], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["Storage_1", "type"] + == "fixed_cosphi" + ) + + # ########### test for only load case + p_ts, q_ts = self.edisgo.timeseries._worst_case_storage_units( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + # check shape + assert p_ts.shape == (2, number_of_cols) + assert q_ts.shape == (2, number_of_cols) + + # check values + index = ["load_case_mv", "load_case_lv"] + comp = "Storage_1" + p_nom = 0.4 + exp = pd.Series( + data=[-1.0 * p_nom, -1.0 * p_nom], + name=comp, + index=index, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + pf = -tan(acos(0.9)) + assert_series_equal(q_ts.loc[:, comp], exp * pf, check_dtype=False) + + # check 
TimeSeriesRaw + assert len(self.edisgo.timeseries.time_series_raw.q_control) == len(df) + assert ( + self.edisgo.timeseries.time_series_raw.q_control.at["Storage_1", "type"] + == "fixed_cosphi" + ) + + # ########## test error raising in case of missing load/generator parameter + + comp = "Storage_1" + df.at[comp, "voltage_level"] = None + with pytest.raises(AttributeError): + self.edisgo.timeseries._worst_case_storage_units( + cases=["load_case"], df=df, configs=self.edisgo.config + ) + + @pytest.mark.slow + def test_predefined_fluctuating_generators_by_technology(self): + + timeindex = pd.date_range("1/1/2011 12:00", periods=2, freq="H") + self.edisgo.timeseries.timeindex = timeindex + + # ############# oedb, all generators (default) + self.edisgo.timeseries.predefined_fluctuating_generators_by_technology( + self.edisgo, "oedb" + ) + + # check shape + fluctuating_gens = self.edisgo.topology.generators_df[ + self.edisgo.topology.generators_df.type.isin(["wind", "solar"]) + ] + p_ts = self.edisgo.timeseries.generators_active_power + assert p_ts.shape == (2, len(fluctuating_gens)) + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. 
+ fluctuating_generators_active_power_by_technology.shape + == (2, 8) + ) + # fmt: on + + # check values + comp = "GeneratorFluctuating_2" # wind, w_id = 1122074 + p_nom = 2.3 + exp = pd.Series( + data=[0.0 * p_nom, 0.0 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False, atol=1e-5) + comp = "GeneratorFluctuating_8" # wind, w_id = 1122075 + p_nom = 3.0 + exp = pd.Series( + data=[0.0029929 * p_nom, 0.009521 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False, atol=1e-5) + comp = "GeneratorFluctuating_25" # solar, w_id = 1122075 + p_nom = 0.006 + exp = pd.Series( + data=[0.07824 * p_nom, 0.11216 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False, atol=1e-5) + + # ############# own settings (without weather cell ID), all generators + gens_p = pd.DataFrame( + data={ + "wind": [1, 2], + "solar": [3, 4], + }, + index=timeindex, + ) + self.edisgo.timeseries.predefined_fluctuating_generators_by_technology( + self.edisgo, gens_p + ) + + # check shape + fluctuating_gens = self.edisgo.topology.generators_df[ + self.edisgo.topology.generators_df.type.isin(["wind", "solar"]) + ] + p_ts = self.edisgo.timeseries.generators_active_power + assert p_ts.shape == (2, len(fluctuating_gens)) + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. 
+ fluctuating_generators_active_power_by_technology.shape + == (2, 10) + ) + # fmt: on + + # check values + comp = "GeneratorFluctuating_2" # wind + p_nom = 2.3 + exp = pd.Series( + data=[1.0 * p_nom, 2.0 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + comp = "GeneratorFluctuating_20" # solar + p_nom = 0.005 + exp = pd.Series( + data=[3.0 * p_nom, 4.0 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + + # ############# own settings (with weather cell ID), selected generators + self.edisgo.timeseries.timeindex = timeindex + gens_p = pd.DataFrame( + data={ + ("wind", 1122074): [5, 6], + ("solar", 1122075): [7, 8], + }, + index=timeindex, + ) + self.edisgo.timeseries.predefined_fluctuating_generators_by_technology( + self.edisgo, + gens_p, + generator_names=["GeneratorFluctuating_4", "GeneratorFluctuating_2"], + ) + + # check shape (should be the same as before, as time series are not reset but + # overwritten) + p_ts = self.edisgo.timeseries.generators_active_power + assert p_ts.shape == (2, len(fluctuating_gens)) + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. + fluctuating_generators_active_power_by_technology.shape + == (2, 10) + ) + # fmt: on + + # check values (check that values are overwritten) + comp = "GeneratorFluctuating_2" # wind + p_nom = 2.3 + exp = pd.Series( + data=[5.0 * p_nom, 6.0 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + comp = "GeneratorFluctuating_4" # solar + p_nom = 1.93 + exp = pd.Series( + data=[7.0 * p_nom, 8.0 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False) + # fmt: off + assert_series_equal( + self.edisgo.timeseries.time_series_raw. 
+ fluctuating_generators_active_power_by_technology.loc[ + :, ("wind", 1122074) + ], + gens_p.loc[:, ("wind", 1122074)], + check_dtype=False, + ) + # fmt: on + + # ############# own settings (with weather cell ID), all generators (check, that + # time series for generators are set for those for which time series are + # provided) + self.edisgo.timeseries.reset() + self.edisgo.timeseries.predefined_fluctuating_generators_by_technology( + self.edisgo, gens_p + ) + + # check shape + p_ts = self.edisgo.timeseries.generators_active_power + assert p_ts.shape == (2, 22) + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. + fluctuating_generators_active_power_by_technology.shape + == (2, 2) + ) + # fmt: on + + def test_predefined_dispatchable_generators_by_technology(self): + + timeindex = pd.date_range("1/1/2011 12:00", periods=2, freq="H") + self.edisgo.timeseries.timeindex = timeindex + + # ############# all generators (default), with "other" + gens_p = pd.DataFrame( + data={ + "other": [5, 6], + }, + index=timeindex, + ) + + self.edisgo.timeseries.predefined_dispatchable_generators_by_technology( + self.edisgo, gens_p + ) + + # check shape + dispatchable_gens = self.edisgo.topology.generators_df[ + ~self.edisgo.topology.generators_df.type.isin(["wind", "solar"]) + ] + p_ts = self.edisgo.timeseries.generators_active_power + assert p_ts.shape == (2, len(dispatchable_gens)) + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. 
+ dispatchable_generators_active_power_by_technology.shape + == (2, 1) + ) + # fmt: on + + # check values + comp = "Generator_1" # gas + p_nom = 0.775 + exp = pd.Series( + data=[5.0 * p_nom, 6.0 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False, atol=1e-5) + + # ############# all generators (default), with "gas" and "other" + # overwrite type of generator GeneratorFluctuating_2 + self.edisgo.topology._generators_df.at[ + "GeneratorFluctuating_2", "type" + ] = "coal" + gens_p = pd.DataFrame( + data={ + "other": [5, 6], + "gas": [7, 8], + }, + index=timeindex, + ) + + self.edisgo.timeseries.predefined_dispatchable_generators_by_technology( + self.edisgo, gens_p + ) + + # check shape + dispatchable_gens = self.edisgo.topology.generators_df[ + ~self.edisgo.topology.generators_df.type.isin(["wind", "solar"]) + ] + p_ts = self.edisgo.timeseries.generators_active_power + assert p_ts.shape == (2, len(dispatchable_gens)) + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. + dispatchable_generators_active_power_by_technology.shape + == (2, 2) + ) + # fmt: on + + # check values + comp = "Generator_1" # gas + p_nom = 0.775 + exp = pd.Series( + data=[7.0 * p_nom, 8.0 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False, atol=1e-5) + comp = "GeneratorFluctuating_2" # coal (other) + p_nom = 2.3 + exp = pd.Series( + data=[5.0 * p_nom, 6.0 * p_nom], + name=comp, + index=timeindex, + ) + assert_series_equal(p_ts.loc[:, comp], exp, check_dtype=False, atol=1e-5) + # fmt: off + assert_series_equal( + self.edisgo.timeseries.time_series_raw. 
+ dispatchable_generators_active_power_by_technology.loc[ + :, "other" + ], + gens_p.loc[:, "other"], + check_dtype=False, + ) + # fmt: on + + def test_predefined_conventional_loads_by_sector(self, caplog): + index = pd.date_range("1/1/2018", periods=3, freq="H") + self.edisgo.timeseries.timeindex = index + + # test assertion error + self.edisgo.timeseries.predefined_conventional_loads_by_sector( + self.edisgo, pd.DataFrame() + ) + assert "The profile you entered is empty. Method is skipped." in caplog.text + + # define expected profiles + profiles = pd.DataFrame( + index=index, + columns=["retail", "residential", "agricultural", "industrial"], + data=[ + [0.0000597, 0.0000782, 0.0000654, 0.0000992], + [0.0000526, 0.0000563, 0.0000611, 0.0000992], + [0.0000459, 0.0000451, 0.0000585, 0.0000992], + ], + ) + + # test demandlib - single loads + loads = [ + "Load_agricultural_LVGrid_5_2", + "Load_agricultural_LVGrid_9_1", + "Load_residential_LVGrid_9_2", + "Load_retail_LVGrid_9_14", + "Load_residential_LVGrid_5_3", + "Load_industrial_LVGrid_6_1", + "Load_agricultural_LVGrid_7_1", + ] + self.edisgo.timeseries.predefined_conventional_loads_by_sector( + self.edisgo, "demandlib", load_names=loads + ) + # fmt: off + assert self.edisgo.timeseries.time_series_raw.\ + conventional_loads_active_power_by_sector.shape\ + == (3, 4) + assert_frame_equal( + self.edisgo.timeseries.time_series_raw. 
+ conventional_loads_active_power_by_sector, + profiles, + atol=1e-7, + ) + # fmt: on + assert self.edisgo.timeseries.loads_active_power.shape == (3, 7) + assert np.isclose( + self.edisgo.timeseries.loads_active_power[ + "Load_agricultural_LVGrid_5_2" + ].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_agricultural_LVGrid_5_2", "annual_consumption" + ] + * profiles["agricultural"] + ).values, + atol=1e-4, + ).all() + assert np.isclose( + self.edisgo.timeseries.loads_active_power[ + "Load_residential_LVGrid_5_3" + ].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_residential_LVGrid_5_3", "annual_consumption" + ] + * profiles["residential"] + ).values, + atol=1e-4, + ).all() + assert np.isclose( + self.edisgo.timeseries.loads_active_power["Load_retail_LVGrid_9_14"].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_retail_LVGrid_9_14", "annual_consumption" + ] + * profiles["retail"] + ).values, + atol=1e-4, + ).all() assert np.isclose( - self.timeseries.generators_active_power.loc[ - timeindex, [gen_name2, gen_name3] + self.edisgo.timeseries.loads_active_power[ + "Load_industrial_LVGrid_6_1" ].values, - [p_nom2 * 0.775, p_nom3 * 0.775], + ( + self.edisgo.topology.loads_df.loc[ + "Load_industrial_LVGrid_6_1", "annual_consumption" + ] + * profiles["industrial"] + ).values, + atol=1e-4, ).all() + # test demandlib - all + self.edisgo.timeseries.predefined_conventional_loads_by_sector( + self.edisgo, "demandlib" + ) + # fmt: off + assert self.edisgo.timeseries.time_series_raw.\ + conventional_loads_active_power_by_sector.shape\ + == (3, 4) + # fmt: on + assert self.edisgo.timeseries.loads_active_power.shape == (3, 50) assert np.isclose( - self.timeseries.generators_reactive_power.loc[ - timeindex, [gen_name2, gen_name3] + self.edisgo.timeseries.loads_active_power[ + "Load_industrial_LVGrid_6_1" ].values, - [ - -tan(acos(0.9)) * p_nom2 * 0.775, - -tan(acos(0.95)) * p_nom3 * 0.775, - ], + [0.05752256] * 3, ).all() - # check values when reactive power 
is inserted as timeseries - new_gens_reactive_power = pd.DataFrame( - index=timeindex, - columns=[gen_name2, gen_name3], - data=( - np.array([[p_nom2 * 0.54], [p_nom3 * 0.45]]) - .repeat(len(timeindex), axis=1) - .T - ), - ) - timeseries.add_generators_timeseries( - self, - [gen_name2, gen_name3], - timeseries_generation_dispatchable=new_gens_active_power, - generation_reactive_power=new_gens_reactive_power, + assert np.isclose( + self.edisgo.timeseries.loads_active_power.loc[ + index[1], "Load_agricultural_LVGrid_5_2" + ], + 0.0274958, ) - assert self.timeseries.generators_active_power.shape == ( - 24, - num_gens + 3, + assert np.isclose( + self.edisgo.timeseries.loads_active_power.loc[ + index, "Load_residential_LVGrid_9_2" + ].values, + [0.00038328, 0.00027608, 0.00022101], + ).all() + # test assertion error + with pytest.raises(ValueError) as exc_info: + self.edisgo.timeseries.predefined_conventional_loads_by_sector( + self.edisgo, "random" + ) + assert ( + exc_info.value.args[0] + == "'ts_loads' must either be a pandas DataFrame or 'demandlib'." 
+ ) + # test manual - all + profiles = pd.DataFrame( + index=index, + columns=["retail", "residential", "agricultural", "industrial"], + data=[ + [0.003, 0.02, 0.00, 0.1], + [0.004, 0.01, 0.10, 0.2], + [0.002, 0.06, 0.25, 1.0], + ], ) - assert self.timeseries.generators_reactive_power.shape == ( - 24, - num_gens + 3, + self.edisgo.timeseries.predefined_conventional_loads_by_sector( + self.edisgo, profiles ) assert np.isclose( - self.timeseries.generators_active_power.loc[ - timeindex, [gen_name2, gen_name3] + self.edisgo.timeseries.loads_active_power[ + "Load_agricultural_LVGrid_5_2" ].values, - [p_nom2 * 0.775, p_nom3 * 0.775], + ( + self.edisgo.topology.loads_df.loc[ + "Load_agricultural_LVGrid_5_2", "annual_consumption" + ] + * profiles["agricultural"] + ).values, ).all() assert np.isclose( - self.timeseries.generators_reactive_power.loc[ - timeindex, [gen_name2, gen_name3] + self.edisgo.timeseries.loads_active_power[ + "Load_residential_LVGrid_5_3" ].values, - [p_nom2 * 0.54, p_nom3 * 0.45], + ( + self.edisgo.topology.loads_df.loc[ + "Load_residential_LVGrid_5_3", "annual_consumption" + ] + * profiles["residential"] + ).values, ).all() - # remove added generators - self.topology.remove_generator(gen_name) - self.topology.remove_generator(gen_name2) - self.topology.remove_generator(gen_name3) - - def test_add_storage_unit_timeseries(self): - """Test add_storage_unit_timeseries method""" - # TEST WORST-CASE - # add single storage unit - num_storage_units = len(self.topology.storage_units_df) - timeseries.get_component_timeseries(edisgo_obj=self, mode="worst-case") - p_nom = 2.1 - timeindex = pd.date_range("1/1/1970", periods=2, freq="H") - storage_name = self.topology.add_storage_unit( - bus="Bus_MVStation_1", p_nom=p_nom - ) - timeseries.add_storage_units_timeseries(self, storage_name) - assert (self.timeseries.storage_units_active_power.index == timeindex).all() - assert (self.timeseries.storage_units_reactive_power.index == timeindex).all() - assert 
self.timeseries.storage_units_active_power.shape == ( - len(timeindex), - num_storage_units + 1, - ) - assert self.timeseries.storage_units_reactive_power.shape == ( - len(timeindex), - num_storage_units + 1, - ) - assert ( - self.timeseries.storage_units_active_power.loc[ - timeindex, storage_name - ].values - == [p_nom, -p_nom] + assert np.isclose( + self.edisgo.timeseries.loads_active_power["Load_retail_LVGrid_9_14"].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_retail_LVGrid_9_14", "annual_consumption" + ] + * profiles["retail"] + ).values, ).all() - assert ( - np.isclose( - np.array( - self.timeseries.storage_units_reactive_power.loc[ - timeindex, storage_name - ].values, - dtype=float, - ), - [-p_nom * tan(acos(0.9)), p_nom * tan(acos(0.9))], - ) + assert np.isclose( + self.edisgo.timeseries.loads_active_power[ + "Load_industrial_LVGrid_6_1" + ].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_industrial_LVGrid_6_1", "annual_consumption" + ] + * profiles["industrial"] + ).values, ).all() - # add two storage units - p_nom2 = 1.3 - storage_name2 = self.topology.add_storage_unit( - bus="Bus_BranchTee_LVGrid_1_13", p_nom=p_nom2 - ) - p_nom3 = 3.12 - storage_name3 = self.topology.add_storage_unit( - bus="BusBar_MVGrid_1_LVGrid_6_MV", p_nom=p_nom3 - ) - timeseries.add_storage_units_timeseries(self, [storage_name2, storage_name3]) - assert self.timeseries.storage_units_active_power.shape == ( - len(timeindex), - num_storage_units + 3, + # test manual - single loads + profiles_new = ( + pd.DataFrame( + index=index, + columns=["retail", "residential", "agricultural", "industrial"], + data=[ + [0.003, 0.02, 0.00, 0.1], + [0.004, 0.01, 0.10, 0.2], + [0.002, 0.06, 0.25, 1.0], + ], + ) + * 5 ) - assert self.timeseries.storage_units_reactive_power.shape == ( - len(timeindex), - num_storage_units + 3, + loads = ["Load_industrial_LVGrid_6_1", "Load_residential_LVGrid_5_3"] + self.edisgo.timeseries.predefined_conventional_loads_by_sector( + self.edisgo, 
profiles_new, load_names=loads ) assert np.isclose( - np.array( - self.timeseries.storage_units_active_power.loc[ - timeindex, [storage_name2, storage_name3] - ].values, - dtype=float, - ), - [[p_nom2, p_nom3], [-p_nom2, -p_nom3]], + self.edisgo.timeseries.loads_active_power[ + "Load_agricultural_LVGrid_5_2" + ].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_agricultural_LVGrid_5_2", "annual_consumption" + ] + * profiles["agricultural"] + ).values, ).all() assert np.isclose( - np.array( - self.timeseries.storage_units_reactive_power.loc[ - timeindex, [storage_name2, storage_name3] - ].values, - dtype=float, - ), - [ - [-tan(acos(0.95)) * p_nom2, -tan(acos(0.9)) * p_nom3], - [tan(acos(0.95)) * p_nom2, tan(acos(0.9)) * p_nom3], - ], + self.edisgo.timeseries.loads_active_power[ + "Load_residential_LVGrid_5_3" + ].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_residential_LVGrid_5_3", "annual_consumption" + ] + * profiles_new["residential"] + ).values, ).all() - # remove storage units - self.topology.remove_storage_unit(storage_name) - self.topology.remove_storage_unit(storage_name2) - self.topology.remove_storage_unit(storage_name3) - # TEST MANUAL - timeindex = pd.date_range("1/1/2018", periods=24, freq="H") - ( - generators_active_power, - generators_reactive_power, - loads_active_power, - loads_reactive_power, - storage_units_active_power, - storage_units_reactive_power, - ) = self.create_random_timeseries_for_topology(timeindex) - - timeseries.get_component_timeseries( - edisgo_obj=self, - mode="manual", - timeindex=timeindex, - loads_active_power=loads_active_power, - loads_reactive_power=loads_reactive_power, - generators_active_power=generators_active_power, - generators_reactive_power=generators_reactive_power, - storage_units_active_power=storage_units_active_power, - storage_units_reactive_power=storage_units_reactive_power, - ) - # add single mv solar generator - storage_name = self.topology.add_storage_unit( - bus="Bus_MVStation_1", 
p_nom=p_nom - ) - new_storage_active_power = pd.DataFrame( - index=timeindex, - columns=[storage_name], - data=([p_nom * 0.97] * len(timeindex)), - ) - new_storage_reactive_power = pd.DataFrame( - index=timeindex, - columns=[storage_name], - data=([p_nom * 0.5] * len(timeindex)), - ) - timeseries.add_storage_units_timeseries( - self, - storage_name, - storage_units_active_power=new_storage_active_power, - storage_units_reactive_power=new_storage_reactive_power, - ) - # check expected values - assert self.timeseries.storage_units_active_power.shape == ( - 24, - num_storage_units + 1, - ) - assert self.timeseries.storage_units_reactive_power.shape == ( - 24, - num_storage_units + 1, - ) - assert (self.timeseries.storage_units_active_power.index == timeindex).all() - assert ( - self.timeseries.storage_units_active_power.loc[ - timeindex, storage_name - ].values - == 0.97 * p_nom + assert np.isclose( + self.edisgo.timeseries.loads_active_power["Load_retail_LVGrid_9_14"].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_retail_LVGrid_9_14", "annual_consumption" + ] + * profiles["retail"] + ).values, ).all() assert np.isclose( - self.timeseries.storage_units_reactive_power.loc[timeindex, storage_name], - p_nom * 0.5, + self.edisgo.timeseries.loads_active_power[ + "Load_industrial_LVGrid_6_1" + ].values, + ( + self.edisgo.topology.loads_df.loc[ + "Load_industrial_LVGrid_6_1", "annual_consumption" + ] + * profiles_new["industrial"] + ).values, ).all() - # add multiple generators and check - p_nom2 = 1.3 - storage_name2 = self.topology.add_storage_unit( - bus="Bus_BranchTee_LVGrid_1_13", p_nom=p_nom2 + + def test_predefined_charging_points_by_use_case(self, caplog): + index = pd.date_range("1/1/2018", periods=3, freq="H") + self.edisgo.timeseries.timeindex = index + + # test assertion error + self.edisgo.timeseries.predefined_conventional_loads_by_sector( + self.edisgo, pd.DataFrame() + ) + assert "The profile you entered is empty. Method is skipped." 
in caplog.text + + # add charging points to MV and LV + df_cp = pd.DataFrame( + { + "bus": [ + "Bus_BranchTee_MVGrid_1_2", + "Bus_BranchTee_MVGrid_1_2", + "Bus_BranchTee_LVGrid_1_5", + "Bus_BranchTee_LVGrid_1_5", + ], + "p_set": [0.1, 0.2, 0.3, 0.4], + "type": [ + "charging_point", + "charging_point", + "charging_point", + "charging_point", + ], + "sector": ["hpc", "public", "home", "work"], + }, + index=["CP1", "CP2", "CP3", "CP4"], ) - p_nom3 = 3.12 - storage_name3 = self.topology.add_storage_unit( - bus="BusBar_MVGrid_1_LVGrid_6_MV", p_nom=p_nom3 + self.edisgo.topology.loads_df = pd.concat( + [ + self.edisgo.topology.loads_df, + df_cp, + ] ) - new_storages_active_power = pd.DataFrame( - index=timeindex, - columns=[storage_name2, storage_name3], - data=( - np.array([[p_nom2 * 0.97], [p_nom3 * 0.98]]) - .repeat(len(timeindex), axis=1) - .T - ), + # test all charging points + profiles = pd.DataFrame( + index=index, + columns=["hpc", "public", "home", "work"], + data=[ + [3.03, 0.22, 0.01, 0.1], + [2.04, 0.41, 0.20, 0.2], + [7.01, 0.16, 0.24, 1.0], + ], ) - new_storages_reactive_power = pd.DataFrame( - index=timeindex, - columns=[storage_name2, storage_name3], - data=( - np.array([[p_nom2 * 0.5], [p_nom3 * 0.4]]) - .repeat(len(timeindex), axis=1) - .T + self.edisgo.timeseries.predefined_charging_points_by_use_case( + self.edisgo, profiles + ) + # fmt: off + assert self.edisgo.timeseries.time_series_raw.\ + charging_points_active_power_by_use_case.shape\ + == (3, 4) + # fmt: on + + for name, cp in df_cp.iterrows(): + assert np.isclose( + self.edisgo.timeseries.loads_active_power[name].values, + ( + self.edisgo.topology.charging_points_df.loc[name, "p_set"] + * profiles[cp.sector] + ).values, + ).all() + # test single charging points + profiles_new = profiles * 0.5 + self.edisgo.timeseries.predefined_charging_points_by_use_case( + self.edisgo, profiles_new, load_names=["CP1", "CP3"] + ) + for name, cp in df_cp.iterrows(): + if name in ["CP1", "CP3"]: + assert 
np.isclose( + self.edisgo.timeseries.loads_active_power[name].values, + ( + self.edisgo.topology.charging_points_df.loc[name, "p_set"] + * profiles_new[cp.sector] + ).values, + ).all() + else: + assert np.isclose( + self.edisgo.timeseries.loads_active_power[name].values, + ( + self.edisgo.topology.charging_points_df.loc[name, "p_set"] + * profiles[cp.sector] + ).values, + ).all() + # test warning + profiles = pd.DataFrame( + index=index, + columns=["residential", "public", "home"], + data=[[3.03, 0.01, 0.1], [2.04, 0.20, 0.2], [7.01, 0.24, 1.0]], + ) + with pytest.raises(Warning) as exc_info: + self.edisgo.timeseries.predefined_charging_points_by_use_case( + self.edisgo, profiles + ) + assert ( + exc_info.value.args[0] + == "Not all affected loads are charging points. Please check and" + " adapt if necessary." + ) + # fmt: off + assert self.edisgo.timeseries.time_series_raw.\ + charging_points_active_power_by_use_case.shape\ + == (3, 5) + # fmt: on + + def test_fixed_cosphi(self): + # set active power time series for fixed cosphi + timeindex = pd.date_range("1/1/1970", periods=3, freq="H") + self.edisgo.set_timeindex(timeindex) + ts_solar = np.array([0.1, 0.2, 0.3]) + ts_wind = [0.4, 0.5, 0.6] + self.edisgo.set_time_series_active_power_predefined( + fluctuating_generators_ts=pd.DataFrame( + {"solar": ts_solar, "wind": ts_wind}, index=timeindex ), + dispatchable_generators_ts=pd.DataFrame( + {"other": ts_solar}, index=timeindex + ), + conventional_loads_ts="demandlib", + ) + self.edisgo.set_time_series_manual( + storage_units_p=pd.DataFrame({"Storage_1": ts_wind}, index=timeindex) + ) + + # test different options (default, Dataframe with default, Dataframe with + # different settings) - None is already tested in eDisGo class tests + gen = "GeneratorFluctuating_14" # solar LV generator + load_1 = "Load_agricultural_LVGrid_3_1" + load_2 = "Load_residential_LVGrid_7_3" + load_3 = "Load_residential_LVGrid_8_12" + self.edisgo.set_time_series_reactive_power_control( + 
generators_parametrisation=pd.DataFrame( + { + "components": [[gen]], + "mode": ["default"], + "power_factor": ["default"], + }, + index=[1], + ), + loads_parametrisation=pd.DataFrame( + { + "components": [[load_1], [load_2, load_3]], + "mode": ["default", "capacitive"], + "power_factor": ["default", 0.98], + }, + index=[1, 2], + ), + storage_units_parametrisation="default", ) - timeseries.add_storage_units_timeseries( - self, - [storage_name2, storage_name3], - storage_units_active_power=new_storages_active_power, - storage_units_reactive_power=new_storages_reactive_power, - ) - # check expected values - assert self.timeseries.storage_units_active_power.shape == ( - 24, - num_storage_units + 3, + assert self.edisgo.timeseries.generators_reactive_power.shape == (3, 1) + assert self.edisgo.timeseries.loads_reactive_power.shape == (3, 3) + assert self.edisgo.timeseries.storage_units_reactive_power.shape == (3, 1) + assert ( + np.isclose( + self.edisgo.timeseries.generators_reactive_power.loc[:, gen], + ts_solar * -np.tan(np.arccos(0.95)) * 0.005, + ) + ).all() + assert ( + np.isclose( + self.edisgo.timeseries.loads_reactive_power.loc[:, load_1], + self.edisgo.timeseries.loads_active_power.loc[:, load_1] + * np.tan(np.arccos(0.95)), + ) + ).all() + assert ( + ( + np.isclose( + self.edisgo.timeseries.loads_reactive_power.loc[ + :, [load_2, load_3] + ], + self.edisgo.timeseries.loads_active_power.loc[:, [load_2, load_3]] + * -np.tan(np.arccos(0.98)), + ) + ) + .all() + .all() ) - assert self.timeseries.storage_units_reactive_power.shape == ( - 24, - num_storage_units + 3, + assert ( + np.isclose( + self.edisgo.timeseries.storage_units_reactive_power.loc[:, "Storage_1"], + self.edisgo.timeseries.storage_units_active_power.loc[:, "Storage_1"] + * -np.tan(np.arccos(0.9)), + ) + ).all() + + def test_residual_load(self): + self.edisgo.set_time_series_worst_case_analysis() + time_steps_load_case = self.edisgo.timeseries.timeindex_worst_cases[ + 
self.edisgo.timeseries.timeindex_worst_cases.index.str.contains("load") + ].values + peak_load = ( + self.edisgo.topology.loads_df.p_set.sum() + + self.edisgo.topology.storage_units_df.p_nom.sum() ) assert np.isclose( - self.timeseries.storage_units_active_power.loc[ - timeindex, [storage_name2, storage_name3] - ].values, - [p_nom2 * 0.97, p_nom3 * 0.98], + self.edisgo.timeseries.residual_load.loc[time_steps_load_case], peak_load ).all() - assert np.isclose( - self.timeseries.storage_units_reactive_power.loc[ - timeindex, [storage_name2, storage_name3] - ].values, - [p_nom2 * 0.5, p_nom3 * 0.4], + time_steps_feedin_case = self.edisgo.timeseries.timeindex_worst_cases[ + self.edisgo.timeseries.timeindex_worst_cases.index.str.contains("feed") + ].values + assert ( + self.edisgo.timeseries.residual_load.loc[time_steps_feedin_case] < 0 ).all() - # remove added generators - self.topology.remove_storage_unit(storage_name) - self.topology.remove_storage_unit(storage_name2) - self.topology.remove_storage_unit(storage_name3) - # TEST TIMESERIES IMPORT - # test import timeseries from dbs - timeindex = pd.date_range("1/1/2011", periods=24, freq="H") - ts_gen_dispatchable = pd.DataFrame({"other": [0.775] * 24}, index=timeindex) - # reindex timeseries - storage_units_active_power = storage_units_active_power.set_index(timeindex) - new_storage_active_power = new_storage_active_power.set_index(timeindex) - new_storage_reactive_power = new_storage_reactive_power.set_index(timeindex) - new_storages_active_power = new_storages_active_power.set_index(timeindex) - new_storages_reactive_power = new_storages_reactive_power.set_index(timeindex) - timeseries.get_component_timeseries( - timeindex=timeindex, - edisgo_obj=self, - timeseries_generation_fluctuating="oedb", - timeseries_generation_dispatchable=ts_gen_dispatchable, - timeseries_load="demandlib", - timeseries_storage_units=storage_units_active_power, - ) - - # add single mv solar generator - storage_name = 
self.topology.add_storage_unit( - bus="Bus_MVStation_1", p_nom=p_nom - ) - - timeseries.add_storage_units_timeseries( - self, - storage_name, - timeseries_storage_units=new_storage_active_power, - timeseries_storage_units_reactive_power=new_storage_reactive_power, - ) - assert self.timeseries.storage_units_active_power.shape == ( - 24, - num_storage_units + 1, - ) - assert self.timeseries.storage_units_reactive_power.shape == ( - 24, - num_storage_units + 1, - ) - assert_frame_equal( - self.timeseries.storage_units_active_power.loc[timeindex, [storage_name]], - new_storage_active_power, - ) - assert_frame_equal( - self.timeseries.storage_units_reactive_power.loc[timeindex, [storage_name]], - new_storage_reactive_power, - ) - # add multiple generators and check - p_nom2 = 1.3 - storage_name2 = self.topology.add_storage_unit( - bus="Bus_BranchTee_LVGrid_1_13", p_nom=p_nom2 - ) - p_nom3 = 3.12 - storage_name3 = self.topology.add_storage_unit( - bus="BusBar_MVGrid_1_LVGrid_6_MV", p_nom=p_nom3 - ) + def test_timesteps_load_feedin_case(self): + self.edisgo.set_time_series_worst_case_analysis() + time_steps_load_case = self.edisgo.timeseries.timeindex_worst_cases[ + self.edisgo.timeseries.timeindex_worst_cases.index.str.contains("load") + ].values + assert ( + self.edisgo.timeseries.timesteps_load_feedin_case.loc[time_steps_load_case] + == "load_case" + ).all() + time_steps_feedin_case = self.edisgo.timeseries.timeindex_worst_cases[ + self.edisgo.timeseries.timeindex_worst_cases.index.str.contains("feed") + ].values + assert ( + self.edisgo.timeseries.timesteps_load_feedin_case.loc[ + time_steps_feedin_case + ] + == "feed-in_case" + ).all() - timeseries.add_storage_units_timeseries( - self, - [storage_name2, storage_name3], - timeseries_storage_units=new_storages_active_power, - ) + def test_reduce_memory(self): + + self.edisgo.set_time_series_worst_case_analysis() + # fmt: off + self.edisgo.timeseries.time_series_raw.\ + fluctuating_generators_active_power_by_technology 
= pd.DataFrame( + data={ + "wind": [1.23, 2.0, 5.0, 6.0], + "solar": [3.0, 4.0, 7.0, 8.0], + }, + index=self.edisgo.timeseries.timeindex, + ) + # fmt: on - assert self.timeseries.storage_units_active_power.shape == ( - 24, - num_storage_units + 3, - ) - assert self.timeseries.storage_units_reactive_power.shape == ( - 24, - num_storage_units + 3, - ) - assert np.isclose( - np.array( - self.timeseries.storage_units_active_power.loc[ - timeindex, [storage_name2, storage_name3] - ].values, - dtype=float, - ), - [p_nom2 * 0.97, p_nom3 * 0.98], + # check with default value + assert (self.edisgo.timeseries.loads_active_power.dtypes == "float64").all() + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. + fluctuating_generators_active_power_by_technology.dtypes + == "float64" ).all() - assert np.isclose( - np.array( - self.timeseries.storage_units_reactive_power.loc[ - timeindex, [storage_name2, storage_name3] - ].values, - dtype=float, - ), - [ - -tan(acos(0.95)) * p_nom2 * 0.97, - -tan(acos(0.9)) * p_nom3 * 0.98, - ], + # fmt: on + self.edisgo.timeseries.reduce_memory() + assert (self.edisgo.timeseries.loads_active_power.dtypes == "float32").all() + assert (self.edisgo.timeseries.loads_reactive_power.dtypes == "float32").all() + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. 
+ fluctuating_generators_active_power_by_technology.dtypes + == "float32" ).all() - # check values when reactive power is inserted as timeseries - timeseries.add_storage_units_timeseries( - self, - [storage_name2, storage_name3], - timeseries_storage_units=new_storages_active_power, - timeseries_storage_units_reactive_power=new_storages_reactive_power, - ) - assert self.timeseries.storage_units_active_power.shape == ( - 24, - num_storage_units + 3, - ) - assert self.timeseries.storage_units_reactive_power.shape == ( - 24, - num_storage_units + 3, + # fmt: on + + # check arguments + self.edisgo.timeseries.reduce_memory( + to_type="float16", + attr_to_reduce=["loads_reactive_power"], + time_series_raw=False, ) - assert np.isclose( - self.timeseries.storage_units_active_power.loc[ - timeindex, [storage_name2, storage_name3] - ].values, - [p_nom2 * 0.97, p_nom3 * 0.98], - ).all() - assert np.isclose( - self.timeseries.storage_units_reactive_power.loc[ - timeindex, [storage_name2, storage_name3] - ].values, - [p_nom2 * 0.5, p_nom3 * 0.4], + + assert (self.edisgo.timeseries.loads_active_power.dtypes == "float32").all() + assert (self.edisgo.timeseries.loads_reactive_power.dtypes == "float16").all() + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. 
+ fluctuating_generators_active_power_by_technology.dtypes + == "float32" ).all() - # remove added generators - self.topology.remove_storage_unit(storage_name) - self.topology.remove_storage_unit(storage_name2) - self.topology.remove_storage_unit(storage_name3) - - def test_check_timeseries_for_index_and_cols(self): - """Test check_timeseries_for_index_and_cols method""" - timeindex = pd.date_range("1/1/2017", periods=13, freq="H") - timeseries.get_component_timeseries( - edisgo_obj=self, mode="manual", timeindex=timeindex - ) - added_comps = ["Comp_1", "Comp_2"] - timeseries_with_wrong_timeindex = pd.DataFrame( - index=timeindex[0:12], - columns=added_comps, - data=np.random.rand(12, len(added_comps)), - ) - # Todo: check what happens with assertion. Why are strings not the same? - msg = ( - "Inserted timeseries for the following components have the a " - "wrong time index:" - ) - with pytest.raises(ValueError, match=msg): - timeseries.check_timeseries_for_index_and_cols( - self, timeseries_with_wrong_timeindex, added_comps - ) - timeseries_with_wrong_comp_names = pd.DataFrame( - index=timeindex, columns=["Comp_1"], data=np.random.rand(13, 1) - ) - msg = ( - "Columns of inserted timeseries are not the same " - "as names of components to be added. 
Timeseries " - "for the following components were tried to be " - "added:" - ) - with pytest.raises(ValueError, match=msg): - timeseries.check_timeseries_for_index_and_cols( - self, timeseries_with_wrong_comp_names, added_comps - ) + # fmt: on + + def test_to_csv(self): + + timeindex = pd.date_range("1/1/2018", periods=2, freq="H") + self.edisgo.set_timeindex(timeindex) - def create_random_timeseries_for_topology(self, timeindex): - # create random timeseries - load_names = self.topology.loads_df.index + # create dummy time series loads_active_power = pd.DataFrame( - index=timeindex, - columns=load_names, - data=np.multiply( - np.random.rand(len(timeindex), len(load_names)), - ([self.topology.loads_df.p_nom] * len(timeindex)), - ), - ) - loads_reactive_power = pd.DataFrame( - index=timeindex, - columns=load_names, - data=np.multiply( - np.random.rand(len(timeindex), len(load_names)), - ([self.topology.loads_df.p_nom] * len(timeindex)), - ), - ) - generator_names = self.topology.generators_df.index - generators_active_power = pd.DataFrame( - index=timeindex, - columns=generator_names, - data=np.multiply( - np.random.rand(len(timeindex), len(generator_names)), - ([self.topology.generators_df.p_nom] * len(timeindex)), - ), + {"load1": [1.4, 2.3], "load2": [2.4, 1.3]}, index=timeindex ) + self.edisgo.timeseries.loads_active_power = loads_active_power generators_reactive_power = pd.DataFrame( - index=timeindex, - columns=generator_names, - data=np.multiply( - np.random.rand(len(timeindex), len(generator_names)), - ([self.topology.generators_df.p_nom] * len(timeindex)), - ), + {"gen1": [1.4, 2.3], "gen2": [2.4, 1.3]}, index=timeindex ) - storage_names = self.topology.storage_units_df.index - storage_units_active_power = pd.DataFrame( - index=timeindex, - columns=storage_names, - data=np.multiply( - np.random.rand(len(timeindex), len(storage_names)), - ([self.topology.storage_units_df.p_nom] * len(timeindex)), - ), + self.edisgo.timeseries.generators_reactive_power = 
generators_reactive_power + # fmt: off + self.edisgo.timeseries.time_series_raw. \ + fluctuating_generators_active_power_by_technology = pd.DataFrame( + data={ + "wind": [1.23, 2.0], + "solar": [3.0, 4.0], + }, + index=self.edisgo.timeseries.timeindex, + ) + # fmt: on + + # test with default values + save_dir = os.path.join(os.getcwd(), "timeseries_csv") + self.edisgo.timeseries.to_csv(save_dir) + + files_in_timeseries_dir = os.listdir(save_dir) + assert len(files_in_timeseries_dir) == 2 + assert "loads_active_power.csv" in files_in_timeseries_dir + assert "generators_reactive_power.csv" in files_in_timeseries_dir + + shutil.rmtree(save_dir) + + # test with reduce memory True, to_type = float16 and saving TimeSeriesRaw + self.edisgo.timeseries.to_csv( + save_dir, reduce_memory=True, to_type="float16", time_series_raw=True ) - storage_units_reactive_power = pd.DataFrame( - index=timeindex, - columns=storage_names, - data=np.multiply( - np.random.rand(len(timeindex), len(storage_names)), - ([self.topology.storage_units_df.p_nom] * len(timeindex)), - ), + + assert ( + self.edisgo.timeseries.generators_reactive_power.dtypes == "float16" + ).all() + files_in_timeseries_dir = os.listdir(save_dir) + assert len(files_in_timeseries_dir) == 3 + files_in_timeseries_raw_dir = os.listdir( + os.path.join(save_dir, "time_series_raw") ) - return ( - generators_active_power, - generators_reactive_power, - loads_active_power, - loads_reactive_power, - storage_units_active_power, - storage_units_reactive_power, + assert len(files_in_timeseries_raw_dir) == 1 + assert ( + "fluctuating_generators_active_power_by_technology.csv" + in files_in_timeseries_raw_dir ) - def test_drop_existing_component_timeseries(self): - """Test for _drop_existing_timseries_method""" - storage_1 = self.topology.add_storage_unit("Bus_MVStation_1", 0.3) - timeindex = pd.date_range("1/1/1970", periods=2, freq="H") - timeseries.get_component_timeseries(edisgo_obj=self, mode="worst-case") - # test drop load 
timeseries - assert hasattr( - self.timeseries.loads_active_power, "Load_agricultural_LVGrid_1_1" + shutil.rmtree(save_dir, ignore_errors=True) + + def test_from_csv(self): + + timeindex = pd.date_range("1/1/2018", periods=2, freq="H") + self.edisgo.set_timeindex(timeindex) + + # create dummy time series + loads_reactive_power = pd.DataFrame( + {"load1": [1.4, 2.3], "load2": [2.4, 1.3]}, index=timeindex ) - assert hasattr( - self.timeseries.loads_reactive_power, - "Load_agricultural_LVGrid_1_1", + self.edisgo.timeseries.loads_reactive_power = loads_reactive_power + generators_active_power = pd.DataFrame( + {"gen1": [1.4, 2.3], "gen2": [2.4, 1.3]}, index=timeindex ) - timeseries._drop_existing_component_timeseries( - self, "loads", ["Load_agricultural_LVGrid_1_1"] + self.edisgo.timeseries.generators_active_power = generators_active_power + fluc_gen = pd.DataFrame( + data={ + "wind": [1.23, 2.0], + "solar": [3.0, 4.0], + }, + index=self.edisgo.timeseries.timeindex, ) - with pytest.raises(KeyError): - self.timeseries.loads_active_power.loc[ - timeindex, "Load_agricultural_LVGrid_1_1" - ] - with pytest.raises(KeyError): - self.timeseries.loads_reactive_power.loc[ - timeindex, "Load_agricultural_LVGrid_1_1" - ] - # test drop generators timeseries - assert hasattr( - self.timeseries.generators_active_power, "GeneratorFluctuating_7" + # fmt: off + self.edisgo.timeseries.time_series_raw. 
\ + fluctuating_generators_active_power_by_technology = fluc_gen + # fmt: on + + # write to csv + save_dir = os.path.join(os.getcwd(), "timeseries_csv") + self.edisgo.timeseries.to_csv(save_dir, time_series_raw=True) + + # reset TimeSeries + self.edisgo.timeseries.reset() + + self.edisgo.timeseries.from_csv(save_dir) + + pd.testing.assert_frame_equal( + self.edisgo.timeseries.loads_reactive_power, + loads_reactive_power, + check_freq=False, ) - assert hasattr( - self.timeseries.generators_reactive_power, "GeneratorFluctuating_7" + pd.testing.assert_frame_equal( + self.edisgo.timeseries.generators_active_power, + generators_active_power, + check_freq=False, ) - timeseries._drop_existing_component_timeseries( - self, "generators", "GeneratorFluctuating_7" + # fmt: off + assert ( + self.edisgo.timeseries.time_series_raw. + fluctuating_generators_active_power_by_technology.empty ) - with pytest.raises(KeyError): - self.timeseries.generators_active_power.loc[ - timeindex, "GeneratorFluctuating_7" - ] - with pytest.raises(KeyError): - self.timeseries.generators_reactive_power.loc[ - timeindex, "GeneratorFluctuating_7" - ] - # test drop storage units timeseries - assert hasattr(self.timeseries.storage_units_active_power, storage_1) - assert hasattr(self.timeseries.storage_units_reactive_power, storage_1) - timeseries._drop_existing_component_timeseries(self, "storage_units", storage_1) - with pytest.raises(KeyError): - self.timeseries.storage_units_active_power.loc[timeindex, storage_1] - with pytest.raises(KeyError): - self.timeseries.storage_units_reactive_power.loc[timeindex, storage_1] - self.topology.remove_storage_unit(storage_1) - - -class TestReactivePowerTimeSeriesFunctions: - @classmethod + # fmt: on + + self.edisgo.timeseries.from_csv(save_dir, time_series_raw=True) + + # fmt: off + pd.testing.assert_frame_equal( + self.edisgo.timeseries.time_series_raw. 
+ fluctuating_generators_active_power_by_technology, + fluc_gen, + check_freq=False, + ) + # fmt: on + + shutil.rmtree(save_dir) + + def test_integrity_check(self, caplog): + attrs = [ + "loads_active_power", + "loads_reactive_power", + "generators_active_power", + "generators_reactive_power", + "storage_units_active_power", + "storage_units_reactive_power", + ] + # check warning empty timeindex + self.edisgo.timeseries.check_integrity() + assert "No time index set. Empty time series will be returned." in caplog.text + caplog.clear() + # add timeseries + index = pd.date_range("1/1/2018", periods=3, freq="H") + self.edisgo.timeseries.timeindex = index + for attr in attrs: + tmp = attr.split("_") + if len(tmp) == 3: + comp_type = tmp[0] + elif len(tmp) == 4: + comp_type = "_".join(tmp[0:2]) + comps = getattr(self.edisgo.topology, comp_type + "_df").index + setattr( + self.edisgo.timeseries, + comp_type + "_active_power", + pd.DataFrame(index=index, columns=comps, data=0), + ) + setattr( + self.edisgo.timeseries, + comp_type + "_reactive_power", + pd.DataFrame(index=index, columns=comps, data=0), + ) + # check warning for null values + for attr in attrs: + ts_tmp = getattr(self.edisgo.timeseries, attr) + if not ts_tmp.empty: + ts_tmp.iloc[0, 0] = np.NaN + setattr(self.edisgo.timeseries, attr, ts_tmp) + self.edisgo.timeseries.check_integrity() + assert "There are null values in {}".format(attr) in caplog.text + caplog.clear() + ts_tmp.iloc[0, 0] = 0 + setattr(self.edisgo.timeseries, attr, ts_tmp) + # check warning for duplicated indices and columns + for attr in attrs: + ts_tmp = getattr(self.edisgo.timeseries, attr) + if not ts_tmp.empty: + # check for duplicated indices + ts_tmp_duplicated = pd.concat([ts_tmp, ts_tmp.iloc[0:2]]) + setattr(self.edisgo.timeseries, attr, ts_tmp_duplicated) + self.edisgo.timeseries.check_integrity() + assert ( + "{} has duplicated indices: {}".format( + attr, ts_tmp.iloc[0:2].index.values + ) + in caplog.text + ) + caplog.clear() + 
setattr(self.edisgo.timeseries, attr, ts_tmp) + # check for duplicated columns + ts_tmp_duplicated = pd.concat([ts_tmp, ts_tmp.iloc[:, 0:2]], axis=1) + setattr(self.edisgo.timeseries, attr, ts_tmp_duplicated) + self.edisgo.timeseries.check_integrity() + assert ( + "{} has duplicated columns: {}".format( + attr, ts_tmp.iloc[:, 0:2].columns.values + ) + in caplog.text + ) + caplog.clear() + setattr(self.edisgo.timeseries, attr, ts_tmp) + + +class TestTimeSeriesRaw: + @pytest.fixture(autouse=True) def setup_class(self): - self.topology = Topology() - self.timeseries = timeseries.TimeSeries() - self.config = Config() - ding0_import.import_ding0_grid(pytest.ding0_test_network_path, self) - self.timeseries.timeindex = pd.date_range("1/1/1970", periods=2, freq="H") - - def test_set_reactive_power_time_series_for_fixed_cosphi_using_config( - self, - ): - - # test for component_type="generators" - comp_mv_1 = "Generator_1" - comp_mv_2 = "GeneratorFluctuating_2" - comp_lv_1 = "GeneratorFluctuating_25" - comp_lv_2 = "GeneratorFluctuating_26" - - active_power_ts = pd.DataFrame( + # add dummy time series + self.time_series_raw = timeseries.TimeSeriesRaw() + timeindex = pd.date_range("1/1/2018", periods=4, freq="H") + self.df = pd.DataFrame( data={ - comp_mv_1: [0.5, 1.5], - comp_mv_2: [2.5, 3.5], - comp_lv_1: [0.1, 0.0], - comp_lv_2: [0.15, 0.07], + "residential": [1.23, 2.0, 5.0, 6.0], + "industrial": [3.0, 4.0, 7.0, 8.0], }, - index=self.timeseries.timeindex, + index=timeindex, ) - self.timeseries.generators_active_power = active_power_ts - - timeseries._set_reactive_power_time_series_for_fixed_cosphi_using_config( - self, - self.topology.generators_df.loc[[comp_mv_1, comp_mv_2, comp_lv_1], :], - "generators", + self.time_series_raw.conventional_loads_active_power_by_sector = self.df + self.time_series_raw.charging_points_active_power_by_use_case = self.df + self.q_control = pd.DataFrame( + { + "type": ["fixed_cosphi", "fixed_cosphi"], + "q_sign": [1, -1], + "power_factor": 
[1.0, 0.98], + "parametrisation": [np.nan, np.nan], + }, + index=["gen_1", "laod_2"], ) + self.time_series_raw.q_control = self.q_control - assert self.timeseries.generators_reactive_power.shape == (2, 3) - assert np.isclose( - self.timeseries.generators_reactive_power.loc[ - :, [comp_mv_1, comp_mv_2] - ].values, - active_power_ts.loc[:, [comp_mv_1, comp_mv_2]].values * -0.484322, + def test_reduce_memory(self): + + # check with default value + assert ( + self.time_series_raw.conventional_loads_active_power_by_sector.dtypes + == "float64" ).all() - assert np.isclose( - self.timeseries.generators_reactive_power.loc[:, comp_lv_1].values, - active_power_ts.loc[:, comp_lv_1] * -0.328684, + assert self.time_series_raw.q_control.power_factor.dtype == "float64" + self.time_series_raw.reduce_memory() + assert ( + self.time_series_raw.conventional_loads_active_power_by_sector.dtypes + == "float32" ).all() + assert ( + self.time_series_raw.charging_points_active_power_by_use_case.dtypes + == "float32" + ).all() + assert self.time_series_raw.q_control.power_factor.dtype == "float64" - timeseries._set_reactive_power_time_series_for_fixed_cosphi_using_config( - self, self.topology.generators_df.loc[[comp_lv_2], :], "generators" + # check arguments + self.time_series_raw.reduce_memory( + to_type="float16", + attr_to_reduce=["conventional_loads_active_power_by_sector"], ) - # check new time series and that old reactive power time series - # remained unchanged - assert self.timeseries.generators_reactive_power.shape == (2, 4) - assert np.isclose( - self.timeseries.generators_reactive_power.loc[ - :, [comp_mv_1, comp_mv_2] - ].values, - active_power_ts.loc[:, [comp_mv_1, comp_mv_2]].values * -0.484322, + assert ( + self.time_series_raw.conventional_loads_active_power_by_sector.dtypes + == "float16" ).all() - assert np.isclose( - self.timeseries.generators_reactive_power.loc[ - :, [comp_lv_1, comp_lv_2] - ].values, - active_power_ts.loc[:, [comp_lv_1, comp_lv_2]] * -0.328684, + 
assert ( + self.time_series_raw.charging_points_active_power_by_use_case.dtypes + == "float32" + ).all() + + def test_to_csv(self): + + # test with default values + save_dir = os.path.join(os.getcwd(), "timeseries_csv") + self.time_series_raw.to_csv(save_dir) + + files_in_timeseries_dir = os.listdir(save_dir) + assert len(files_in_timeseries_dir) == 3 + assert ( + "conventional_loads_active_power_by_sector.csv" in files_in_timeseries_dir + ) + assert "charging_points_active_power_by_use_case.csv" in files_in_timeseries_dir + assert "q_control.csv" in files_in_timeseries_dir + + shutil.rmtree(save_dir) + + # test with reduce memory True, to_type = float16 and saving TimeSeriesRaw + self.time_series_raw.to_csv(save_dir, reduce_memory=True, to_type="float16") + + assert ( + self.time_series_raw.conventional_loads_active_power_by_sector.dtypes + == "float16" ).all() + files_in_timeseries_dir = os.listdir(save_dir) + assert len(files_in_timeseries_dir) == 3 + + shutil.rmtree(save_dir, ignore_errors=True) + + def test_from_csv(self): + + # write to csv + save_dir = os.path.join(os.getcwd(), "timeseries_csv") + self.time_series_raw.to_csv(save_dir, time_series_raw=True) + + # reset TimeSeriesRaw + self.time_series_raw = timeseries.TimeSeriesRaw() + + self.time_series_raw.from_csv(save_dir) + + pd.testing.assert_frame_equal( + self.time_series_raw.conventional_loads_active_power_by_sector, + self.df, + check_freq=False, + ) + pd.testing.assert_frame_equal( + self.time_series_raw.charging_points_active_power_by_use_case, + self.df, + check_freq=False, + ) + pd.testing.assert_frame_equal( + self.time_series_raw.q_control, + self.q_control, + check_freq=False, + ) + + shutil.rmtree(save_dir) + + +class TestTimeSeriesHelperFunctions: + def test_drop_component_time_series(self): - # test for component_type="loads" - # change bus of load so that it becomes MV load - comp_mv_1 = "Load_retail_MVGrid_1_Load_aggregated_retail_MVGrid_1_1" - self.topology._loads_df.at[comp_mv_1, 
"bus"] = "Bus_BranchTee_MVGrid_1_1" - comp_lv_1 = "Load_residential_LVGrid_7_2" - comp_lv_2 = "Load_agricultural_LVGrid_8_1" + time_series_obj = timeseries.TimeSeries() - active_power_ts = pd.DataFrame( + # check that no error is raised in case of empty dataframe + timeseries.drop_component_time_series( + time_series_obj, "loads_active_power", "Load1" + ) + + # add dummy time series + time_series_obj.timeindex = pd.date_range("1/1/2018", periods=4, freq="H") + df = pd.DataFrame( data={ - comp_mv_1: [0.5, 1.5], - comp_lv_1: [0.1, 0.0], - comp_lv_2: [0.15, 0.07], + "load_1": [1.23, 2.0, 5.0, 6.0], + "load_2": [3.0, 4.0, 7.0, 8.0], }, - index=self.timeseries.timeindex, + index=time_series_obj.timeindex, ) - self.timeseries.loads_active_power = active_power_ts + time_series_obj.loads_active_power = df - timeseries._set_reactive_power_time_series_for_fixed_cosphi_using_config( - self, - self.topology.loads_df.loc[[comp_mv_1, comp_lv_1], :], - "loads", + # check with dropping one existing load and one non-existing load + timeseries.drop_component_time_series( + time_series_obj, "loads_active_power", ["Load1", "load_1"] ) + assert time_series_obj.loads_active_power.shape == (4, 1) + assert "load_1" not in time_series_obj.loads_active_power.columns - assert self.timeseries.loads_reactive_power.shape == (2, 2) - assert np.isclose( - self.timeseries.loads_reactive_power.loc[:, [comp_mv_1]].values, - active_power_ts.loc[:, [comp_mv_1]].values * 0.484322, - ).all() - assert np.isclose( - self.timeseries.loads_reactive_power.loc[:, comp_lv_1].values, - active_power_ts.loc[:, comp_lv_1] * 0.328684, - ).all() + # check with dropping all existing loads + timeseries.drop_component_time_series( + time_series_obj, "loads_active_power", ["load_2"] + ) + assert time_series_obj.loads_active_power.empty - timeseries._set_reactive_power_time_series_for_fixed_cosphi_using_config( - self, self.topology.loads_df.loc[[comp_lv_2], :], "loads" + def test_add_component_time_series(self): + + 
time_series_obj = timeseries.TimeSeries() + time_series_obj.timeindex = pd.date_range("1/1/2018", periods=4, freq="H") + + df = pd.DataFrame( + data={ + "load_1": [1.23, 2.0, 5.0, 6.0], + "load_2": [3.0, 4.0, 7.0, 8.0], + }, + index=time_series_obj.timeindex, ) - assert self.timeseries.loads_reactive_power.shape == (2, 3) - assert np.isclose( - self.timeseries.loads_reactive_power.loc[:, comp_lv_2].values, - active_power_ts.loc[:, comp_lv_2] * 0.328684, - ).all() + # check with matching time index + timeseries._add_component_time_series(time_series_obj, "loads_active_power", df) + assert time_series_obj.loads_active_power.shape == (4, 2) + assert "load_1" in time_series_obj.loads_active_power.columns + + # check with time indexes that do not match + df = pd.DataFrame( + data={ + "load_3": [5.0, 6.0], + "load_4": [7.0, 8.0], + }, + index=time_series_obj.timeindex[0:2], + ) + timeseries._add_component_time_series( + time_series_obj, "loads_active_power", df.iloc[:2] + ) + assert time_series_obj.loads_active_power.shape == (4, 4) + assert "load_3" in time_series_obj.loads_active_power.columns - # test for component_type="storage_units" - comp_mv_1 = "Storage_1" + def test_check_if_components_exist(self): + edisgo_obj = EDisGo(ding0_grid=pytest.ding0_test_network_path) - active_power_ts = pd.DataFrame( - data={comp_mv_1: [0.5, 1.5]}, index=self.timeseries.timeindex + # check all components exist + component_names = timeseries._check_if_components_exist( + edisgo_obj, + ["GeneratorFluctuating_15", "GeneratorFluctuating_24"], + "generators", ) - self.timeseries.storage_units_active_power = active_power_ts + assert len(component_names) == 2 + assert "GeneratorFluctuating_15" in component_names - timeseries._set_reactive_power_time_series_for_fixed_cosphi_using_config( - self, - self.topology.storage_units_df.loc[[comp_mv_1], :], - "storage_units", + # check no components exist + component_names = timeseries._check_if_components_exist( + edisgo_obj, ["Storage_3"], 
"storage_units" ) + assert len(component_names) == 0 - assert self.timeseries.storage_units_reactive_power.shape == (2, 1) - assert np.isclose( - self.timeseries.storage_units_reactive_power.loc[:, [comp_mv_1]].values, - active_power_ts.loc[:, [comp_mv_1]].values * -0.484322, - ).all() + # check some components exist + component_names = timeseries._check_if_components_exist( + edisgo_obj, + ["Load_residential_LVGrid_5_3", "Load_residential_LVGrid_5"], + "loads", + ) + assert len(component_names) == 1 + assert "Load_residential_LVGrid_5_3" in component_names diff --git a/tests/network/test_topology.py b/tests/network/test_topology.py index 5b59ea50d..b6ba8b93f 100644 --- a/tests/network/test_topology.py +++ b/tests/network/test_topology.py @@ -1,3 +1,4 @@ +import logging import os import shutil @@ -5,6 +6,8 @@ import pandas as pd import pytest +from geopandas import GeoDataFrame +from pandas.testing import assert_frame_equal from shapely.geometry import Point from edisgo import EDisGo @@ -12,6 +15,9 @@ from edisgo.network.components import Switch from edisgo.network.grids import LVGrid from edisgo.network.topology import Topology +from edisgo.tools.geopandas_helper import GeoPandasGridContainer + +logger = logging.getLogger(__name__) class TestTopology: @@ -206,32 +212,32 @@ def test_add_load(self): name = self.topology.add_load( load_id=10, bus="Bus_BranchTee_LVGrid_1_4", - p_nom=1, + p_set=1, annual_consumption=2, sector="residential", test_info="test", ) assert len_df_before + 1 == len(self.topology.loads_df) - assert name == "Load_LVGrid_1_residential_10" - assert self.topology.loads_df.at[name, "p_nom"] == 1 + assert name == "Conventional_Load_LVGrid_1_residential_10" + assert self.topology.loads_df.at[name, "p_set"] == 1 assert self.topology.loads_df.at[name, "test_info"] == "test" # test without kwargs name = self.topology.add_load( - bus="Bus_BranchTee_LVGrid_1_4", p_nom=2, annual_consumption=1 + bus="Bus_BranchTee_LVGrid_1_4", p_set=2, annual_consumption=1 
) assert len_df_before + 2 == len(self.topology.loads_df) - assert name == "Load_LVGrid_1_1" - assert self.topology.loads_df.loc[name, "p_nom"] == 2 + assert name == "Conventional_Load_LVGrid_1_9" + assert self.topology.loads_df.loc[name, "p_set"] == 2 assert self.topology.loads_df.loc[name, "sector"] is np.nan # test without kwargs (name created using number of loads in grid) name = self.topology.add_load( - bus="Bus_BranchTee_LVGrid_1_4", p_nom=3, annual_consumption=1 + bus="Bus_BranchTee_LVGrid_1_4", p_set=3, annual_consumption=1 ) assert len_df_before + 3 == len(self.topology.loads_df) - assert name == "Load_LVGrid_1_2" - assert self.topology.loads_df.loc[name, "p_nom"] == 3 + assert name == "Conventional_Load_LVGrid_1_10" + assert self.topology.loads_df.loc[name, "p_set"] == 3 # test error raising if bus is not valid msg = ( @@ -242,7 +248,7 @@ def test_add_load(self): self.topology.add_load( load_id=8, bus="Unknown_bus", - p_nom=1, + p_set=1, annual_consumption=1, sector="retail", ) @@ -254,14 +260,14 @@ def test_add_load(self): # test with kwargs name = self.topology.add_load( bus="Bus_BranchTee_MVGrid_1_8", - p_nom=1, + p_set=1, type="charging_point", sector="home", number=2, test_info="test", ) assert len_df_before + 1 == len(self.topology.charging_points_df) - assert name == "ChargingPoint_MVGrid_1_home_0" + assert name == "Charging_Point_MVGrid_1_home_1" assert self.topology.charging_points_df.at[name, "sector"] == "home" assert self.topology.charging_points_df.at[name, "test_info"] == "test" @@ -269,12 +275,12 @@ def test_add_load(self): name = self.topology.add_load( bus="Bus_BranchTee_LVGrid_1_2", type="charging_point", - p_nom=0.5, + p_set=0.5, sector="work", ) assert len_df_before + 2 == len(self.topology.charging_points_df) - assert name == "ChargingPoint_LVGrid_1_work_0" - assert self.topology.charging_points_df.at[name, "p_nom"] == 0.5 + assert name == "Charging_Point_LVGrid_1_work_1" + assert self.topology.charging_points_df.at[name, "p_set"] == 
0.5 # test error raising if bus is not valid msg = ( @@ -282,7 +288,7 @@ def test_add_load(self): "buses_df." ) with pytest.raises(ValueError, match=msg): - self.topology.add_load(bus="Unknown_bus", p_nom=0.5, sector="work") + self.topology.add_load(bus="Unknown_bus", p_set=0.5, sector="work") def test_add_generator(self): """Test add_generator method""" @@ -337,14 +343,14 @@ def test_add_storage_unit(self): test_info="test", ) assert len_df_before + 1 == len(self.topology.storage_units_df) - assert name == "StorageUnit_LVGrid_1_0" + assert name == "StorageUnit_LVGrid_1_1" assert self.topology.storage_units_df.at[name, "p_nom"] == 1 assert self.topology.storage_units_df.loc[name, "test_info"] == "test" # test without kwargs name = self.topology.add_storage_unit(bus="Bus_BranchTee_LVGrid_1_6", p_nom=2) assert len_df_before + 2 == len(self.topology.storage_units_df) - assert name == "StorageUnit_LVGrid_1_1" + assert name == "StorageUnit_LVGrid_1_2" assert self.topology.storage_units_df.at[name, "p_nom"] == 2 assert self.topology.storage_units_df.at[name, "control"] == "PQ" @@ -513,14 +519,17 @@ def test_check_line_for_removal(self): # test line in ring # add line to create ring - self.topology.lines_df = self.topology.lines_df.append( - pd.DataFrame( - data={ - "bus0": "Bus_BranchTee_LVGrid_2_2", - "bus1": "Bus_BranchTee_LVGrid_2_3", - }, - index=["TestLine"], - ) + self.topology.lines_df = pd.concat( + [ + self.topology.lines_df, + pd.DataFrame( + data={ + "bus0": "Bus_BranchTee_LVGrid_2_2", + "bus1": "Bus_BranchTee_LVGrid_2_3", + }, + index=["TestLine"], + ), + ] ) return_value = self.topology._check_line_for_removal("TestLine") assert return_value @@ -556,7 +565,7 @@ def test_remove_load(self): # check case where only charging point is connected to line, # line and bus are therefore removed as well - name = "ChargingPoint_LVGrid_1_0" + name = "ChargingPoint_LVGrid_1_work_1" bus = "Bus_Load_agricultural_LVGrid_1_1" # get connected line connected_lines = 
self.topology.get_connected_lines_from_bus(bus) @@ -568,7 +577,7 @@ def test_remove_load(self): assert ~(connected_lines.index.isin(self.topology.lines_df.index)).any() # check case where charging point is not the only connected element - name = "ChargingPoint_MVGrid_1_0" + name = "ChargingPoint_MVGrid_1_home_1" bus = "Bus_BranchTee_MVGrid_1_8" # get connected lines connected_lines = self.topology.get_connected_lines_from_bus(bus) @@ -607,7 +616,7 @@ def test_remove_storage_unit(self): # check case where only storage unit is connected to line, # line and bus are therefore removed as well - name = "StorageUnit_LVGrid_1_1" + name = "StorageUnit_LVGrid_1_2" bus = "Bus_BranchTee_LVGrid_1_6" # get connected line connected_lines = self.topology.get_connected_lines_from_bus(bus) @@ -619,7 +628,7 @@ def test_remove_storage_unit(self): assert ~(connected_lines.index.isin(self.topology.lines_df.index)).any() # check case where storage is not the only connected element - name = "StorageUnit_LVGrid_1_0" + name = "StorageUnit_LVGrid_1_1" bus = "Bus_BranchTee_LVGrid_1_3" # get connected lines connected_lines = self.topology.get_connected_lines_from_bus(bus) @@ -633,20 +642,23 @@ def test_remove_line(self): # test try removing line that cannot be removed msg = "Removal of line Line_30000010 would create isolated node." 
- with pytest.raises(AssertionError, match=msg): + with pytest.warns(UserWarning, match=msg): self.topology.remove_line("Line_30000010") # test remove line in cycle (no bus is removed) # add line to create ring line_name = "TestLine_LVGrid_3" - self.topology.lines_df = self.topology.lines_df.append( - pd.DataFrame( - data={ - "bus0": "Bus_BranchTee_LVGrid_3_2", - "bus1": "Bus_BranchTee_LVGrid_3_5", - }, - index=[line_name], - ) + self.topology.lines_df = pd.concat( + [ + self.topology.lines_df, + pd.DataFrame( + data={ + "bus0": "Bus_BranchTee_LVGrid_3_2", + "bus1": "Bus_BranchTee_LVGrid_3_5", + }, + index=[line_name], + ), + ] ) len_df_before = len(self.topology.lines_df) @@ -681,11 +693,14 @@ def test_remove_bus(self): # test bus can be removed # create isolated bus bus_name = "TestBusIsolated" - self.topology.buses_df = self.topology.buses_df.append( - pd.DataFrame( - data={"v_nom": 20}, - index=[bus_name], - ) + self.topology.buses_df = pd.concat( + [ + self.topology.buses_df, + pd.DataFrame( + data={"v_nom": 20}, + index=[bus_name], + ), + ] ) len_df_before = len(self.topology.buses_df) self.topology.remove_bus(bus_name) @@ -802,10 +817,36 @@ class TestTopologyWithEdisgoObject: @pytest.yield_fixture(autouse=True) def setup_class(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + self.edisgo.set_time_series_worst_case_analysis() + + def test_to_geopandas(self): + geopandas_container = self.edisgo.topology.to_geopandas() + + assert isinstance(geopandas_container, GeoPandasGridContainer) + + attrs = [ + "buses_gdf", + "generators_gdf", + "lines_gdf", + "loads_gdf", + "storage_units_gdf", + "transformers_gdf", + ] + + for attr_str in attrs: + attr = getattr(geopandas_container, attr_str) + grid_attr = getattr( + self.edisgo.topology.mv_grid, attr_str.replace("_gdf", "_df") + ) + + assert isinstance(attr, GeoDataFrame) + + common_cols 
= list(set(attr.columns).intersection(grid_attr.columns)) + + assert_frame_equal( + attr[common_cols], grid_attr[common_cols], check_names=False + ) def test_from_csv(self): """ @@ -982,16 +1023,16 @@ def test_connect_to_mv(self): x = self.edisgo.topology.buses_df.at["Bus_GeneratorFluctuating_2", "x"] y = self.edisgo.topology.buses_df.at["Bus_GeneratorFluctuating_2", "y"] geom = Point((x, y)) - test_gen = { + test_cp = { "geom": geom, - "p_nom": 2.5, + "p_set": 2.5, "sector": "fast", "number": 10, "voltage_level": 4, } comp_name = self.edisgo.topology.connect_to_mv( - self.edisgo, test_gen, comp_type="ChargingPoint" + self.edisgo, test_cp, comp_type="charging_point" ) # check if number of buses increased @@ -1016,7 +1057,7 @@ def test_connect_to_mv(self): # check new generator assert ( self.edisgo.topology.charging_points_df.at[comp_name, "number"] - == test_gen["number"] + == test_cp["number"] ) def test_connect_to_lv(self): @@ -1267,7 +1308,7 @@ def test_connect_to_lv(self): # add charging point test_cp = { - "p_nom": 0.01, + "p_set": 0.01, "geom": geom, "sector": "home", "voltage_level": 7, @@ -1275,7 +1316,7 @@ def test_connect_to_lv(self): } comp_name = self.edisgo.topology.connect_to_lv( - self.edisgo, test_cp, comp_type="ChargingPoint" + self.edisgo, test_cp, comp_type="charging_point" ) # check that number of buses stayed the same @@ -1290,7 +1331,7 @@ def test_connect_to_lv(self): assert bus == "Bus_BranchTee_LVGrid_3_6" assert self.edisgo.topology.buses_df.at[bus, "lv_grid_id"] == 3 # check new charging point - assert self.edisgo.topology.charging_points_df.at[comp_name, "p_nom"] == 0.01 + assert self.edisgo.topology.charging_points_df.at[comp_name, "p_set"] == 0.01 # test voltage level 7 - use case work (connected to agricultural load) @@ -1300,7 +1341,7 @@ def test_connect_to_lv(self): # add charging point test_cp = { - "p_nom": 0.02, + "p_set": 0.02, "number": 2, "geom": geom, "sector": "work", @@ -1309,7 +1350,7 @@ def test_connect_to_lv(self): } 
comp_name = self.edisgo.topology.connect_to_lv( - self.edisgo, test_cp, comp_type="ChargingPoint" + self.edisgo, test_cp, comp_type="charging_point" ) # check that number of buses stayed the same @@ -1335,7 +1376,7 @@ def test_connect_to_lv(self): # add charging point test_cp = { - "p_nom": 0.02, + "p_set": 0.02, "number": 2, "geom": geom, "sector": "public", @@ -1344,7 +1385,7 @@ def test_connect_to_lv(self): } comp_name = self.edisgo.topology.connect_to_lv( - self.edisgo, test_cp, comp_type="ChargingPoint" + self.edisgo, test_cp, comp_type="charging_point" ) # check that number of buses stayed the same @@ -1360,3 +1401,109 @@ def test_connect_to_lv(self): assert self.edisgo.topology.buses_df.at[bus, "lv_grid_id"] == 3 # check new charging point assert self.edisgo.topology.charging_points_df.at[comp_name, "number"] == 2 + + def test_check_integrity(self, caplog): + """Test of validation of grids.""" + comps_dict = { + "buses": "BusBar_MVGrid_1_LVGrid_2_MV", + "generators": "GeneratorFluctuating_14", + "loads": "Load_residential_LVGrid_3_2", + "transformers": "LVStation_5_transformer_1", + "lines": "Line_10014", + "switches": "circuit_breaker_1", + } + # check duplicate node + for comp, name in comps_dict.items(): + new_comp = getattr(self.edisgo.topology, "_{}_df".format(comp)).loc[name] + comps = getattr(self.edisgo.topology, "_{}_df".format(comp)) + setattr(self.edisgo.topology, "_{}_df".format(comp), comps.append(new_comp)) + self.edisgo.topology.check_integrity() + assert ( + f"{name} have duplicate entry in one of the following components' " + f"dataframes: {comp}." 
in caplog.text + ) + caplog.clear() + + # reset dataframe + setattr(self.edisgo.topology, "_{}_df".format(comp), comps) + self.edisgo.topology.check_integrity() + + # check not connected generator and load + for nodal_component in ["loads", "generators"]: + comps = getattr(self.edisgo.topology, "_{}_df".format(nodal_component)) + new_comp = comps.loc[comps_dict[nodal_component]] + new_comp.name = "new_nodal_component" + new_comp.bus = "Non_existent_bus_" + nodal_component + setattr( + self.edisgo.topology, + "_{}_df".format(nodal_component), + comps.append(new_comp), + ) + self.edisgo.topology.check_integrity() + assert ( + "The following {} have buses which are not defined: {}.".format( + nodal_component, new_comp.name + ) + in caplog.text + ) + caplog.clear() + # reset dataframe + setattr(self.edisgo.topology, "_{}_df".format(nodal_component), comps) + self.edisgo.topology.check_integrity() + + # check branch components + i = 0 + for branch_component in ["lines", "transformers"]: + comps = getattr(self.edisgo.topology, "_{}_df".format(branch_component)) + new_comp = comps.loc[comps_dict[branch_component]] + new_comp.name = "new_branch_component" + setattr( + new_comp, + "bus" + str(i), + "Non_existent_bus_" + branch_component, + ) + setattr( + self.edisgo.topology, + "_{}_df".format(branch_component), + comps.append(new_comp), + ) + self.edisgo.topology.check_integrity() + assert ( + "The following {} have bus{} which are not defined: {}.".format( + branch_component, i, new_comp.name + ) + in caplog.text + ) + caplog.clear() + # reset dataframe + setattr(self.edisgo.topology, "_{}_df".format(branch_component), comps) + self.edisgo.topology.check_integrity() + i += 1 + + # check switches + comps = self.edisgo.topology.switches_df + for attr in ["bus_open", "bus_closed"]: + new_comp = comps.loc[comps_dict["switches"]] + new_comp.name = "new_switch" + new_comps = comps.append(new_comp) + new_comps.at[new_comp.name, attr] = "Non_existent_" + attr + 
self.edisgo.topology.switches_df = new_comps + self.edisgo.topology.check_integrity() + assert ( + "The following switches have {} which are not defined: {}.".format( + attr, new_comp.name + ) + in caplog.text + ) + caplog.clear() + self.edisgo.topology.switches_df = comps + self.edisgo.topology.check_integrity() + + # check isolated node + bus = self.edisgo.topology.buses_df.loc[comps_dict["buses"]] + bus.name = "New_bus" + self.edisgo.topology.buses_df = self.edisgo.topology.buses_df.append(bus) + self.edisgo.topology.check_integrity() + assert "The following buses are isolated: {}.".format(bus.name) in caplog.text + assert "The network has isolated nodes or edges." in caplog.text + caplog.clear() diff --git a/tests/test_edisgo.py b/tests/test_edisgo.py index 42128e2bc..1de7f4485 100755 --- a/tests/test_edisgo.py +++ b/tests/test_edisgo.py @@ -1,320 +1,310 @@ +import logging import os import shutil -from math import acos, tan - import numpy as np import pandas as pd import pytest from matplotlib import pyplot as plt +from pandas.util.testing import assert_frame_equal from shapely.geometry import Point from edisgo import EDisGo -from edisgo.flex_opt import check_tech_constraints as checks class TestEDisGo: - @classmethod - def setup_class(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", + @pytest.fixture(autouse=True) + def setup_fixture(self): + """ + Fixture to set up new EDisGo object before each test function. 
+ + """ + self.setup_edisgo_object() + + def setup_edisgo_object(self): + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + + def setup_worst_case_time_series(self): + self.edisgo.set_time_series_worst_case_analysis() + + def test_set_time_series_manual(self, caplog): + + timeindex = pd.date_range("1/1/2018", periods=3, freq="H") + gens_ts = pd.DataFrame( + data={ + "GeneratorFluctuating_15": [2.0, 5.0, 6.0], + "GeneratorFluctuating_24": [4.0, 7.0, 8.0], + }, + index=timeindex, + ) + loads_ts = pd.DataFrame( + data={ + "Load_residential_LVGrid_5_3": [2.0, 5.0, 6.0], + }, + index=timeindex, + ) + storage_units_ts = pd.DataFrame( + data={ + "Storage_1": [4.0, 7.0, 8.0], + }, + index=timeindex, + ) + + # test setting some time series and with no time index being previously set + with caplog.at_level(logging.WARNING): + self.edisgo.set_time_series_manual( + generators_p=gens_ts, + generators_q=gens_ts, + loads_p=loads_ts, + storage_units_q=storage_units_ts, + ) + assert ( + "When setting time series manually a time index is not automatically " + "set" in caplog.text + ) + assert self.edisgo.timeseries.generators_active_power.shape == (0, 2) + self.edisgo.set_timeindex(timeindex) + assert_frame_equal(gens_ts, self.edisgo.timeseries.generators_active_power) + assert_frame_equal(gens_ts, self.edisgo.timeseries.generators_reactive_power) + assert_frame_equal(loads_ts, self.edisgo.timeseries.loads_active_power) + assert self.edisgo.timeseries.loads_reactive_power.empty + assert self.edisgo.timeseries.storage_units_active_power.empty + assert_frame_equal( + storage_units_ts, self.edisgo.timeseries.storage_units_reactive_power + ) + + # test overwriting time series and with some components that do not exist + timeindex2 = pd.date_range("1/1/2018", periods=4, freq="H") + gens_ts2 = pd.DataFrame( + data={ + "GeneratorFluctuating_15": [1.0, 2.0, 5.0, 6.0], + "GeneratorFluctuating_14": [5.0, 2.0, 5.0, 6.0], + "GeneratorFluctuating_x": [8.0, 4.0, 7.0, 8.0], + }, + 
index=timeindex2, + ) + loads_ts2 = pd.DataFrame( + data={ + "Load_residential_LVGrid_5_3": [2.0, 5.0, 6.0], + "Load_residential_LVGrid_x": [2.0, 5.0, 6.0], + }, + index=timeindex, + ) + self.edisgo.set_time_series_manual( + generators_p=gens_ts2, loads_p=loads_ts2, storage_units_p=storage_units_ts + ) + assert self.edisgo.timeseries.generators_active_power.shape == (3, 3) + assert_frame_equal( + gens_ts2.loc[ + timeindex, ["GeneratorFluctuating_15", "GeneratorFluctuating_14"] + ], + self.edisgo.timeseries.generators_active_power.loc[ + :, ["GeneratorFluctuating_15", "GeneratorFluctuating_14"] + ], + ) + assert_frame_equal( + gens_ts.loc[:, ["GeneratorFluctuating_24"]], + self.edisgo.timeseries.generators_active_power.loc[ + :, ["GeneratorFluctuating_24"] + ], + ) + assert_frame_equal(gens_ts, self.edisgo.timeseries.generators_reactive_power) + assert_frame_equal( + loads_ts2.loc[:, ["Load_residential_LVGrid_5_3"]], + self.edisgo.timeseries.loads_active_power.loc[ + :, ["Load_residential_LVGrid_5_3"] + ], + ) + assert self.edisgo.timeseries.loads_reactive_power.empty + assert_frame_equal( + storage_units_ts, self.edisgo.timeseries.storage_units_active_power + ) + assert_frame_equal( + storage_units_ts, self.edisgo.timeseries.storage_units_reactive_power ) - self.timesteps = pd.date_range("1/1/1970", periods=2, freq="H") - def test_exceptions(self): - msg = ( - "No power flow results to check over-load for. Please perform " - "power flow analysis first." + def test_set_time_series_worst_case_analysis(self): + self.edisgo.set_time_series_worst_case_analysis( + cases="load_case", generators_names=["Generator_1"], loads_names=[] ) - with pytest.raises(Exception, match=msg): - checks.mv_line_load(self.edisgo) - self.edisgo.analyze() - msg = "Inserted grid is invalid." 
- with pytest.raises(ValueError, match=msg): - checks._station_load(self.edisgo, None) + assert self.edisgo.timeseries.generators_active_power.shape == (2, 1) + assert self.edisgo.timeseries.generators_reactive_power.shape == (2, 1) + assert self.edisgo.timeseries.loads_active_power.shape == (2, 0) + assert self.edisgo.timeseries.loads_reactive_power.shape == (2, 0) + assert self.edisgo.timeseries.storage_units_active_power.shape == (2, 1) + assert self.edisgo.timeseries.storage_units_reactive_power.shape == (2, 1) - def test_save(self): - cur_dir = os.getcwd() - self.edisgo.save(cur_dir) - # Todo: check values? - # Todo: check files before rmtree? - shutil.rmtree(os.path.join(cur_dir, "results")) - shutil.rmtree(os.path.join(cur_dir, "topology")) - shutil.rmtree(os.path.join(cur_dir, "timeseries")) - - def test_crit_station(self): - # TODO: have checks of technical constraints not require edisgo - # object and then move this test - # calculate results if not already existing - if self.edisgo.results.pfa_p.empty: - self.edisgo.analyze() - # check results - overloaded_mv_station = checks.hv_mv_station_load(self.edisgo) - assert overloaded_mv_station.empty - overloaded_lv_station = checks.mv_lv_station_load(self.edisgo) - assert len(overloaded_lv_station) == 4 - assert np.isclose( - overloaded_lv_station.at["LVGrid_1", "s_missing"], - 0.01936, - atol=1e-5, + self.edisgo.set_time_series_worst_case_analysis() + assert self.edisgo.timeseries.generators_active_power.shape == ( + 4, + len(self.edisgo.topology.generators_df), ) - assert overloaded_lv_station.at["LVGrid_1", "time_index"] == self.timesteps[1] - assert np.isclose( - overloaded_lv_station.at["LVGrid_4", "s_missing"], - 0.03427, - atol=1e-5, - ) - assert overloaded_lv_station.at["LVGrid_4", "time_index"] == self.timesteps[0] - - def test_crit_lines(self): - # TODO: have checks of technical constraints not require edisgo - # object and then move this test - if self.edisgo.results.i_res.empty: - 
self.edisgo.analyze() - mv_crit_lines = checks.mv_line_load(self.edisgo) - lv_crit_lines = checks.lv_line_load(self.edisgo) - assert len(lv_crit_lines) == 2 - assert (lv_crit_lines.time_index == self.timesteps[1]).all() - assert np.isclose( - lv_crit_lines.at["Line_50000002", "max_rel_overload"], - 1.02055, - atol=1e-5, + assert self.edisgo.timeseries.generators_reactive_power.shape == ( + 4, + len(self.edisgo.topology.generators_df), ) - assert np.isclose( - lv_crit_lines.at["Line_60000001", "max_rel_overload"], - 1.03730, - atol=1e-5, + assert self.edisgo.timeseries.loads_active_power.shape == ( + 4, + len(self.edisgo.topology.loads_df), ) - assert len(mv_crit_lines) == 4 - assert (mv_crit_lines.time_index == self.timesteps[0]).all() - assert np.isclose( - mv_crit_lines.at["Line_10006", "max_rel_overload"], - 1.16306, - atol=1e-5, + assert self.edisgo.timeseries.loads_reactive_power.shape == ( + 4, + len(self.edisgo.topology.loads_df), ) - assert np.isclose( - mv_crit_lines.at["Line_10026", "max_rel_overload"], - 1.06230, - atol=1e-5, + assert self.edisgo.timeseries.storage_units_active_power.shape == ( + 4, + len(self.edisgo.topology.storage_units_df), + ) + assert self.edisgo.timeseries.storage_units_reactive_power.shape == ( + 4, + len(self.edisgo.topology.storage_units_df), ) - def test_analyze(self): - if self.edisgo.results.grid_losses.empty: - self.edisgo.analyze() - # check results - assert np.isclose( - self.edisgo.results.grid_losses.loc[self.timesteps].values, - np.array([[0.16484, 0.55544], [0.41859, 0.17233]]), - atol=1e-5, - ).all() - assert np.isclose( - self.edisgo.results.pfa_slack.loc[self.timesteps].values, - np.array([[-21.68225, 10.54364], [1.36397, 0.13840]]), - atol=1e-5, - ).all() - assert np.isclose( - self.edisgo.results.v_res.loc[ - self.timesteps, "Bus_BranchTee_LVGrid_4_2" - ].values, - np.array([1.01695, 0.99917]), - atol=1e-5, - ).all() - assert np.isclose( - self.edisgo.results.v_res.loc[ - self.timesteps, 
"virtual_BusBar_MVGrid_1_LVGrid_4_MV" - ].values, - np.array([1.00630, 0.99930]), - atol=1e-5, - ).all() - assert np.isclose( - self.edisgo.results.pfa_p.loc[self.timesteps, "Line_60000002"].values, - np.array([0.00801, 0.08144]), - atol=1e-5, - ).all() - assert np.isclose( - self.edisgo.results.pfa_q.loc[self.timesteps, "Line_60000002"].values, - np.array([0.00263, 0.02661]), - atol=1e-5, - ).all() - assert np.isclose( - self.edisgo.results.i_res.loc[ - self.timesteps, ["Line_10005", "Line_90000021"] - ].values, - np.array([[0.22308, 0.00019], [0.00004, 0.00188]]), - atol=1e-5, - ).all() - - def test_reinforce(self): - results = self.edisgo.reinforce(combined_analysis=True) - assert results.unresolved_issues.empty - assert len(results.grid_expansion_costs) == 10 - assert len(results.equipment_changes) == 10 - # Todo: test other relevant values - - def test_to_pypsa(self): - # run powerflow and check results - pypsa_network = self.edisgo.to_pypsa() - pf_results = pypsa_network.pf(self.timesteps[0]) - - if all(pf_results["converged"]["0"].tolist()): - print("network converged") - else: - raise ValueError("Power flow analysis did not converge.") - # ToDo maybe move slack test somewhere else - slack_df = pypsa_network.generators[pypsa_network.generators.control == "Slack"] - assert len(slack_df) == 1 - assert slack_df.bus.values[0] == "Bus_MVStation_1" - # test exception - msg = "The entered mode is not a valid option." - with pytest.raises(ValueError, match=msg): - self.edisgo.to_pypsa(mode="unknown") - - def test_generator_import(self): - """This function just checks if API to import generators exists but - generator import for test grid will not work and raise an error.""" + def test_set_time_series_active_power_predefined(self, caplog): - # test exception - msg = ( - "At least one imported generator is not located in the MV " - "grid area. Check compatibility of grid and generator " - "datasets." 
+ # check warning + self.edisgo.set_time_series_active_power_predefined( + fluctuating_generators_ts="oedb" ) - with pytest.raises(ValueError, match=msg): - self.edisgo.import_generators("nep2035") - - def test_mv_to_pypsa(self): - # test only mv - pypsa_network = self.edisgo.to_pypsa(mode="mv") - pf_results = pypsa_network.pf(self.timesteps[0]) - # check if pf converged - if all(pf_results["converged"]["0"].tolist()): - print("mv converged") - else: - raise ValueError("Power flow analysis did not converge.") - # ToDo maybe move slack test somewhere else - slack_df = pypsa_network.generators[pypsa_network.generators.control == "Slack"] - assert len(slack_df) == 1 - assert slack_df.bus.values[0] == "Bus_MVStation_1" - # test mvlv - pypsa_network = self.edisgo.to_pypsa(mode="mvlv") - pf_results = pypsa_network.pf(self.timesteps[0]) - # check if pf converged - if all(pf_results["converged"]["0"].tolist()): - print("mvlv converged") - else: - raise ValueError("Power flow analysis did not converge.") - # ToDo maybe move slack test somewhere else - slack_df = pypsa_network.generators[pypsa_network.generators.control == "Slack"] - assert len(slack_df) == 1 - assert slack_df.bus.values[0] == "Bus_MVStation_1" - # test only mv aggregating loads by sector and generators by - # curtailability - pypsa_network = self.edisgo.to_pypsa( - mode="mv", - aggregate_generators="curtailable", - aggregate_loads="sectoral", - ) - pf_results = pypsa_network.pf(self.timesteps[0]) - # check if pf converged - if all(pf_results["converged"]["0"].tolist()): - print("mv converged") - else: - raise ValueError("Power flow analysis did not converge.") - # ToDo maybe move slack test somewhere else - slack_df = pypsa_network.generators[pypsa_network.generators.control == "Slack"] - assert len(slack_df) == 1 - assert slack_df.bus.values[0] == "Bus_MVStation_1" - assert np.isclose( - pypsa_network.generators_t["p_set"].loc[ - self.timesteps, "LVGrid_1_fluctuating" + assert ( + "When setting time series 
using predefined profiles a time index is" + in caplog.text + ) + + # check if right functions are called + timeindex = pd.date_range("1/1/2011 12:00", periods=2, freq="H") + self.edisgo.timeseries.timeindex = timeindex + ts_fluc = pd.DataFrame( + data={ + "wind": [5, 6], + }, + index=timeindex, + ) + ts_disp = pd.DataFrame( + data={ + "other": [5, 6], + }, + index=timeindex, + ) + ts_cp = pd.DataFrame( + data={ + "hpc": [5, 6], + }, + index=timeindex, + ) + self.edisgo.topology._loads_df.loc[ + "Load_residential_LVGrid_1_4", ["type", "sector"] + ] = ("charging_point", "hpc") + + self.edisgo.set_time_series_active_power_predefined( + fluctuating_generators_ts=ts_fluc, + fluctuating_generators_names=["GeneratorFluctuating_8"], + dispatchable_generators_ts=ts_disp, + dispatchable_generators_names=["Generator_1"], + conventional_loads_ts="demandlib", + conventional_loads_names=[ + "Load_residential_LVGrid_3_2", + "Load_residential_LVGrid_3_3", ], - [0.04845, 0], + charging_points_ts=ts_cp, + charging_points_names=None, + ) + + assert self.edisgo.timeseries.generators_active_power.shape == (2, 2) + assert self.edisgo.timeseries.generators_reactive_power.shape == (2, 0) + assert self.edisgo.timeseries.loads_active_power.shape == (2, 3) + assert self.edisgo.timeseries.loads_reactive_power.shape == (2, 0) + assert self.edisgo.timeseries.storage_units_active_power.shape == (2, 0) + assert self.edisgo.timeseries.storage_units_reactive_power.shape == (2, 0) + + def test_set_time_series_reactive_power_control(self): + + # set active power time series for fixed cosphi + timeindex = pd.date_range("1/1/1970", periods=3, freq="H") + self.edisgo.set_timeindex(timeindex) + ts_solar = np.array([0.1, 0.2, 0.3]) + ts_wind = [0.4, 0.5, 0.6] + self.edisgo.set_time_series_active_power_predefined( + fluctuating_generators_ts=pd.DataFrame( + {"solar": ts_solar, "wind": ts_wind}, index=timeindex + ), + dispatchable_generators_ts=pd.DataFrame( + {"other": ts_solar}, index=timeindex + ), + 
conventional_loads_ts="demandlib", + ) + + # test only setting reactive power for one generator + gen = "GeneratorFluctuating_4" # solar MV generator + self.edisgo.set_time_series_reactive_power_control( + generators_parametrisation=pd.DataFrame( + { + "components": [[gen]], + "mode": ["default"], + "power_factor": ["default"], + }, + index=[1], + ), + loads_parametrisation=None, + storage_units_parametrisation=None, + ) + assert self.edisgo.timeseries.generators_reactive_power.shape == (3, 1) + assert self.edisgo.timeseries.loads_reactive_power.empty + assert self.edisgo.timeseries.storage_units_reactive_power.empty + assert ( + np.isclose( + self.edisgo.timeseries.generators_reactive_power.loc[:, gen], + ts_solar * -np.tan(np.arccos(0.9)) * 1.93, + ) ).all() - assert np.isclose( - pypsa_network.loads_t["p_set"].loc[self.timesteps, "LVGrid_1_agricultural"], - [0.01569, 0.1569], + + # test changing only configuration of one load + load = "Load_residential_LVGrid_1_5" + self.edisgo.set_time_series_reactive_power_control( + loads_parametrisation=pd.DataFrame( + { + "components": [ + [load], + self.edisgo.topology.loads_df.index.drop([load]), + ], + "mode": ["capacitive", "default"], + "power_factor": [0.98, "default"], + }, + index=[1, 2], + ), + storage_units_parametrisation=None, + ) + assert self.edisgo.timeseries.generators_reactive_power.shape == (3, 28) + assert self.edisgo.timeseries.loads_reactive_power.shape == (3, 50) + assert ( + np.isclose( + self.edisgo.timeseries.loads_reactive_power.loc[:, load], + self.edisgo.timeseries.loads_active_power.loc[:, load] + * -np.tan(np.arccos(0.98)), + ) ).all() - def test_lv_to_pypsa(self): - # test lv to pypsa - pypsa_network = self.edisgo.to_pypsa(mode="lv", lv_grid_name="LVGrid_2") - pf_results = pypsa_network.pf(self.timesteps[0]) - # check if pf converged - if all(pf_results["converged"]["0"].tolist()): - print("lv converged") - else: - raise ValueError("Power flow analysis did not converge.") - # ToDo maybe move 
slack test somewhere else - slack_df = pypsa_network.generators[pypsa_network.generators.control == "Slack"] - assert len(slack_df) == 1 - assert slack_df.bus.values[0] == "BusBar_MVGrid_1_LVGrid_2_LV" - # test exception - msg = "For exporting lv grids, name of lv_grid has to be provided." - with pytest.raises(ValueError, match=msg): - self.edisgo.to_pypsa(mode="lv") + def test_to_pypsa(self): - def test_mv_lv_to_pypsa_with_charging_points(self): + self.setup_worst_case_time_series() - # add charging points to LVGrid - cp1 = self.edisgo.add_component( - comp_type="ChargingPoint", - ts_active_power=pd.Series( - data=np.array([0.01, 0.02]), index=self.timesteps - ), - ts_reactive_power=pd.Series( - data=np.array([0.04, 0.03]), index=self.timesteps - ), - bus="BusBar_MVGrid_1_LVGrid_2_LV", - p_nom=0.005, - sector="work", - ) - cp2 = self.edisgo.add_component( - comp_type="ChargingPoint", - ts_active_power=pd.Series( - data=np.array([0.05, 0.06]), index=self.timesteps - ), - ts_reactive_power=pd.Series( - data=np.array([0.08, 0.07]), index=self.timesteps - ), - bus="BusBar_MVGrid_1_LVGrid_2_LV", - p_nom=0.005, - sector="work", - ) - # set charging points timeseries - # Todo: Check timeseries (has to be moved to add component) - # active_power = pd.DataFrame(data=np.array([[1, 2], [4, 3]]), - # index=self.timesteps, - # columns=['ChargingPoint_LVGrid_2_0', - # 'ChargingPoint_LVGrid_2_1']) - # reactive_power = pd.DataFrame(data=np.array([[6, 8], [7, 9]]), - # index=self.timesteps, - # columns=['ChargingPoint_LVGrid_2_0', - # 'ChargingPoint_LVGrid_2_1']) - # test lv to pypsa - pypsa_network = self.edisgo.to_pypsa(mode="lv", lv_grid_name="LVGrid_2") - pf_results = pypsa_network.pf(self.timesteps[0]) - # check if pf converged - if all(pf_results["converged"]["0"].tolist()): - print("lv converged") - else: - raise ValueError("Power flow analysis did not converge.") - # test lv to pypsa aggregate + # test mode None and timesteps None (default) + pypsa_network = 
self.edisgo.to_pypsa() + assert len(pypsa_network.buses) == 140 + assert len(pypsa_network.buses_t.v_mag_pu_set) == 4 + + # test mode "mv" and timesteps given pypsa_network = self.edisgo.to_pypsa( - mode="mvlv", - aggregate_generators="curtailable", - aggregate_loads="sectoral", + mode="mv", timesteps=self.edisgo.timeseries.timeindex[0] ) - pf_results = pypsa_network.pf(self.timesteps[0]) - # check if pf converged - if all(pf_results["converged"]["0"].tolist()): - print("lv converged") - else: - raise ValueError("Power flow analysis did not converge.") + assert len(pypsa_network.buses) == 31 + assert len(pypsa_network.buses_t.v_mag_pu_set) == 1 - self.edisgo.remove_component("ChargingPoint", cp1) - self.edisgo.remove_component("ChargingPoint", cp2) + # test exception + msg = "Provide proper mode or leave it empty to export entire network topology." + with pytest.raises(ValueError, match=msg): + self.edisgo.to_pypsa(mode="unknown") def test_to_graph(self): graph = self.edisgo.to_graph() @@ -324,127 +314,57 @@ def test_to_graph(self): + len(self.edisgo.topology.transformers_df.bus0.unique()) ) - def test_edisgo_timeseries_analysis(self): - dirname = os.path.dirname(__file__) - test_network_directory = os.path.join(dirname, "ding0_test_network_1") - timeindex = pd.date_range("1/1/2011", periods=8760, freq="H") - ts_gen_dispatchable = pd.DataFrame({"other": [0.775] * 8760}, index=timeindex) - ts_storage = pd.DataFrame({"Storage_1": [0.0] * 8760}, index=timeindex) - edisgo = EDisGo( - ding0_grid=test_network_directory, - timeseries_generation_fluctuating="oedb", - timeseries_generation_dispatchable=ts_gen_dispatchable, - timeseries_load="demandlib", - timeseries_storage_units=ts_storage, - ) - # check if export to pypsa is possible to make sure all values are set - pypsa_network = edisgo.to_pypsa() - assert len(pypsa_network.generators_t["p_set"]) == 8760 - assert len(pypsa_network.generators_t["q_set"]) == 8760 - assert len(pypsa_network.loads_t["p_set"]) == 8760 - 
assert len(pypsa_network.loads_t["q_set"]) == 8760 - # Todo: relocate? Check other values - edisgo.analyze(timesteps=timeindex[range(10)]) - print() + @pytest.mark.slow + def test_generator_import(self): + edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_2_path) + edisgo.import_generators("nep2035") + assert len(edisgo.topology.generators_df) == 1636 - def test_plot_mv_grid_topology(self): - plt.ion() - self.edisgo.plot_mv_grid_topology(technologies=True) - plt.close("all") - self.edisgo.plot_mv_grid_topology() - plt.close("all") + def test_analyze(self): - def test_plot_mv_voltages(self): - plt.ion() - # if not already done so, analyse grid - try: - if self.results.v_res is None: - self.edisgo.analyze() - except AttributeError: - self.edisgo.analyze() - except ValueError: - pass - # plot mv voltages - self.edisgo.plot_mv_voltages() - plt.close("all") + self.setup_worst_case_time_series() - def test_plot_mv_line_loading(self): - # if not already done so, analyse grid - plt.ion() - try: - if self.edisgo.results.i_res.empty: - self.edisgo.analyze() - except AttributeError: - self.edisgo.analyze() - # plot mv line loading - self.edisgo.plot_mv_line_loading() - plt.close("all") + # test mode None and timesteps None (default) + self.edisgo.analyze() + assert self.edisgo.results.v_res.shape == (4, 140) - def test_plot_mv_grid_expansion_costs(self): - plt.ion() - try: - if self.edisgo.results.grid_expansion_costs.empty: - self.edisgo.reinforce() - except AttributeError: - self.edisgo.reinforce() - # plot grid expansion costs - self.edisgo.plot_mv_grid_expansion_costs() - plt.close("all") + # test mode "mv" and timesteps given + self.edisgo.analyze(mode="mv", timesteps=self.edisgo.timeseries.timeindex[0]) + assert self.edisgo.results.v_res.shape == (1, 31) - def test_plot_mv_storage_integration(self): - plt.ion() - storage_1 = self.edisgo.topology.add_storage_unit( - "Bus_BranchTee_MVGrid_1_8", 0.3 - ) - storage_2 = self.edisgo.topology.add_storage_unit( - 
"Bus_BranchTee_MVGrid_1_8", 0.6 - ) - storage_3 = self.edisgo.topology.add_storage_unit( - "Bus_BranchTee_MVGrid_1_10", 0.3 - ) - self.edisgo.plot_mv_storage_integration() - plt.close("all") - self.edisgo.topology.remove_storage_unit(storage_1) - self.edisgo.topology.remove_storage_unit(storage_2) - self.edisgo.topology.remove_storage_unit(storage_3) + # test mode "lv" + self.edisgo.analyze(mode="lv", lv_grid_name="LVGrid_1") + assert self.edisgo.results.v_res.shape == (4, 15) - def test_histogramm_voltage(self): - plt.ion() - # if not already done so, analyse grid - try: - if self.edisgo.results.v_res.empty: - self.edisgo.analyze() - except AttributeError: - self.edisgo.analyze() + # ToDo: test non convergence - self.edisgo.histogram_voltage() - plt.close("all") + def test_reinforce(self): + self.setup_worst_case_time_series() + results = self.edisgo.reinforce(combined_analysis=True) + assert results.unresolved_issues.empty + assert len(results.grid_expansion_costs) == 10 + assert len(results.equipment_changes) == 10 + # Todo: test other relevant values - def test_histogramm_relative_line_load(self): - plt.ion() - try: - if self.edisgo.results.i_res.empty: - self.edisgo.analyze() - except AttributeError: - self.edisgo.analyze() + def test_add_component(self, caplog): - self.edisgo.histogram_relative_line_load() - plt.close("all") + self.setup_worst_case_time_series() + index = self.edisgo.timeseries.timeindex + dummy_ts = pd.Series(data=[0.1, 0.2, 0.1, 0.2], index=index) - def test_add_component(self): - """Test add_component method""" # Test add bus num_buses = len(self.edisgo.topology.buses_df) bus_name = self.edisgo.add_component( - comp_type="Bus", bus_name="Testbus", v_nom=20 + comp_type="bus", bus_name="Testbus", v_nom=20 ) assert bus_name == "Testbus" assert len(self.edisgo.topology.buses_df) == num_buses + 1 assert self.edisgo.topology.buses_df.loc["Testbus", "v_nom"] == 20 + # Test add line num_lines = len(self.edisgo.topology.lines_df) line_name = 
self.edisgo.add_component( - comp_type="Line", + comp_type="line", bus0="Bus_MVStation_1", bus1="Testbus", length=0.001, @@ -455,44 +375,64 @@ def test_add_component(self): assert self.edisgo.topology.lines_df.loc[line_name, "bus0"] == "Bus_MVStation_1" assert self.edisgo.topology.lines_df.loc[line_name, "bus1"] == "Testbus" assert self.edisgo.topology.lines_df.loc[line_name, "length"] == 0.001 - # Test add load + + # Test add load (with time series) num_loads = len(self.edisgo.topology.loads_df) load_name = self.edisgo.add_component( - comp_type="Load", + comp_type="load", + type="conventional_load", load_id=4, bus="Testbus", - p_nom=0.2, + p_set=0.2, annual_consumption=3.2, sector="residential", + ts_active_power=dummy_ts, + ts_reactive_power=dummy_ts, ) - assert load_name == "Load_MVGrid_1_residential_4" + assert load_name == "Conventional_Load_MVGrid_1_residential_4" assert len(self.edisgo.topology.loads_df) == num_loads + 1 assert self.edisgo.topology.loads_df.loc[load_name, "bus"] == "Testbus" - assert self.edisgo.topology.loads_df.loc[load_name, "p_nom"] == 0.2 + assert self.edisgo.topology.loads_df.loc[load_name, "p_set"] == 0.2 assert self.edisgo.topology.loads_df.loc[load_name, "annual_consumption"] == 3.2 assert self.edisgo.topology.loads_df.loc[load_name, "sector"] == "residential" - index = self.edisgo.timeseries.timeindex - assert np.isclose( - self.edisgo.timeseries.loads_active_power.loc[index[0], load_name], - 0.15 * 0.2, - ) - assert np.isclose( - self.edisgo.timeseries.loads_active_power.loc[index[1], load_name], - 0.2, - ) - assert np.isclose( - self.edisgo.timeseries.loads_reactive_power.loc[index[0], load_name], - tan(acos(0.9)) * 0.15 * 0.2, - ) - assert np.isclose( - self.edisgo.timeseries.loads_reactive_power.loc[index[1], load_name], - tan(acos(0.9)) * 0.2, + assert ( + self.edisgo.timeseries.loads_active_power.loc[:, load_name] == dummy_ts + ).all() + assert ( + self.edisgo.timeseries.loads_reactive_power.loc[:, load_name] == dummy_ts + 
).all() + + # Test add load (with reactive power time series default mode) + load_name = self.edisgo.add_component( + comp_type="load", + type="conventional_load", + load_id=4, + bus="Testbus", + p_set=0.2, + annual_consumption=3.2, + sector="residential", + ts_active_power=dummy_ts, + ts_reactive_power="default", ) - # Todo: test other modes of timeseries (manual, None) - # Test add generator + assert ( + self.edisgo.timeseries.loads_active_power.loc[:, load_name] == dummy_ts + ).all() + assert ( + self.edisgo.timeseries.loads_reactive_power.loc[:, load_name] + == dummy_ts * np.tan(np.arccos(0.9)) + ).all() + # check that reactive power time series were not all set to default + assert ( + self.edisgo.timeseries.loads_active_power.loc[ + :, "Conventional_Load_MVGrid_1_residential_4" + ] + == dummy_ts + ).all() + + # Test add generator (without time series) num_gens = len(self.edisgo.topology.generators_df) gen_name = self.edisgo.add_component( - "Generator", + "generator", generator_id=5, bus="Testbus", p_nom=2.5, @@ -503,67 +443,55 @@ def test_add_component(self): assert self.edisgo.topology.generators_df.loc[gen_name, "bus"] == "Testbus" assert self.edisgo.topology.generators_df.loc[gen_name, "p_nom"] == 2.5 assert self.edisgo.topology.generators_df.loc[gen_name, "type"] == "solar" - assert np.isclose( - self.edisgo.timeseries.generators_active_power.loc[index[0], gen_name], - 0.85 * 2.5, - ) - assert np.isclose( - self.edisgo.timeseries.generators_active_power.loc[index[1], gen_name], - 0, + assert self.edisgo.timeseries.generators_active_power.shape == (4, num_gens) + assert self.edisgo.timeseries.generators_reactive_power.shape == (4, num_gens) + + # Test add generator (test that warning is raised when no active power time + # series is provided for default mode) + gen_name = self.edisgo.add_component( + "generator", + generator_id=5, + bus="Testbus", + p_nom=2.5, + generator_type="solar", + ts_reactive_power="default", ) - assert np.isclose( - 
self.edisgo.timeseries.generators_reactive_power.loc[index[0], gen_name], - -tan(acos(0.9)) * 0.85 * 2.5, + assert ( + f"Default reactive power time series of {gen_name} cannot be set as " + f"active power time series was not provided." in caplog.text ) - assert np.isclose( - self.edisgo.timeseries.generators_reactive_power.loc[index[1], gen_name], - 0, + + # Test add generator (with reactive power time series default mode) + gen_name = self.edisgo.add_component( + "generator", + generator_id=5, + bus="Testbus", + p_nom=2.5, + generator_type="solar", + ts_active_power=dummy_ts, + ts_reactive_power="default", ) - # Todo: test other modes of timeseries (manual, None) + assert ( + self.edisgo.timeseries.generators_reactive_power.loc[:, gen_name] + == dummy_ts * -np.tan(np.arccos(0.9)) + ).all() + # Test add storage unit num_storages = len(self.edisgo.topology.storage_units_df) storage_name = self.edisgo.add_component( - "StorageUnit", bus="Testbus", p_nom=3.1 + comp_type="storage_unit", bus="Testbus", p_nom=3.1, add_ts=False ) - assert storage_name == "StorageUnit_MVGrid_1_1" + assert storage_name == "StorageUnit_MVGrid_1_2" assert len(self.edisgo.topology.storage_units_df) == num_storages + 1 assert ( self.edisgo.topology.storage_units_df.loc[storage_name, "bus"] == "Testbus" ) assert self.edisgo.topology.storage_units_df.loc[storage_name, "p_nom"] == 3.1 - assert np.isclose( - self.edisgo.timeseries.storage_units_active_power.loc[ - index[0], storage_name - ], - 3.1, - ) - assert np.isclose( - self.edisgo.timeseries.storage_units_active_power.loc[ - index[1], storage_name - ], - -3.1, - ) - assert np.isclose( - self.edisgo.timeseries.storage_units_reactive_power.loc[ - index[0], storage_name - ], - -tan(acos(0.9)) * 3.1, - ) - assert np.isclose( - self.edisgo.timeseries.storage_units_reactive_power.loc[ - index[1], storage_name - ], - tan(acos(0.9)) * 3.1, - ) - # Todo: test other modes of timeseries (manual, None) - # Remove test objects - 
self.edisgo.remove_component("StorageUnit", storage_name) - self.edisgo.remove_component("Load", load_name) - self.edisgo.remove_component("Generator", gen_name) - # Todo: check if components were removed def test_integrate_component(self): - """Test integrate_component method""" + + self.setup_worst_case_time_series() + num_gens = len(self.edisgo.topology.generators_df) random_bus = "Bus_BranchTee_MVGrid_1_1" @@ -580,12 +508,12 @@ def test_integrate_component(self): "generator_type": "misc", "subtype": "misc_sub", } - comp_name = self.edisgo.integrate_component( - comp_type="Generator", + comp_name = self.edisgo.integrate_component_based_on_geolocation( + comp_type="generator", geolocation=(x, y), voltage_level=4, add_ts=False, - **comp_data + **comp_data, ) assert len(self.edisgo.topology.generators_df) == num_gens + 1 @@ -602,17 +530,19 @@ def test_integrate_component(self): # Point, with time series num_cps = len(self.edisgo.topology.charging_points_df) - comp_data = {"p_nom": 4, "sector": "fast"} - ts_active_power = pd.Series(data=[1, 2], index=self.edisgo.timeseries.timeindex) - ts_reactive_power = pd.Series( - data=[0, 0], index=self.edisgo.timeseries.timeindex + comp_data = {"p_set": 4, "sector": "fast"} + dummy_ts = pd.Series( + data=[0.1, 0.2, 0.1, 0.2], index=self.edisgo.timeseries.timeindex ) - comp_name = self.edisgo.integrate_component( - comp_type="ChargingPoint", + ts_active_power = dummy_ts + ts_reactive_power = dummy_ts + + comp_name = self.edisgo.integrate_component_based_on_geolocation( + comp_type="charging_point", geolocation=geom, ts_active_power=ts_active_power, ts_reactive_power=ts_reactive_power, - **comp_data + **comp_data, ) assert len(self.edisgo.topology.charging_points_df) == num_cps + 1 @@ -635,31 +565,25 @@ def test_integrate_component(self): ) # check time series assert ( - self.edisgo.timeseries.charging_points_active_power.loc[:, comp_name].values - == [1, 2] + self.edisgo.timeseries.loads_active_power.loc[:, comp_name].values + 
== [0.1, 0.2, 0.1, 0.2] ).all() assert ( - self.edisgo.timeseries.charging_points_reactive_power.loc[ - :, comp_name - ].values - == [0, 0] + self.edisgo.timeseries.loads_reactive_power.loc[:, comp_name].values + == [0.1, 0.2, 0.1, 0.2] ).all() # ##### LV integration # test charging point integration by nominal power, geom as shapely # Point, with time series - comp_data = {"number": 13, "p_nom": 0.04, "sector": "fast"} - ts_active_power = pd.Series(data=[1, 2], index=self.edisgo.timeseries.timeindex) - ts_reactive_power = pd.Series( - data=[0, 0], index=self.edisgo.timeseries.timeindex - ) - comp_name = self.edisgo.integrate_component( - comp_type="ChargingPoint", + comp_data = {"number": 13, "p_set": 0.04, "sector": "fast"} + comp_name = self.edisgo.integrate_component_based_on_geolocation( + comp_type="charging_point", geolocation=geom, ts_active_power=ts_active_power, ts_reactive_power=ts_reactive_power, - **comp_data + **comp_data, ) assert len(self.edisgo.topology.charging_points_df) == num_cps + 2 @@ -671,27 +595,85 @@ def test_integrate_component(self): ) # check time series assert ( - self.edisgo.timeseries.charging_points_active_power.loc[:, comp_name].values - == [1, 2] + self.edisgo.timeseries.loads_active_power.loc[:, comp_name].values + == [0.1, 0.2, 0.1, 0.2] ).all() assert ( - self.edisgo.timeseries.charging_points_reactive_power.loc[ - :, comp_name - ].values - == [0, 0] + self.edisgo.timeseries.loads_reactive_power.loc[:, comp_name].values + == [0.1, 0.2, 0.1, 0.2] ).all() + def test_remove_component(self): + + self.setup_worst_case_time_series() + + # Test remove bus (where bus cannot be removed, because load is still connected) + num_buses = len(self.edisgo.topology.buses_df) + self.edisgo.remove_component( + comp_type="bus", comp_name="Bus_BranchTee_LVGrid_2_2" + ) + assert len(self.edisgo.topology.buses_df) == num_buses + assert "Bus_BranchTee_LVGrid_2_2" in self.edisgo.topology.buses_df.index + + # Test remove load (with time series) + 
num_loads = len(self.edisgo.topology.loads_df) + load_name = "Load_residential_LVGrid_1_6" + self.edisgo.remove_component( + comp_type="load", + comp_name=load_name, + ) + assert len(self.edisgo.topology.loads_df) == num_loads - 1 + assert load_name not in self.edisgo.timeseries.loads_active_power.columns + assert load_name not in self.edisgo.timeseries.loads_reactive_power.columns + + # Test remove line + num_lines = len(self.edisgo.topology.lines_df) + self.edisgo.remove_component(comp_type="line", comp_name="Line_20000002") + assert len(self.edisgo.topology.lines_df) == num_lines + + # Test remove generator (without time series) + num_gens = len(self.edisgo.topology.generators_df) + self.edisgo.remove_component( + "generator", comp_name="GeneratorFluctuating_10", drop_ts=False + ) + assert len(self.edisgo.topology.generators_df) == num_gens - 1 + assert self.edisgo.timeseries.generators_active_power.shape == (4, num_gens) + assert self.edisgo.timeseries.generators_reactive_power.shape == (4, num_gens) + + # Test remove storage unit (with time series) + num_storages = len(self.edisgo.topology.storage_units_df) + self.edisgo.remove_component(comp_type="storage_unit", comp_name="Storage_1") + assert len(self.edisgo.topology.storage_units_df) == num_storages - 1 + assert load_name not in self.edisgo.timeseries.loads_active_power.columns + assert load_name not in self.edisgo.timeseries.loads_reactive_power.columns + def test_aggregate_components(self): - """Test aggregate_components method""" - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", + + self.setup_worst_case_time_series() + + # ##### test without any aggregation + + self.edisgo.topology._loads_df.at[ + "Load_residential_LVGrid_1_4", "bus" + ] = "Bus_BranchTee_LVGrid_1_10" + + # save original values + number_gens_before = len(self.edisgo.topology.generators_df) + number_loads_before = len(self.edisgo.topology.loads_df) + + 
self.edisgo.aggregate_components( + aggregate_generators_by_cols=[], aggregate_loads_by_cols=[] ) - # ##### test mode "by_component_type" + assert number_gens_before == len(self.edisgo.topology.generators_df) + assert number_loads_before == len(self.edisgo.topology.loads_df) + + # ##### test default (aggregate by bus only) - same EDisGo object as above + # is used + # save original values gens_p_nom_before = self.edisgo.topology.generators_df.p_nom.sum() - loads_p_nom_before = self.edisgo.topology.loads_df.p_nom.sum() + loads_p_set_before = self.edisgo.topology.loads_df.p_set.sum() gens_feedin_before = self.edisgo.timeseries.generators_active_power.sum().sum() gens_feedin_reactive_before = ( self.edisgo.timeseries.generators_reactive_power.sum().sum() @@ -702,14 +684,6 @@ def test_aggregate_components(self): ) num_gens_before = len(self.edisgo.topology.generators_df) num_loads_before = len(self.edisgo.topology.loads_df) - - # test without charging points and aggregation at the same bus - - # manipulate grid so that more than one load is connected - # at the same bus - self.edisgo.topology._loads_df.at[ - "Load_residential_LVGrid_1_4", "bus" - ] = "Bus_BranchTee_LVGrid_1_10" feedin_before = ( self.edisgo.timeseries.generators_active_power.loc[ :, ["GeneratorFluctuating_13", "GeneratorFluctuating_14"] @@ -727,8 +701,8 @@ def test_aggregate_components(self): ) self.edisgo.aggregate_components() - # test that total p_nom and total feed-in/demand stayed - # the same + + # test that total p_nom and total feed-in/demand stayed the same assert np.isclose( gens_p_nom_before, self.edisgo.topology.generators_df.p_nom.sum() ) @@ -740,7 +714,7 @@ def test_aggregate_components(self): gens_feedin_reactive_before, self.edisgo.timeseries.generators_reactive_power.sum().sum(), ) - assert np.isclose(loads_p_nom_before, self.edisgo.topology.loads_df.p_nom.sum()) + assert np.isclose(loads_p_set_before, self.edisgo.topology.loads_df.p_set.sum()) assert np.isclose( loads_demand_before, 
self.edisgo.timeseries.loads_active_power.sum().sum(), @@ -765,7 +739,7 @@ def test_aggregate_components(self): ) assert num_loads_before - 1 == len(self.edisgo.topology.loads_df) assert self.edisgo.topology.loads_df.at[ - "Loads_Bus_BranchTee_LVGrid_1_10", "p_nom" + "Loads_Bus_BranchTee_LVGrid_1_10", "p_set" ] == (2 * 0.001397) assert ( self.edisgo.timeseries.loads_active_power.loc[ @@ -776,41 +750,47 @@ def test_aggregate_components(self): # test that analyze does not fail self.edisgo.analyze() - # test with charging points and aggregation by bus and type/sector + # ##### test with charging points, aggregation of loads by bus, type and sector + # and aggregation of generators only by bus - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) + # reset EDisGo object + self.setup_edisgo_object() + self.setup_worst_case_time_series() + + # add charging point self.edisgo.add_component( - "ChargingPoint", - bus="Bus_BranchTee_LVGrid_1_10", - sector="home", - p_nom=0.2, + comp_type="load", ts_active_power=pd.Series( - data=[0.1, 0.2], index=self.edisgo.timeseries.timeindex + data=[0.1, 0.2, 0.1, 0.2], index=self.edisgo.timeseries.timeindex ), ts_reactive_power=pd.Series( - data=[0, 0], index=self.edisgo.timeseries.timeindex + data=[0, 0, 0, 0], index=self.edisgo.timeseries.timeindex ), + bus="Bus_BranchTee_LVGrid_1_10", + type="charging_point", + sector="home", + p_set=0.2, ) # manipulate grid so that more than one load of the same sector is # connected at the same bus self.edisgo.topology._loads_df.at[ "Load_residential_LVGrid_1_4", "bus" ] = "Bus_BranchTee_LVGrid_1_10" - # manipulate grid so that two generators of different types are - # connected at the same bus - self.edisgo.topology._generators_df.at[ - "GeneratorFluctuating_13", "type" - ] = "misc" + + # save original values (only loads, as generators did not change) + loads_p_set_before = self.edisgo.topology.loads_df.p_set.sum() + loads_demand_before = 
self.edisgo.timeseries.loads_active_power.sum().sum() + loads_demand_reactive_before = ( + self.edisgo.timeseries.loads_reactive_power.sum().sum() + ) + num_loads_before = len(self.edisgo.topology.loads_df) self.edisgo.aggregate_components( - aggregate_loads_by_cols=["bus", "sector"], - aggregate_generators_by_cols=["bus", "type"], + aggregate_loads_by_cols=["bus", "type", "sector"], + aggregate_generators_by_cols=["bus"], ) - # test that total p_nom and total feed-in/demand stayed - # the same + + # test that total p_nom and total feed-in/demand stayed the same assert np.isclose( gens_p_nom_before, self.edisgo.topology.generators_df.p_nom.sum() ) @@ -823,9 +803,8 @@ def test_aggregate_components(self): self.edisgo.timeseries.generators_reactive_power.sum().sum(), ) assert np.isclose( - loads_p_nom_before, - self.edisgo.topology.loads_df.p_nom.sum() - - self.edisgo.topology.charging_points_df.p_nom.sum(), + loads_p_set_before, + self.edisgo.topology.loads_df.p_set.sum(), ) assert np.isclose( loads_demand_before, @@ -835,42 +814,63 @@ def test_aggregate_components(self): loads_demand_reactive_before, self.edisgo.timeseries.loads_reactive_power.sum().sum(), ) - assert np.isclose(0.2, self.edisgo.topology.charging_points_df.p_nom.sum()) + charging_points_df = self.edisgo.topology.charging_points_df + assert np.isclose(0.2, charging_points_df.p_set.sum()) assert np.isclose( - 0.3, - self.edisgo.timeseries.charging_points_active_power.sum().sum(), + 0.6, + self.edisgo.timeseries.loads_active_power.loc[:, charging_points_df.index] + .sum() + .sum(), ) assert np.isclose( 0, - self.edisgo.timeseries.charging_points_reactive_power.sum().sum(), + self.edisgo.timeseries.loads_reactive_power.loc[:, charging_points_df.index] + .sum() + .sum(), ) - # test that two generators were not aggregated - assert num_gens_before - 3 == len(self.edisgo.topology.generators_df) - # test that two loads were aggregated and one charging point was added - assert num_loads_before == 
len(self.edisgo.topology.loads_df) + # test that generators were aggregated + assert num_gens_before - 4 == len(self.edisgo.topology.generators_df) + # test that two loads were aggregated and that charging point was not aggregated + # with load + assert num_loads_before - 1 == len(self.edisgo.topology.loads_df) assert self.edisgo.topology.loads_df.at[ - "Loads_Bus_BranchTee_LVGrid_1_10_residential", "p_nom" + "Loads_Bus_BranchTee_LVGrid_1_10_conventional_load_residential", "p_set" ] == (2 * 0.001397) assert ( self.edisgo.timeseries.loads_active_power.loc[ - :, "Loads_Bus_BranchTee_LVGrid_1_10_residential" + :, "Loads_Bus_BranchTee_LVGrid_1_10_conventional_load_residential" ].sum() == load_before ) - # test that charging point was not aggregated with load - assert 1 == len(self.edisgo.topology.charging_points_df) + # test that analyze does not fail self.edisgo.analyze() - # #### test mode "by_load_and_generation" + # #### test without aggregation of loads and aggregation of generators + # by bus and type - # test with charging points - num_gens_before = len(self.edisgo.topology.generators_df) + # reset EDisGo object + self.setup_edisgo_object() + self.setup_worst_case_time_series() + + # manipulate grid so that two generators of different types are + # connected at the same bus + self.edisgo.topology._generators_df.at[ + "GeneratorFluctuating_13", "type" + ] = "misc" + + # save original values (values of loads were changed in previous aggregation) + loads_p_set_before = self.edisgo.topology.loads_df.p_set.sum() + loads_demand_before = self.edisgo.timeseries.loads_active_power.sum().sum() + loads_demand_reactive_before = ( + self.edisgo.timeseries.loads_reactive_power.sum().sum() + ) num_loads_before = len(self.edisgo.topology.loads_df) - self.edisgo.aggregate_components(mode="by_load_and_generation") - # test that total p_nom and total feed-in/demand stayed - # the same + self.edisgo.aggregate_components( + aggregate_generators_by_cols=["bus", "type"], 
aggregate_loads_by_cols=[] + ) + # test that total p_nom and total feed-in/demand stayed the same assert np.isclose( gens_p_nom_before, self.edisgo.topology.generators_df.p_nom.sum() ) @@ -882,97 +882,109 @@ def test_aggregate_components(self): gens_feedin_reactive_before, self.edisgo.timeseries.generators_reactive_power.sum().sum(), ) + assert np.isclose(loads_p_set_before, self.edisgo.topology.loads_df.p_set.sum()) assert np.isclose( - loads_p_nom_before + 0.2, self.edisgo.topology.loads_df.p_nom.sum() - ) - assert np.isclose( - loads_demand_before + 0.3, + loads_demand_before, self.edisgo.timeseries.loads_active_power.sum().sum(), ) assert np.isclose( loads_demand_reactive_before, self.edisgo.timeseries.loads_reactive_power.sum().sum(), ) - # test that generators at the same bus and load and - # charging point at same bus were aggregated - assert num_gens_before - 1 == len(self.edisgo.topology.generators_df) - assert num_loads_before - 1 == len(self.edisgo.topology.loads_df) - assert self.edisgo.topology.loads_df.at[ - "Loads_Bus_BranchTee_LVGrid_1_10", "p_nom" - ] == (2 * 0.001397 + 0.2) - assert ( - self.edisgo.timeseries.loads_active_power.loc[ - :, "Loads_Bus_BranchTee_LVGrid_1_10" - ].sum() - == load_before + 0.3 - ) + # test that generators at the same bus were aggregated and loads stayed the same + assert num_gens_before - 3 == len(self.edisgo.topology.generators_df) + assert num_loads_before == len(self.edisgo.topology.loads_df) + # test that analyze does not fail self.edisgo.analyze() - # test without charging points + def test_plot_mv_grid_topology(self): + plt.ion() + self.edisgo.plot_mv_grid_topology(technologies=True) + plt.close("all") + self.edisgo.plot_mv_grid_topology() + plt.close("all") - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) - num_gens_before = len(self.edisgo.topology.generators_df) - num_loads_before = len(self.edisgo.topology.loads_df) + len( - 
self.edisgo.topology.charging_points_df - ) - feedin_before = ( - self.edisgo.timeseries.generators_active_power.loc[ - :, ["GeneratorFluctuating_17", "GeneratorFluctuating_18"] - ] - .sum() - .sum() - ) + def test_plot_mv_voltages(self): + self.setup_worst_case_time_series() + plt.ion() + self.edisgo.analyze() + self.edisgo.plot_mv_voltages() + plt.close("all") - self.edisgo.aggregate_components(mode="by_load_and_generation") - # test that total p_nom and total feed-in/demand stayed - # the same - assert np.isclose( - gens_p_nom_before, self.edisgo.topology.generators_df.p_nom.sum() - ) - assert np.isclose( - gens_feedin_before, - self.edisgo.timeseries.generators_active_power.sum().sum(), - ) - assert np.isclose( - gens_feedin_reactive_before, - self.edisgo.timeseries.generators_reactive_power.sum().sum(), - ) - assert np.isclose(loads_p_nom_before, self.edisgo.topology.loads_df.p_nom.sum()) - assert np.isclose( - loads_demand_before, - self.edisgo.timeseries.loads_active_power.sum().sum(), - ) - assert np.isclose( - loads_demand_reactive_before, - self.edisgo.timeseries.loads_reactive_power.sum().sum(), + def test_plot_mv_line_loading(self): + self.setup_worst_case_time_series() + plt.ion() + self.edisgo.analyze() + self.edisgo.plot_mv_line_loading() + plt.close("all") + + def test_plot_mv_grid_expansion_costs(self): + # test with storage + self.setup_worst_case_time_series() + plt.ion() + self.edisgo.reinforce() + self.edisgo.plot_mv_grid_expansion_costs() + plt.close("all") + + # test without storage + self.setup_edisgo_object() + self.edisgo.remove_component("storage_unit", "Storage_1", False) + self.setup_worst_case_time_series() + plt.ion() + self.edisgo.reinforce() + self.edisgo.plot_mv_grid_expansion_costs() + plt.close("all") + + def test_plot_mv_storage_integration(self): + plt.ion() + storage_1 = self.edisgo.topology.add_storage_unit( + "Bus_BranchTee_MVGrid_1_8", 0.3 ) - # test that generators were aggregated - assert num_gens_before - 4 == 
len(self.edisgo.topology.generators_df) - assert ( - self.edisgo.topology.generators_df.at[ - "Generators_Bus_BranchTee_LVGrid_4_2", "p_nom" - ] - == 0.065 + storage_2 = self.edisgo.topology.add_storage_unit( + "Bus_BranchTee_MVGrid_1_8", 0.6 ) - assert ( - self.edisgo.timeseries.generators_active_power.loc[ - :, "Generators_Bus_BranchTee_LVGrid_4_2" - ].sum() - == feedin_before + storage_3 = self.edisgo.topology.add_storage_unit( + "Bus_BranchTee_MVGrid_1_10", 0.3 ) - # test that no loads were aggregated - assert num_loads_before == len(self.edisgo.topology.loads_df) - # test that analyze does not fail + self.edisgo.plot_mv_storage_integration() + plt.close("all") + self.edisgo.topology.remove_storage_unit(storage_1) + self.edisgo.topology.remove_storage_unit(storage_2) + self.edisgo.topology.remove_storage_unit(storage_3) + + def test_histogramm_voltage(self): + self.setup_worst_case_time_series() + plt.ion() self.edisgo.analyze() + self.edisgo.histogram_voltage() + plt.close("all") + + def test_histogramm_relative_line_load(self): + self.setup_worst_case_time_series() + plt.ion() + self.edisgo.analyze() + self.edisgo.histogram_relative_line_load() + plt.close("all") + + def test_save(self): + save_dir = os.path.join(os.getcwd(), "edisgo_network") + self.edisgo.save(save_dir) + + # check that results, topology and timeseries directory are created + dirs_in_save_dir = os.listdir(save_dir) + assert len(dirs_in_save_dir) == 3 + # Todo: check anything else? 
+ shutil.rmtree(os.path.join(save_dir, "results")) + shutil.rmtree(os.path.join(save_dir, "topology")) + shutil.rmtree(os.path.join(save_dir, "timeseries")) def test_reduce_memory(self): - """Test reduce_memory method""" - # check one time series attribute and one results attribute + self.setup_worst_case_time_series() + self.edisgo.analyze() + + # check one time series attribute and one results attribute mem_ts_before = self.edisgo.timeseries.generators_active_power.memory_usage( deep=True ).sum() @@ -1020,3 +1032,130 @@ def test_reduce_memory(self): mem_res_with_default_2, self.edisgo.results.i_res.memory_usage(deep=True).sum(), ) + + def test_check_integrity(self, caplog): + self.edisgo.check_integrity() + assert ( + "The following generators are missing in generators_active_power: " + "{}".format(self.edisgo.topology.generators_df.index.values) in caplog.text + ) + assert ( + "The following generators are missing in generators_reactive_power: " + "{}".format(self.edisgo.topology.generators_df.index.values) in caplog.text + ) + assert ( + "The following loads are missing in loads_active_power: " + "{}".format(self.edisgo.topology.loads_df.index.values) in caplog.text + ) + assert ( + "The following loads are missing in loads_reactive_power: " + "{}".format(self.edisgo.topology.loads_df.index.values) in caplog.text + ) + assert ( + "The following storage_units are missing in storage_units_active_power" + ": {}".format(self.edisgo.topology.storage_units_df.index.values) + in caplog.text + ) + assert ( + "The following storage_units are missing in storage_units_reactive_power" + ": {}".format(self.edisgo.topology.storage_units_df.index.values) + in caplog.text + ) + caplog.clear() + # set timeseries + index = pd.date_range("1/1/2018", periods=3, freq="H") + ts_gens = pd.DataFrame( + index=index, columns=self.edisgo.topology.generators_df.index, data=0 + ) + ts_loads = pd.DataFrame( + index=index, columns=self.edisgo.topology.loads_df.index, data=0 + ) + ts_stor = 
pd.DataFrame( + index=index, columns=self.edisgo.topology.storage_units_df.index, data=0 + ) + self.edisgo.timeseries.timeindex = index + self.edisgo.timeseries.generators_active_power = ts_gens + self.edisgo.timeseries.generators_reactive_power = ts_gens + self.edisgo.timeseries.loads_active_power = ts_loads + self.edisgo.timeseries.loads_reactive_power = ts_loads + self.edisgo.timeseries.storage_units_active_power = ts_stor + self.edisgo.timeseries.storage_units_reactive_power = ts_stor + # check that no warning is raised + self.edisgo.check_integrity() + assert not caplog.text + manipulated_comps = { + "generators": ["Generator_1", "GeneratorFluctuating_4"], + "loads": ["Load_agricultural_LVGrid_1_3"], + "storage_units": ["Storage_1"], + } + for comp_type, comp_names in manipulated_comps.items(): + comps = getattr(self.edisgo.topology, comp_type + "_df") + # remove timeseries of single components and check for warning + for ts_type in ["active_power", "reactive_power"]: + comp_ts_tmp = getattr( + self.edisgo.timeseries, "_".join([comp_type, ts_type]) + ) + setattr( + self.edisgo.timeseries, + "_".join([comp_type, ts_type]), + comp_ts_tmp.drop(columns=comp_names), + ) + self.edisgo.check_integrity() + assert ( + "The following {type} are missing in {ts}: {comps}".format( + type=comp_type, + ts="_".join([comp_type, ts_type]), + comps=str(comp_names).replace(",", ""), + ) + in caplog.text + ) + setattr( + self.edisgo.timeseries, "_".join([comp_type, ts_type]), comp_ts_tmp + ) + caplog.clear() + # remove topology entries for single components and check for warning + setattr(self.edisgo.topology, comp_type + "_df", comps.drop(comp_names)) + self.edisgo.check_integrity() + for ts_type in ["active_power", "reactive_power"]: + assert ( + "The following {type} have entries in {type}_{ts_type}, but not " + "in {top}: {comps}".format( + type=comp_type, + top=comp_type + "_df", + comps=str(comp_names).replace(",", ""), + ts_type=ts_type, + ) + in caplog.text + ) + 
caplog.clear() + setattr(self.edisgo.topology, comp_type + "_df", comps) + # set values higher than nominal power for single components and check for + # warning + comp_ts_tmp = getattr( + self.edisgo.timeseries, "_".join([comp_type, "active_power"]) + ) + comp_ts_tmp_adapted = comp_ts_tmp.copy() + comp_ts_tmp_adapted.loc[index[2], comp_names] = 100 + setattr( + self.edisgo.timeseries, + "_".join([comp_type, "active_power"]), + comp_ts_tmp_adapted, + ) + self.edisgo.check_integrity() + if comp_type in ["generators", "storage_units"]: + attr = "p_nom" + else: + attr = "p_set" + assert ( + "Values of active power in the timeseries object exceed {} for " + "the following {}: {}".format( + attr, comp_type, str(comp_names).replace(",", "") + ) + in caplog.text + ) + setattr( + self.edisgo.timeseries, + "_".join([comp_type, "active_power"]), + comp_ts_tmp, + ) + caplog.clear() diff --git a/tests/test_examples.py b/tests/test_examples.py index 895c5136e..2da35a952 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -1,7 +1,6 @@ import os import shutil -import numpy as np import pytest from examples import example_grid_reinforcement diff --git a/tests/tools/test_plots.py b/tests/tools/test_plots.py index f6a083302..0e385d2e0 100644 --- a/tests/tools/test_plots.py +++ b/tests/tools/test_plots.py @@ -6,11 +6,10 @@ class TestPlots: @classmethod - def setup_class(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, worst_case_analysis="worst-case" - ) - self.edisgo.reinforce() + def setup_class(cls): + cls.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + cls.edisgo.set_time_series_worst_case_analysis() + cls.edisgo.reinforce() def test_dash_plot(self): # TODO: at the moment this doesn't really test anything. Add meaningful tests. 
@@ -20,7 +19,7 @@ def test_dash_plot(self): ) # test if any errors occur when passing multiple edisgo objects - app = dash_plot( + app = dash_plot( # noqa: F841 edisgo_objects={ "edisgo_1": self.edisgo, "edisgo_2": self.edisgo, diff --git a/tests/tools/test_tools.py b/tests/tools/test_tools.py index 3ccd9ad1b..367d6bf8c 100644 --- a/tests/tools/test_tools.py +++ b/tests/tools/test_tools.py @@ -1,5 +1,3 @@ -import os - from math import sqrt import numpy as np @@ -8,35 +6,31 @@ from numpy.testing import assert_allclose, assert_array_equal from edisgo import EDisGo -from edisgo.io import ding0_import -from edisgo.network.topology import Topology from edisgo.tools import tools class TestTools: @classmethod def setup_class(self): - self.edisgo = EDisGo( - ding0_grid=pytest.ding0_test_network_path, - worst_case_analysis="worst-case", - ) + self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) + self.edisgo.set_time_series_worst_case_analysis() self.timesteps = self.edisgo.timeseries.timeindex self.edisgo.analyze() def test_calculate_relative_line_load(self): # test without providing lines and time steps rel_line_load = tools.calculate_relative_line_load(self.edisgo) - assert rel_line_load.shape == (2, 129) + assert rel_line_load.shape == (4, 129) # test with providing lines rel_line_load = tools.calculate_relative_line_load( self.edisgo, lines=["Line_10005", "Line_50000002", "Line_90000021"] ) - assert rel_line_load.shape == (2, 3) + assert rel_line_load.shape == (4, 3) assert np.isclose( rel_line_load.at[self.timesteps[0], "Line_10005"], self.edisgo.results.i_res.at[self.timesteps[0], "Line_10005"] - / (7.274613391789284 / 20 / sqrt(3)), + / (7.274613391789284 / 2 / 20 / sqrt(3)), ) assert np.isclose( rel_line_load.at[self.timesteps[1], "Line_50000002"], @@ -52,7 +46,7 @@ def test_calculate_relative_line_load(self): assert np.isclose( rel_line_load.at[self.timesteps[0], "Line_10005"], self.edisgo.results.i_res.at[self.timesteps[0], "Line_10005"] - / 
(7.274613391789284 / 20 / sqrt(3)), + / (7.274613391789284 / 2 / 20 / sqrt(3)), ) def test_calculate_line_reactance(self):