diff --git a/.gitignore b/.gitignore index 738b62ef..3393fdc7 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,5 @@ ego/scenario_setting.json ego/noise_values.csv .idea/ noise_values.csv - +*.pkl +.ipynb_checkpoints diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..2655a200 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,10 @@ +include README.rst +include LICENSE +include *txt +include MANIFEST.in +include *.json +include *.rst +include *.csv +include ego/scenario_setting.json +include ego/data/*.csv +include ego/tools/*.json diff --git a/README.rst b/README.rst index 3e2d816d..e54d8d21 100644 --- a/README.rst +++ b/README.rst @@ -1,15 +1,22 @@ -.. image:: https://readthedocs.org/projects/openego/badge/?version=latest - :target: http://openego.readthedocs.io/en/latest/?badge=latest - :alt: Documentation Status +|readthedocs| |badge_githubstars| + .. image:: https://openegoproject.files.wordpress.com/2017/02/open_ego_logo_breit.png?w=400 + :align: right + :scale: 100% -eGo -====== +*A cross-grid-level electricity grid and storage optimization tool* +| `openegoproject.wordpress.com `_ -Integrated optimization of flexibility options and grid extension measures for power grids based on eTraGo and eDisGo. -A speciality in this context is that transmission grids are described by the 380, 220 and 110 kV in Germany. The integration of the transmission grid (via eTraGo) and distribution grid (via eDisGo) is part of eGo. +eGo +=== + +Integrated optimization of flexibility options and grid extension measures +for power grids based on `eTraGo `_ and +`eDisGo `_. The Documentation of the eGo tool +can be found on +`openego.readthedocs.io `_ . .. contents:: @@ -20,15 +27,39 @@ Create a virtualenvironment (where you like it) and activate it: .. code-block:: - $ virtualenv eGo --clear -p python3.5 + $ virtualenv venv --clear -p python3.5 $ source venv/bin/activate + $ cd venv + $ pip3 install -e git+https://github.com/openego/eGo@dev#egg=eGo --process-dependency-links + - $ pip3 install -e git+https://github.com/openego/eGo@dev#egg=eGo --process-dependency-links --allow-all-external +LICENSE +======= +© Europa-Universität Flensburg, +© Flensburg University of Applied Sciences, + Centre for Sustainable Energy Systems +© DLR Institute for Networked Energy Systems, +© Reiner-Lemoine-Institute" +This program is free software: you can redistribute it and/or modify it under +the terms of the GNU Affero General Public License as published by the Free +Software Foundation, either version 3 of the License, or (at your option) any +later version. -Copyleft -======== +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +details. -Code licensed under "GNU Affero General Public License Version 3 (AGPL-3.0)" -It is a collaborative work with several copyright owner: -Cite as "eGo" © Flensburg University of Applied Sciences, Centre for Sustainable Energy Systems © Europa-Universität Flensburg, Centre for Sustainable Energy Systems © DLR Institute for Networked Energy Systems, © Reiner-Lemoine-Institute" +You should have received a copy of the GNU General Public License along with +this program. If not, see https://www.gnu.org/licenses/. + + + +.. 
|badge_githubstars| image:: https://img.shields.io/github/stars/openego/eGo.svg?style=flat-square&label=github%20stars + :target: https://github.com/openego/eGo/ + :alt: GitHub stars + +.. |readthedocs| image:: https://readthedocs.org/projects/openego/badge/?version=latest + :target: http://openego.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status diff --git a/doc/_static/ribbons.html b/doc/_static/ribbons.html new file mode 100644 index 00000000..1bc61e26 --- /dev/null +++ b/doc/_static/ribbons.html @@ -0,0 +1 @@ + diff --git a/doc/api.rst b/doc/api.rst index 83b7a11a..1dce2d4a 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -1,8 +1,8 @@ .. make doc-string generated documentation appear here .. toctree:: - :maxdepth: 7 + :maxdepth: 4 :glob: - :titlesonly: + API diff --git a/doc/api/ego.rst b/doc/api/ego.rst deleted file mode 100644 index 436cb6a5..00000000 --- a/doc/api/ego.rst +++ /dev/null @@ -1,29 +0,0 @@ -ego package -=========== - -Subpackages ------------ - -.. toctree:: - - ego.tools - -Submodules ----------- - -ego\.ego_main module --------------------- - -.. automodule:: ego.ego_main - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ego - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/api/ego.tools.rst b/doc/api/ego.tools.rst index 67d915df..d3295b39 100644 --- a/doc/api/ego.tools.rst +++ b/doc/api/ego.tools.rst @@ -4,24 +4,40 @@ ego\.tools package Submodules ---------- -ego\.tools\.economics module ----------------------------- +ego\.tools\.economics +--------------------- .. automodule:: ego.tools.economics :members: :undoc-members: :show-inheritance: -ego\.tools\.io module ---------------------- +ego\.tools\.edisgo_integration +------------------------------ + +.. automodule:: ego.tools.edisgo_integration + :members: + :undoc-members: + :show-inheritance: + +ego\.tools\.io +-------------- .. automodule:: ego.tools.io :members: :undoc-members: :show-inheritance: -ego\.tools\.plots module ------------------------- +ego\.tools\.mv_cluster +---------------------- + +.. automodule:: ego.tools.mv_cluster + :members: + :undoc-members: + :show-inheritance: + +ego\.tools\.plots +----------------- .. automodule:: ego.tools.plots :members: @@ -34,8 +50,8 @@ ego\.tools\.results .. automodule:: ego.tools.results :members: :undoc-members: - :show-inheritance: - + :show-inheritance: + ego\.tools\.specs ----------------- @@ -45,12 +61,18 @@ ego\.tools\.specs :undoc-members: :show-inheritance: -ego\.tools\.utilities module ----------------------------- +ego\.tools\.storages +-------------------- -.. automodule:: ego.tools.utilities +.. automodule:: ego.tools.storages :members: :undoc-members: :show-inheritance: +ego\.tools\.utilities +--------------------- +.. automodule:: ego.tools.utilities + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/modules.rst b/doc/api/modules.rst index 1daaaa50..18acf603 100644 --- a/doc/api/modules.rst +++ b/doc/api/modules.rst @@ -1,7 +1,93 @@ +=== ego === + + +Overview of modules +=================== + + .. toctree:: - :maxdepth: 4 + :maxdepth: 7 + + ego.tools + +scenario_settings.json +====================== + +With the ``scenario_settings.json`` file you set up your calcualtion. +The file can be found on +`github `_. + +.. json:object:: scenario_setting.json + + This file contains all input settings for the eGo tool. 
+
+   :property global: Global settings that are valid for both eTraGo and eDisGo
+   :proptype global: :json:object:`global`
+   :property eTraGo: eTraGo settings, only valid for eTraGo run
+   :proptype eTraGo: :json:object:`eTraGo`
+   :property eDisGo: eDisGo settings, only valid for eDisGo runs
+   :proptype eDisGo: :json:object:`eDisGo`
+
+
+.. json:object:: global
+
+   :property bool eTraGo: Decide if you want to run the eTraGo tool (HV/EHV grid optimization).
+   :property bool eDisGo: Decide if you want to run the eDisGo tool (MV grid optimization).
+   :property string db: Name of your database (e.g. ``''oedb''``).
+   :property bool recover: If ``true``, (previously calculated) eTraGo results are queried from your database (instead of performing a new run).
+   :property int result_id: ID of the (previously calculated) eTraGo results that are queried if **recover** is set ``true``.
+   :property string gridversion: Version of the *open_eGo* input data-sets (e.g. ``''v0.4.2''``)
+
-   ego
+.. json:object:: eTraGo
+
+   This section of :json:object:`scenario_setting.json` contains all input parameters for the eTraGo tool. A description of the parameters can be found `here. `_
+
+   Please note that some parameters are already included in :json:object:`global`
+
+
+.. json:object:: eDisGo
+
+   This section of :json:object:`scenario_setting.json` contains all input parameters for the eDisGo tool and the clustering of MV grids.
+
+   :property string ding0_files: Relative path to the MV grid files (created by `ding0 `_) (e.g. ``''data/MV_grids/20180713110719''``)
+   :property string choice_mode: Mode that eGo uses to choose MV grids out of the files in **ding0_files** (e.g. ``''manual''``, ``''cluster''`` or ``''all''``). If ``''manual''`` is chosen, the parameter **manual_grids** must contain a list of the desired grids. If ``''cluster''`` is chosen, **no_grids** must specify the desired number of clusters. If ``''all''`` is chosen, all MV grids from **ding0_files** are calculated.
+   :property list manual_grids: List of MV grid IDs (*open_eGo* HV/MV substation IDs)
+   :property int no_grids: Number of MV grid clusters (from all files in **ding0_files**, a specified number of representative clusters is calculated)
+
+
+
+ego_main.py
+===========
+
+This is the application file for the tool eGo. The application eGo calculates
+the distribution and transmission grids of eTraGo and eDisGo.
+
+.. note:: The data source of eGo relies on the Open Energy Database. The
+   registration for the publicly accessible API can be found on
+   `openenergy-platform.org/login `_.
+
+Run the ``ego_main.py`` file with:
+
+.. code-block:: bash
+
+    >>> python3 ego_main.py
+    >>> ...
+    >>> INFO:ego:Start calculation
+    >>> ...
+
+The eGo App works like this:
+
+.. code-block:: python
+
+    >>> from ego.tools.io import eGo
+    >>> ego = eGo(jsonpath='scenario_setting.json')
+    >>> ego.etrago_line_loading()
+    >>> print(ego.etrago.storage_costs)
+    >>> ...
+    >>> INFO:ego:Start calculation
+    >>> ...
diff --git a/doc/conf.py
index 2dc759c1..a8a92435 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -36,10 +36,7 @@
 # documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('../')) - - -sys.path - +sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ------------------------------------------------ @@ -63,10 +60,13 @@ 'numpydoc', 'sphinxcontrib.httpdomain', # for restfull API 'sphinxcontrib.autohttp.flask', - 'sphinx.ext.extlinks' # enables external links with a key + 'sphinx.ext.extlinks', # enables external links with a key + 'sphinxjsondomain' ] +# https://stackoverflow.com/questions/12206334/sphinx-autosummary-toctree-contains-reference-to-nonexisting-document-warnings +numpydoc_show_class_members = False # Napoleon settings napoleon_google_docstring = True @@ -118,12 +118,12 @@ def setup(app): import json path = os.getcwd() -json_file ='../ego/scenario_setting.json' +json_file = '../ego/scenario_setting.json' with open(path +'/'+json_file) as f: scn_set = json.load(f) - +json_global = list(scn_set['eTraGo']) html_context = { 'power_class': power_class, @@ -131,10 +131,6 @@ def setup(app): } - - - - # add RestFull API httpexample_scheme = 'https' @@ -164,9 +160,9 @@ def setup(app): # built documents. # # The short X.Y version. -version = '0.1.0' +version = '0.2.0' # The full version, including alpha/beta/rc tags. -release = '0.0.1dev' +release = '0.2.0' # The language for content autogenerated by Sphinx. Refer to documentation @@ -184,7 +180,7 @@ def setup(app): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', 'whatsnew'] +exclude_patterns = ['_build', 'whatsnew', '_static'] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -269,7 +265,7 @@ def __getattr__(cls, name): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] +html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied @@ -417,7 +413,15 @@ def __getattr__(cls, name): # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} +intersphinx_mapping = {'python': ('https://docs.python.org/3', None), + 'etrago': ('https://etrago.readthedocs.io/en/latest', + None), + 'edisgo': ('http://edisgo.readthedocs.io/en/dev',None), + 'ding0': ('https://dingo.readthedocs.io/en/dev',None), + 'pypsa': ('https://pypsa.org/doc/',None), + 'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', + None), + } # Numbered figures numfig = True diff --git a/doc/developer.rst b/doc/developer.rst index c0bdce24..43c0e72f 100644 --- a/doc/developer.rst +++ b/doc/developer.rst @@ -1,29 +1,63 @@ +=============== Developer notes -~~~~~~~~~~~~~~~ +=============== +Installation +============ -Interface eDisGo for grid and storage costs -------------------------------------------- +.. note:: + Installation is only tested on (Ubuntu like) linux OS. +1. Create a virtualenvironment (where you like it) and activate it: -.. code-block:: python +.. code-block:: bash - # get setting from eTraGo for eDisGo - specs = get_etragospecs_from_db(session, bus_id, result_id, scn_name) - ... 
-    # Create scenario or eDisGo of one mv Grid
-    scenario = Scenario(etrago_specs=specs,
-                        power_flow=(),
-                        mv_grid_id=mv_grid_id,
-                        scenario_name='NEP 2035')
-    ...
-    # import ding0 mv grid
-    network = Network.import_from_ding0(file_path,
-                                        id=mv_grid_id,
-                                        scenario=scenario)
+    $ virtualenv --clear -p python3.5 ego_dev
+    $ cd ego_dev/
+    $ source bin/activate
+2. Clone eGo from github.com by running the following command in your terminal:
+
+.. code-block:: bash
+
+    $ git clone https://github.com/openego/eGo
+
+
+With your activated environment `cd` to the cloned directory and run
+``pip3 install -e eGo --process-dependency-links --allow-all-external`` .
+This will install all needed packages into your environment.
+
+.. warning::
+
+    Note that the first developer release partly depends on forks and
+    development versions which cannot be installed automatically.
+    Check your installed packages using ``pip3 freeze`` with the
+    `ego_dependencies.txt
+    `_
+
+
+3. Work-arounds:
+
+After your installation install the eGo PyPSA fork on
+`dev `_
+``pip3 install -e git+https://github.com/openego/PyPSA.git@dev#egg=PyPSA``
+and Folium for web-based plotting with
+``pip3 install -e git+git@github.com:python-visualization/folium.git@dev#egg=folium``
+
+Check if the `config.json `_
+file from eTraGo is installed in your library
+``/lib/python3.5/site-packages/etrago/tools`` .
+If not, copy and paste this file into this folder.
+
+If database connection or table errors appear, use:
+``pip3 install -e git+git@github.com:openego/ego.io.git@dev#egg=ego.io``
+
+
+eDisGo
+======
+
 eDisGo units
 ------------
@@ -36,12 +70,13 @@ eDisGo units
 Definition of grid expansion costs
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+==================================
 `grid expansion costs `_
 Definition of storage exansion
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+------------------------------
+
 `Attributes according to PyPSA `_
 Change of units from Mega to kilo:
   :file: storage_units.csv
   :delim: ,
   :header-rows: 1
+
+eTraGo
+======
+
diff --git a/doc/getting_started.rst
index 5628ae13..b2154e61 100644
--- a/doc/getting_started.rst
+++ b/doc/getting_started.rst
@@ -1,57 +1,45 @@
+===============
 Getting started
-###############
+===============
-.. warning::
-    Note, eGo, eTraGo and eDisGo relies on data provided by the OEDB. Currently, only members
-    of the openego project team have access to this database. Public access
-    (SQL queries wrapped by HTML) to the OEDB will be provided soon
+How to use eGo?
+===============
+1. Check and prepare your eGo settings in ``ego/scenario_setting.json``
+2. Start your calculation with the predefined result tools and run the main
+   file under ``eGo/ego`` with ``>>> python3 ego_main.py``
+
-Installation
-============
+
-.. note::
-    Installation is only tested on (Ubuntu like) linux OS.
+
-1. Create a virtualenvironment (where you like it) and activate it:
+.. code-block:: bash
-   ``$virtualenv --clear -p python3.5 ego_dev`` and ``$ cd ego_dev/``
-   followed by ``$ source bin/activate``
+   >>> python3 ego_main.py
+   >>> ...
+   >>> INFO:ego:Start calculation
+   >>> ...
-2. Clone eGo from github.com by running following command in your terminal:
+Examples
+========
-   ```
-   git clone https://github.com/openego/eGo
-   ```
+.. code-block:: python
-With your activated environment `cd` to the cloned directory and run
-``pip3 install -e eGo --process-dependency-links --allow-all-external`` .
This will install all needed packages into your environment. + # import the eGo tool + from ego.tools.io import eGo -.. warning:: + # Run your scenario + ego = eGo(jsonpath='scenario_setting.json') - Note, that the first release for deveolper is partly dependent on - forks and developent versions which could not automaticly be installed. - Check your installed packages using ``pip3 freeze`` with the - `ego_dependencies.txt `_ + # Analyse your results on extra high voltage level (etrago) + ego.etrago_line_loading() -3. Temporary solution: - After your installation install the eGo PyPSA fork on `dev `_ - ``pip3 install -e git+https://github.com/openego/PyPSA.git@dev#egg=PyPSA`` - and Folium for an web based ploting with - ``pip3 install -e git+git@github.com:python-visualization/folium.git@5739244acb9868d001032df288500a047b232857#egg=folium`` +Example Cluster of Germany +========================== - Check if the `config.json `_ - file from eTraGo is installed in your libary ``/lib/python3.5/site-packages/etrago/tools`` . - If not copy and paste this file into this folder. +.. raw:: html + :file: images/iplot_cluster.html - If Database connection or table erros appears use: ``pip3 install -e git+git@github.com:openego/ego.io.git@3b76dfddea14d67eb4421b6223bf981d8851e4e6#egg=ego.io`` - - -Using eGo: -========== - -1. check and prepare your eGo setting in ``ego/scenario_setting.json`` -2. Start your calculation with in the directory of ``eGo/ego`` with ``python3 ego_main.py`` diff --git a/doc/images/open_ego_logo.png b/doc/images/open_ego_logo.png new file mode 100644 index 00000000..3e34fccc Binary files /dev/null and b/doc/images/open_ego_logo.png differ diff --git a/doc/images/open_ego_models_overview.png b/doc/images/open_ego_models_overview.png new file mode 100644 index 00000000..23663c61 Binary files /dev/null and b/doc/images/open_ego_models_overview.png differ diff --git a/doc/index.rst b/doc/index.rst index 27b6d930..9936f91d 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,35 +1,43 @@ +============================= Welcome to eGo documentation! ============================= +.. image:: images/open_ego_logo.png + :align: right + :scale: 40% + -.. image:: https://openegoproject.files.wordpress.com/2017/02/open_ego_logo_breit.png?w=400 - :align: right - :scale: 100% +.. note:: Note, this software is under construction and not + ready for a normal use. + +.. note:: Note, the data source of eGo relies on + the Open Energy Database. - The registration for the public + accessible API can be found on + `openenergy-platform.org/login `_. + + +Overview +======== .. toctree:: - :maxdepth: 7 - :titlesonly: + :maxdepth: 2 welcome + installation getting_started - usage_details + theoretical_background developer whatsnew api - -Take also a look into the Documentation of `eTraGo `_ and `eDisGo `_ which are part of eGo. - - - -.. note:: Note, this software is under construction and not ready for a normal use. - -.. warning:: Note, currently the data source of eGo relies on - the Open Energy Database - has no public accessible API, yet. Thus, currently you won't be able to run eGo without modifications. +Take also a look into the Documentation of +`eTraGo `_ and +`eDisGo `_ which are part of eGo. @@ -39,3 +47,8 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` + + + +.. 
raw:: html
   :file: _static/ribbons.html
diff --git a/doc/installation.rst
new file mode 100644
index 00000000..6be71a9f
--- /dev/null
+++ b/doc/installation.rst
@@ -0,0 +1,111 @@
+============
+Installation
+============
+eGo is designed as a Python package, therefore it is mandatory to have
+`Python 3 `_ installed. If you have a
+working Python3 environment, use PyPI to install the latest eGo version.
+We highly recommend using a virtual environment. Use the following pip
+command in order to install eGo:
+
+.. code-block:: bash
+
+  $ pip3 install eGo --process-dependency-links
+
+
+
+Using virtual environment
+=========================
+
+First, create a virtual environment (where you like it) and activate it:
+
+.. code-block:: bash
+
+   $ virtualenv venv --clear -p python3.5
+   $ source venv/bin/activate
+   $ cd venv
+
+Inside your virtual environment you can install eGo with the pip command.
+
+Linux and Ubuntu
+================
+
+The package eGo is tested with Ubuntu 16.04 and 18.04 inside the virtual
+environments of `virtualenv `_.
+The installation is shown above.
+
+
+
+Windows or Mac OSX users
+========================
+
+For Windows and/or Mac OSX users we highly recommend installing and using
+Anaconda for your Python3 installation. First install Anaconda including
+Python 3.5 or a higher version from https://www.anaconda.com/download/, then
+open an Anaconda prompt as administrator and run:
+
+.. code-block:: bash
+
+  $ conda install pip
+  $ conda config --add channels conda-forge
+  $ conda install shapely
+  $ pip3 install eGo --process-dependency-links
+
+The full documentation can be found
+`on this page `_. We use Anaconda
+with its own environment in order to reduce problems with packages and
+different versions on our system. Learn more about
+`Anaconda `_
+environments.
+
+
+
+Setup database connection
+=========================
+The package ``ego.io`` gives you a Python SQL-Alchemy representation of
+the **OpenEnergy-Database** (oedb) and access to it by using the
+`oedialect `_, a SQL-Alchemy binding
+Python package for the REST-API used by the OpenEnergy Platform (OEP). Your API
+access / login data will be saved in the folder ``.egoio`` in the file
+``config.ini``. You can create a new account on
+`openenergy-platform.org/login `_.
+
+
+oedialect connection
+--------------------
+
+.. code-block:: bash
+
+  [oedb]
+  dialect = oedialect
+  username = 
+  database = oedb
+  host = openenergy-platform.org
+  port = 80
+  password = 
+
+
+Local database connection
+-------------------------
+
+.. code-block:: bash
+
+  [local]
+  username = YourOEDBUserName
+  database = YourLocalDatabaseName
+  host = localhost or 127.0.0.1
+  port = 5433
+  pw = YourLocalPassword
+
+
+
+Old developer connection
+------------------------
+
+.. code-block:: bash
+
+  [oedb]
+  username = YourOEDBUserName
+  database = oedb
+  host = oe2.iws.cs.ovgu.de
+  port = 5432
+  pw = YourOEDBPassword
diff --git a/doc/theoretical_background.rst
new file mode 100644
index 00000000..982262da
--- /dev/null
+++ b/doc/theoretical_background.rst
@@ -0,0 +1,28 @@
+======================
+Theoretical background
+======================
+
+.. contents::
+
+
+Models overview
+================
+
+
+.. 
figure:: images/open_ego_models_overview.png + :width: 1123px + :height: 794px + :scale: 70% + :alt: Overview of Models and processes which are used by eGo + :align: center + +Cluster methods +=============== + + +Economic calculation +==================== + + +References +========== diff --git a/doc/usage_details.rst b/doc/usage_details.rst deleted file mode 100644 index 0aae78be..00000000 --- a/doc/usage_details.rst +++ /dev/null @@ -1,11 +0,0 @@ - -How to use eGo? -~~~~~~~~~~~~~~~~~~ - - -Examples -======== - -1. Change your json file in order to select your calculation settings. - -2. Run the main file with ``$ python3 ego_main.py`` diff --git a/doc/welcome.rst b/doc/welcome.rst index f24b0233..41fa38b8 100644 --- a/doc/welcome.rst +++ b/doc/welcome.rst @@ -1,6 +1,9 @@ -############ +============ The eGo tool -############ +============ + +eGo +=== The python package eGo is a toolbox and application which connects the tool eTraGo with a Optimization of flexibility options for transmission grids based on PyPSA @@ -9,7 +12,8 @@ packages are part of the research project `open_eGo `_, `eDisGo `_ and the `OpenEnergy Platform `_ which are part of the open_eGo project. + + +LICENSE +======= + +© Copyright 2015-2018 + +Flensburg University of Applied Sciences, +Europa-Universität Flensburg, +Centre for Sustainable Energy Systems + + +This program is free software: you can redistribute it and/or modify it under +the terms of the GNU Affero General Public License as published by the Free +Software Foundation, either version 3 of the License, or (at your option) +any later version. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for +more details. + +You should have received a copy of the GNU General Public License along +with this program. +If not, see `www.gnu.org/licenses `_. diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst index 2542fbc9..3f593663 100644 --- a/doc/whatsnew.rst +++ b/doc/whatsnew.rst @@ -9,5 +9,6 @@ See what's new as per release! :backlinks: top +.. include:: whatsnew/v0-2-0.rst .. include:: whatsnew/v0-1-0.rst -.. include:: whatsnew/v0-0-1.rst +.. include:: whatsnew/v0-0-1.rst diff --git a/doc/whatsnew/v0-0-1.rst b/doc/whatsnew/v0-0-1.rst index 5e7f028e..629f6914 100644 --- a/doc/whatsnew/v0-0-1.rst +++ b/doc/whatsnew/v0-0-1.rst @@ -1,17 +1,15 @@ Release v0.0.1 (February 02, 2018) ++++++++++++++++++++++++++++++++++ -As this is the first release of eGo. The tool eGo use the Python3 Packages eTraGo -(Optimization of flexibility options for transmission grids based on PyPSA) -and eDisGo (Optimization of flexibility options and grid expansion for -distribution grids based on PyPSA) for an electrical power calculation from extra high -voltage to selected low voltage level. - - +As this is the first release of eGo. The tool eGo use the Python3 Packages +eTraGo (Optimization of flexibility options for transmission grids based on +PyPSA) and eDisGo (Optimization of flexibility options and grid expansion for +distribution grids based on PyPSA) for an electrical power calculation from +extra high voltage to selected low voltage level. 
 Added features
 --------------
 * Interface between eTraGo and eDisGo
 * Plots with folium
-* first Result structure
+* first Result structure
diff --git a/doc/whatsnew/v0-2-0.rst
new file mode 100644
index 00000000..b9223dbc
--- /dev/null
+++ b/doc/whatsnew/v0-2-0.rst
@@ -0,0 +1,39 @@
+Release v0.2.0 (July 18, 2018)
+++++++++++++++++++++++++++++++
+
+Fundamental structural changes of the eGo tool are included in this release. A new feature is the integration of the MV grid power flow simulations, performed by the tool `eDisGo `_. Thereby, eGo can be used to perform power flow simulations and optimizations for EHV, HV (*eTraGo*) and MV (*eDisGo*) grids.
+
+Moreover, the use of the Dataprocessing versions ``''v0.4.1''`` and ``''v0.4.2''`` is supported. Please note that this release is still under construction and only recommended for developers of the *open_eGo* project.
+
+Furthermore, overall cost aggregation functions are available.
+
+Added features
+--------------
+
+* Cleaned and restructured eGo classes and functions
+    * Moved classes of eGo from results.py to io.py
+    * Moved several functions
+
+* Introduced new files for *eDisGo* handling
+    * edisgo_integration.py
+    * mv_cluster.py
+
+* Introduced the new file storages.py for eTraGo
+* Updated to eTraGo 0.6 and integrated eTraGo's new functions and features into eGo
+* Updated to eDisGo 0.0.3, which includes a parallelization feature
+  for custom functions and other important API changes.
+* Started to implement PEP 8 style in the eGo code
+* Implemented a logging function for the whole model
+* Using the RESTful API for the OpenEnergy Database connection, by using
+  ego.io v0.4.2. A registration is needed and can be done on
+  `openenergy-platform.org/login `_
+* Moved functionality from ``ego_main.py`` into the eGo class
+* Fixed the eTraGo scenario import of ``etrago_from_oedb()``
+
+
+Notes
+-----
+* As an external user you need to have an account on the
+  `openenergy-platform.org/login `_
+* In future versions, all MV grids (*ding0* grids) will be queried from your database. However, in this version all MV grids have to be generated with the tool `ding0 `_ and stored in *eGo*'s *data* folder.
+* Total operational costs are missing in this release
diff --git a/ego/__init__.py
index e894ccc1..34b38237 100644
--- a/ego/__init__.py
+++ b/ego/__init__.py
@@ -1,23 +1,21 @@
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU Affero General Public License as
-## published by the Free Software Foundation; either version 3 of the
-## License, or (at your option) any later version.
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation; either version 3 of the
+# License, or (at your option) any later version.
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU Affero General Public License for more details.
-## You should have received a copy of the GNU Affero General Public License
-## along with this program. If not, see .
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Affero General Public License for more details. +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . - -__version__ = "0.0.1" -__copyright__ = "Europa-Universität Flensburg, Centre for Sustainable Energy Systems" +__version__ = "0.2" +__copyright__ = ("Europa-Universität Flensburg, " + " Centre for Sustainable Energy Systems") __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke" import logging logging.basicConfig(level=logging.INFO) - - diff --git a/ego/data/ding0_grids/ding0_grids__1802.pkl b/ego/data/ding0_grids/ding0_grids__1802.pkl deleted file mode 100644 index 13ec6307..00000000 Binary files a/ego/data/ding0_grids/ding0_grids__1802.pkl and /dev/null differ diff --git a/ego/ego_main.py b/ego/ego_main.py index 82164c99..38b8846f 100644 --- a/ego/ego_main.py +++ b/ego/ego_main.py @@ -1,185 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description """ This is the application file for the tool eGo. The application eGo calculates the distribution and transmission grids of eTraGo and eDisGo. -.. warning:: - Note, that this Repository is under construction and relies on data provided - by the OEDB. Currently, only members of the openego project team have access - to this database. - +.. note:: Note, the data source of eGo relies on + the Open Energy Database. - The registration for the public + accessible API can be found on + `openenergy-platform.org/login `_. 
""" -__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität"\ - "Flensburg, Centre for Sustainable Energy Systems" -__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "wolfbunke, maltesc" import pandas as pd import os -if not 'READTHEDOCS' in os.environ: - from etrago.appl import etrago - from tools.plots import (make_all_plots,plot_line_loading, plot_stacked_gen, - add_coordinates, curtailment, gen_dist, - storage_distribution, igeoplot) - # For importing geopandas you need to install spatialindex on your system http://github.com/libspatialindex/libspatialindex/wiki/1.-Getting-Started - from tools.utilities import get_scenario_setting, get_time_steps - from tools.io import geolocation_buses, etrago_from_oedb - from tools.results import eGo - from sqlalchemy.orm import sessionmaker - from egoio.tools import db - from etrago.tools.io import results_to_oedb - -# ToDo: Logger should be set up more specific -import logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) -## Logging -logging.basicConfig(format='%(asctime)s %(message)s',level=logging.INFO) - -logger = logging.getLogger(__name__) -ego_logger = logging.getLogger('ego') - -fh = logging.FileHandler('ego.log', mode='w') -fh.setLevel(logging.INFO) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -fh.setFormatter(formatter) +if not 'READTHEDOCS' in os.environ: + from tools.io import eGo + from tools.utilities import define_logging + logger = define_logging(log_name='ego.log') -logger.addHandler(fh) -ego_logger.addHandler(fh) +__copyright__ = ("Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems") +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolf_bunke, maltesc" if __name__ == '__main__': - # import scenario settings **args of eTraGo - args = get_scenario_setting(json_file='scenario_setting.json') - print (args) - - try: - conn = db.connection(section=args['global']['db']) - Session = sessionmaker(bind=conn) - session = Session() - except OperationalError: - logger.error('Failed connection to Database', exc_info=True) - - # start calculations of eTraGo if true - if args['global']['eTraGo']: - # start eTraGo calculation - eTraGo = etrago(args['eTraGo']) - - eGo = eGo(eTraGo=eTraGo, scn_name='Status Quo') - - # add country code to bus and geometry (shapely) - # eTraGo.buses = eTraGo.buses.drop(['country_code','geometry'], axis=1) - #test = geolocation_buses(network = eTraGo, session) - - # make a line loading plot - eGo.eTraGo.plot_line_loading(eTraGo) - - - # get eTraGo results form db - if args['global']['recover']: - eTraGo = etrago_from_oedb(session,args) - - # use eTraGo results from ego calculations if true - # ToDo make function edisgo_direct_specs() - - - - if args['eDisGo']['direct_specs']: - # ToDo: add this to utilities.py - - logging.info('Retrieving Specs') - - bus_id = 27574 #23971 - - from ego.tools.specs import get_etragospecs_direct, get_mvgrid_from_bus_id - from egoio.db_tables import model_draft - specs = get_etragospecs_direct(session, bus_id, eTraGo, args) - - - - # ToDo make loop for all bus ids - # make function which links bus_id (subst_id) - if args['eDisGo']['specs']: - - logging.info('Retrieving Specs') - # ToDo make it more generic - # ToDo iteration of grids - # ToDo move part as function to utilities or specs - bus_id = 27574 #23971 - result_id = args['global']['result_id'] - - 
from ego.tools.specs import get_etragospecs_from_db, get_mvgrid_from_bus_id - from egoio.db_tables import model_draft - specs = get_etragospecs_from_db(session, bus_id, result_id) - - mv_grid = get_mvgrid_from_bus_id(session, bus_id) # This function can be used to call the correct MV grid - - if args['global']['eDisGo']: - - logging.info('Starting eDisGo') - - # ToDo move part as function to utilities or specs - from datetime import datetime - from edisgo.grid.network import Network, Scenario, TimeSeries, Results, ETraGoSpecs - import networkx as nx - import matplotlib.pyplot as plt - - # ToDo get ding0 grids over db - # ToDo implemente iteration - file_path = 'data/ding0_grids/ding0_grids__1802.pkl' - - #mv_grid = open(file_path) - - mv_grid_id = file_path.split('_')[-1].split('.')[0] - power_flow = (datetime(2011, 5, 26, 12), datetime(2011, 5, 26, 13)) # Where retrieve from? Database or specs? - - timeindex = pd.date_range(power_flow[0], power_flow[1], freq='H') - - scenario = Scenario(etrago_specs=specs, - power_flow=(), - mv_grid_id=mv_grid_id, - scenario_name= args['eTraGo']['scn_name']) - - network = Network.import_from_ding0(file_path, - id=mv_grid_id, - scenario=scenario) - # check SQ MV grid - network.analyze() - - network.results.v_res(#nodes=network.mv_grid.graph.nodes(), - level='mv') - network.results.s_res() - - # A status quo grid (without new renewable gens) should not need reinforcement - network.reinforce() - - - nx.draw(network.mv_grid.graph) - plt.draw() - plt.show() - - # network.results = Results() - costs = network.results.grid_expansion_costs - print(costs) - - - - # make interactive plot with folium - #logging.info('Starting interactive plot') - #igeoplot(network=eTraGo, session=session, args=args) # ToDo: add eDisGo results - - # calculate power plant dispatch without grid utilization (either in pypsa or in renpassgis) - - # result queries...call functions from utilities - - ## total system costs of transmission grid vs. total system costs of all distribution grids results in overall total - ## details on total system costs: - ## existing plants: usage, costs for each technology - ## newly installed plants (storages, grid measures) with size, location, usage, costs - ## grid losses: amount and costs - - # possible aggregation of results - - # exports: total system costs, plots, csv export files + # import scenario settings **args of eTraGo +# logger.info('Start calculation') + + ego = eGo(jsonpath='scenario_setting.json') +# logger.info('Print results') +# ego.etrago_line_loading() +# print(ego.etrago.generator) +# print(ego.etrago.storage_costs) +# print(ego.etrago.operating_costs) diff --git a/ego/examples/tutorials/README.md b/ego/examples/tutorials/README.md new file mode 100644 index 00000000..2d30992a --- /dev/null +++ b/ego/examples/tutorials/README.md @@ -0,0 +1,98 @@ +# eGo Tutorials + + +## eDisGo +The python package eDisGo provides a toolbox for analysis and optimization of distribution grids. This software lives in the context of the research project open_eGo. It is closely related to the python project Ding0 as this project is currently the single data source for eDisGo providing synthetic grid data for whole Germany. + + +Learn more about: +* [eDisGo – Optimization of flexibility options and grid expansion for distribution grids based on PyPSA](http://edisgo.readthedocs.io/en/dev/start_page.html) + + +## eTraGo +Optimization of flexibility options for transmission grids based on PyPSA. 
+
+A special feature in this context is that transmission grids are described by the 380, 220 and 110 kV levels in Germany. Conventionally, the 110 kV grid is part of the distribution grid. The integration of the transmission and ‘upper’ distribution grid is part of eTraGo.
+
+The optimization focuses on flexibility options, with a special emphasis on energy storage and grid expansion measures.
+
+
+The Python tool eTraGo can be used in several ways: from a terminal as an executable program, from integrated development environments (IDEs) like [Spyder](https://anaconda.org/anaconda/spyder), from [Jupyter notebooks](http://jupyter.org/install) and many more.
+
+A general description of how to install and work with eTraGo can also be found [here](http://etrago.readthedocs.io/en/latest/getting_started.html).
+
+
+# Notebook installation
+
+#### with Anaconda
+
+Download and install your Python 3.x version of Anaconda [here](https://www.anaconda.com/download/). The full documentation can be found [on this page](https://docs.anaconda.com/anaconda/install/).
+
+We use Anaconda with its own environment in order to reduce problems with packages and different versions on our system. Learn more about [Anaconda environments](https://conda.io/docs/user-guide/tasks/manage-environments.html). Remove your environment with _'conda env remove -n openMod_Zuerich2018'_.
+
+
+
+
+##### Quick start - steps to do:
+
+0. Sign-in on [openenergy-platform.org](http://openenergy-platform.org/login/)
+1. Install Anaconda
+2. Get the eGo repository from GitHub
+3. Create the environment
+4. Activate your environment
+5. Install your notebook requirements
+6. Make a few settings for your notebook
+7. Start your notebook and check if the notebook is running
+
+
+
+##### Get the eGo repository and install it with an environment
+```desktop
+
+$ git clone -b features/tutorial https://git@github.com/openego/eGo.git
+$ cd eGo/ego/examples/tutorials/
+$ conda env create --file requirements.yml
+```
+
+##### Activate your environment and run your notebooks
+```desktop
+
+$ source activate openMod_Zuerich2018
+$ jupyter notebook
+$ source deactivate
+```
+
+##### Fixes and work-arounds:
+
+* Error in the function plot_stacked_gen() due to data name changes. Fix the error in ../eGo/ego/examples/tutorials/src/etrago/etrago/tools/plot.py and add 'wind_offshore':'skyblue', 'wind_onshore':'skyblue' instead of 'wind'; restart the kernel and run
+plot_stacked_gen(network, resolution="MW") (see the sketch at the end of this README).
+
+
+##### API and ego.io settings
+
+Your API settings will be saved in the folder .egoio in the file config.ini.
+
+
+```desktop
+[oedb]
+dialect = oedialect
+username = 
+database = oedb
+host = openenergy-platform.org
+port = 80
+password = 
+```
+
+
+### Start your Notebook
+
+```desktop
+$ jupyter notebook
+```
+
+For more information see [how to run your jupyter notebook](https://jupyter.readthedocs.io/en/latest/running.html#running).
+
+
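+The plot_stacked_gen() work-around listed above, written out as a short
+sketch. The exact name and location of the colour mapping inside `plot.py`
+may differ between eTraGo versions, so treat the snippet as illustrative only:
+
+```python
+# Sketch of the plot_stacked_gen() work-around (see "Fixes and work-arounds").
+# Excerpt of .../src/etrago/etrago/tools/plot.py after the edit: the technology
+# colour mapping gets entries for the new carrier names (dict name illustrative).
+colors = {
+    # ... keep the existing entries ...
+    'wind_offshore': 'skyblue',  # replaces the former 'wind' entry
+    'wind_onshore': 'skyblue',
+}
+
+# Back in the notebook: restart the kernel and plot again.
+# `network` is the PyPSA network object from the tutorial; the import assumes
+# eTraGo's plotting module is importable as etrago.tools.plot.
+from etrago.tools.plot import plot_stacked_gen
+plot_stacked_gen(network, resolution="MW")
+```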
Note:
+ +The installation is only tested on Ubuntu 16.4. and Windows 10 with [Anaconda](https://www.anaconda.com/download/) diff --git a/ego/examples/tutorials/edisgo_simple_example.ipynb b/ego/examples/tutorials/edisgo_simple_example.ipynb new file mode 100644 index 00000000..0581729a --- /dev/null +++ b/ego/examples/tutorials/edisgo_simple_example.ipynb @@ -0,0 +1,1495 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "__copyright__ = \"Reiner Lemoine Institut gGmbH\"\n", + "__license__ = \"GNU Affero General Public License Version 3 (AGPL-3.0)\"\n", + "__url__ = \"https://github.com/openego/eDisGo/blob/master/LICENSE\"\n", + "__author__ = \"gplssm, birgits\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Welcome to the eDisGo example\n", + "\n", + "eDisGo is a **python toolbox for the analysis of distribution networks** (low and medium voltage) that can be used to investigate economically viable **network expansion** scenarios, considering alternative flexibility options such as **storages or redispatch**. \n", + "\n", + "eDisGo is developed in the [open_eGo research project](https://openegoproject.wordpress.com/). It is based on [PyPSA](https://pypsa.org/), a toolbox for simulation and optimization of power networks, and closely related to the [ding0](https://dingo.readthedocs.io/en/dev/) project. ding0 stands for distribution network generator and is a tool to generate synthetic status quo medium and low voltage power distribution networks based on open (or at least accessible) data. It is currently the single data source for eDisGo providing synthetic grid data for whole Germany.\n", + "\n", + "**! eDisGo is work in progress !** Please be aware that some of its features may still be buggy and not yet very sophisticated. We are happy for any bug reports, hints, etc. you may have for us.\n", + "\n", + "### Learn more about eDisGo\n", + "\n", + "* __[eDisGo Source Code](https://github.com/openego/eDisGo)__\n", + "* __[eDisGo Documentation](http://edisgo.readthedocs.io/en/dev/)__\n", + "\n", + "### Table of Contents\n", + "\n", + "* [The eDisGo API](#settings)\n", + "* [The eDisGo data container and grid data structure](#network)\n", + "* [Future generator capacities](#generator_scenario)\n", + "* [Grid reinforcement](#grid_reinforcement)\n", + "* [Evaluate results](#evaluation)\n", + "* [References](#references)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## About the example\n", + "\n", + "This example shows the general usage of eDisGo. Grid expansion costs for an example distribution grid (see image below) are calculated assuming additional renewable energy generators as stated in the open_eGo 'NEP 2035' scenario (based on the scenario framework of the German grid development plan (Netzentwicklungsplan) for the year 2035) and conducting a worst-case analysis. Moreover, the eDisGo network data structure and how to access the results are introduced. At the end of the example grid expansion costs for a different scenario are calculated and compared to the grid expansion costs in the 'NEP 2035' scenario.\n", + "\n", + "\n", + "\n", + "**Let's get started!** First of all we have to import some packages." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Import packages" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/Birgit.Schachler/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. For details see: .\n", + " \"\"\")\n", + "INFO:keyring.backend:Loading SecretService\n", + "INFO:keyring.backend:Loading kwallet\n", + "INFO:keyring.backend:Loading macOS\n", + "INFO:keyring.backend:Loading windows\n", + "INFO:keyring.backend:Loading Gnome\n", + "INFO:keyring.backend:Loading Google\n", + "INFO:keyring.backend:Loading Windows (alt)\n", + "INFO:keyring.backend:Loading file\n", + "INFO:keyring.backend:Loading keyczar\n", + "INFO:keyring.backend:Loading multi\n", + "INFO:keyring.backend:Loading pyfs\n" + ] + } + ], + "source": [ + "import os\n", + "import sys\n", + "import pandas as pd\n", + "\n", + "from edisgo import EDisGo\n", + "\n", + "import logging\n", + "logging.basicConfig(level=logging.ERROR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The eDisGo API \n", + "\n", + "The top-level API for setting up your scenario, invoking grid expansion and flexibility measures, etc. is provided by the **EDisGo class** (see [class documentation](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.EDisGo) for more information).\n", + "\n", + "In this example we simply want to do a worst-case analysis of a ding0 grid. For this, we only have to provide a grid and set the 'worst_case_analysis' parameter. \n", + "\n", + "#### Specifying the ding0 grid\n", + "\n", + "The ding0 grid is specified through the input parameter 'ding0_grid'. The following assumes you have a file of a ding0 grid named “ding0_grids__6.pkl” in current working directory.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "ding0_grid = os.path.join(sys.path[0], \"ding0_grids_239_DPv0.4.0.pkl\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Specifying worst-case analysis\n", + "\n", + "As worst-case analysis you can either just analyse the feed-in or the load case or do a combined analysis. Choose between the following options:\n", + "\n", + "* **’worst-case-feedin’** \n", + " \n", + " Feed-in and demand for the worst-case scenario \"feed-in case\" are generated. Demand is set to 15% of maximum demand for loads connected to the MV grid and 10% for loads connected to the LV grid. Feed-in of all generators is set to nominal power of the generator, except for PV systems where it is set to 85% of the nominal power.\n", + "\n", + " \n", + "* **’worst-case-load’**\n", + "\n", + " Feed-in and demand for the worst-case scenario \"load case\" are generated. Demand of all loads is set to maximum demand. Feed-in of all generators is set to zero.\n", + "\n", + "\n", + "* **’worst-case’**\n", + " \n", + " Feed-in and demand for the two worst-case scenarios \"feed-in case\" and \"load case\" are generated.\n", + "\n", + "Instead of doing a worst-case analysis you can also provide your own timeseries for demand and feed-in and use those in the network analysis. EDisGo also offers methods to generate load and feed-in time series. 
Check out the [EDisGo class documentation](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.EDisGo) for more information." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "worst_case_analysis = 'worst-case-feedin'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we are ready to initialize the edisgo API object." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "edisgo = EDisGo(ding0_grid=ding0_grid,\n", + " worst_case_analysis=worst_case_analysis)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The eDisGo data container and grid data structure \n", + "\n", + "The last line, besides a couple of other things, initialized the [Network class](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.Network) which serves as an overall data container in eDisGo holding the grid data for the [MV grid](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.grids.MVGrid) and [LV grids](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.grids.LVGrid), [config data](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.Config), [results](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.Results), [timeseries](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.TimeSeries), etc. It is linked from multiple locations and provides hierarchical access to all data. Network itself can be accessed via the EDisGo API object as follows:\n", + "\n", + "```python\n", + "edisgo.network\n", + "```\n", + "\n", + "As mentioned *Network* holds the MV grid and LV grids. The grid topology is represented by separate undirected graphs for the MV grid and each of the LV grids. Each of these graphs is an eDisGo [Graph](http://edisgo.readthedocs.io/en/dev/_modules/edisgo/grid/grids.html#Graph), which is subclassed from networkx.Graph and extended by extra-functionality. Lines represent edges in the graph. Other equipment is represented by a node. Let's have a look into the graph.\n", + "\n", + "First we take a look at all the **lines**." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{GeneratorFluctuating_839361: {GeneratorFluctuating_878867: {'type': 'line',\n", + " 'line': Line_2390003},\n", + " BranchTee_MVGrid_239_86: {'type': 'line', 'line': Line_2390004}},\n", + " GeneratorFluctuating_839362: {BranchTee_MVGrid_239_87: {'type': 'line',\n", + " 'line': Line_2390005}},\n", + " GeneratorFluctuating_839363: {BranchTee_MVGrid_239_88: {'type': 'line',\n", + " 'line': Line_2390006}},\n", + " GeneratorFluctuating_839364: {LVStation_119612: {'type': 'line',\n", + " 'line': Line_2390007}},\n", + " GeneratorFluctuating_878450: {BranchTee_MVGrid_239_89: {'type': 'line',\n", + " 'line': Line_2390008}},\n", + " GeneratorFluctuating_878583: {BranchTee_MVGrid_239_84: {'type': 'line',\n", + " 'line': Line_2390001}},\n", + " GeneratorFluctuating_878609: {MVStation_239: {'type': 'line',\n", + " 'line': Line_2390009}},\n", + " GeneratorFluctuating_878611: {MVStation_239: {'type': 'line',\n", + " 'line': Line_2390010}},\n", + " GeneratorFluctuating_878614: {MVStation_239: {'type': 'line',\n", + " 'line': Line_2390011}},\n", + " GeneratorFluctuating_878615: {MVStation_239: {'type': 'line',\n", + " 'line': Line_2390012}},\n", + " GeneratorFluctuating_878862: {BranchTee_MVGrid_239_90: {'type': 'line',\n", + " 'line': Line_2390013}},\n", + " GeneratorFluctuating_878863: {BranchTee_MVGrid_239_91: {'type': 'line',\n", + " 'line': Line_2390014}},\n", + " GeneratorFluctuating_878864: {BranchTee_MVGrid_239_92: {'type': 'line',\n", + " 'line': Line_2390015}},\n", + " GeneratorFluctuating_878865: {BranchTee_MVGrid_239_93: {'type': 'line',\n", + " 'line': Line_2390016}},\n", + " GeneratorFluctuating_878866: {BranchTee_MVGrid_239_94: {'type': 'line',\n", + " 'line': Line_2390017}},\n", + " GeneratorFluctuating_878867: {GeneratorFluctuating_839361: {'type': 'line',\n", + " 'line': Line_2390003}},\n", + " GeneratorFluctuating_878875: {MVStation_239: {'type': 'line',\n", + " 'line': Line_2390018}},\n", + " GeneratorFluctuating_878950: {MVStation_239: {'type': 'line',\n", + " 'line': Line_2390019}},\n", + " GeneratorFluctuating_878963: {BranchTee_MVGrid_239_85: {'type': 'line',\n", + " 'line': Line_2390002}},\n", + " BranchTee_MVGrid_239_1: {LVStation_119897: {'type': 'line',\n", + " 'line': Line_2390050},\n", + " MVDisconnectingPoint_1: {'line': Line_2390252, 'type': 'line'}},\n", + " BranchTee_MVGrid_239_2: {MVStation_239: {'type': 'line',\n", + " 'line': Line_2390266},\n", + " MVDisconnectingPoint_2: {'line': Line_2390265, 'type': 'line'}},\n", + " BranchTee_MVGrid_239_3: {LVStation_125269: {'type': 'line',\n", + " 'line': Line_2390181},\n", + " BranchTee_MVGrid_239_88: {'type': 'line', 'line': Line_2390273},\n", + " BranchTee_MVGrid_239_89: {'type': 'line', 'line': Line_2390274}},\n", + " BranchTee_MVGrid_239_4: {LVStation_119904: {'type': 'line',\n", + " 'line': Line_2390055},\n", + " BranchTee_MVGrid_239_18: {'type': 'line', 'line': Line_2390251},\n", + " BranchTee_MVGrid_239_20: {'type': 'line', 'line': Line_2390254}},\n", + " BranchTee_MVGrid_239_5: {LVStation_120736: {'type': 'line',\n", + " 'line': Line_2390071},\n", + " BranchTee_MVGrid_239_6: {'type': 'line', 'line': Line_2390277},\n", + " BranchTee_MVGrid_239_8: {'type': 'line', 'line': Line_2390278}},\n", + " BranchTee_MVGrid_239_6: {LVStation_120411: {'type': 'line',\n", + " 'line': Line_2390064},\n", + " BranchTee_MVGrid_239_25: {'type': 'line', 'line': Line_2390260},\n", + " BranchTee_MVGrid_239_5: {'type': 
'line', 'line': Line_2390277}},\n", + " BranchTee_MVGrid_239_7: {LVStation_120470: {'type': 'line',\n", + " 'line': Line_2390065},\n", + " LVStation_419726: {'type': 'line', 'line': Line_2390224},\n", + " BranchTee_MVGrid_239_8: {'type': 'line', 'line': Line_2390285}},\n", + " BranchTee_MVGrid_239_8: {LVStation_419795: {'type': 'line',\n", + " 'line': Line_2390225},\n", + " BranchTee_MVGrid_239_5: {'type': 'line', 'line': Line_2390278},\n", + " BranchTee_MVGrid_239_7: {'type': 'line', 'line': Line_2390285}},\n", + " BranchTee_MVGrid_239_9: {LVStation_120555: {'type': 'line',\n", + " 'line': Line_2390067},\n", + " LVStation_120585: {'type': 'line', 'line': Line_2390069},\n", + " BranchTee_MVGrid_239_10: {'type': 'line', 'line': Line_2390242}},\n", + " BranchTee_MVGrid_239_10: {LVStation_511325: {'type': 'line',\n", + " 'line': Line_2390238},\n", + " BranchTee_MVGrid_239_13: {'type': 'line', 'line': Line_2390241},\n", + " BranchTee_MVGrid_239_9: {'type': 'line', 'line': Line_2390242}},\n", + " BranchTee_MVGrid_239_11: {LVStation_120898: {'type': 'line',\n", + " 'line': Line_2390076},\n", + " LVStation_418244: {'type': 'line', 'line': Line_2390219},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390243}},\n", + " BranchTee_MVGrid_239_12: {LVStation_120943: {'type': 'line',\n", + " 'line': Line_2390080},\n", + " LVStation_419885: {'type': 'line', 'line': Line_2390226},\n", + " BranchTee_MVGrid_239_84: {'type': 'line', 'line': Line_2390244}},\n", + " BranchTee_MVGrid_239_13: {LVStation_121317: {'type': 'line',\n", + " 'line': Line_2390087},\n", + " LVStation_416441: {'type': 'line', 'line': Line_2390204},\n", + " BranchTee_MVGrid_239_10: {'type': 'line', 'line': Line_2390241}},\n", + " BranchTee_MVGrid_239_14: {LVStation_121289: {'type': 'line',\n", + " 'line': Line_2390086},\n", + " LVStation_417530: {'type': 'line', 'line': Line_2390211},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390245}},\n", + " BranchTee_MVGrid_239_15: {LVStation_121776: {'type': 'line',\n", + " 'line': Line_2390096},\n", + " BranchTee_MVGrid_239_29: {'type': 'line', 'line': Line_2390246},\n", + " BranchTee_MVGrid_239_55: {'type': 'line', 'line': Line_2390247}},\n", + " BranchTee_MVGrid_239_16: {LVStation_417276: {'type': 'line',\n", + " 'line': Line_2390210},\n", + " BranchTee_MVGrid_239_19: {'type': 'line', 'line': Line_2390248},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390249}},\n", + " BranchTee_MVGrid_239_17: {LVStation_124110: {'type': 'line',\n", + " 'line': Line_2390148},\n", + " LVStation_416815: {'type': 'line', 'line': Line_2390207},\n", + " LVStation_416983: {'type': 'line', 'line': Line_2390209}},\n", + " BranchTee_MVGrid_239_18: {LVStation_121940: {'type': 'line',\n", + " 'line': Line_2390110},\n", + " BranchTee_MVGrid_239_29: {'type': 'line', 'line': Line_2390250},\n", + " BranchTee_MVGrid_239_4: {'type': 'line', 'line': Line_2390251}},\n", + " BranchTee_MVGrid_239_19: {LVStation_121919: {'type': 'line',\n", + " 'line': Line_2390108},\n", + " LVStation_416589: {'type': 'line', 'line': Line_2390206},\n", + " BranchTee_MVGrid_239_16: {'type': 'line', 'line': Line_2390248}},\n", + " BranchTee_MVGrid_239_20: {LVStation_419079: {'type': 'line',\n", + " 'line': Line_2390223},\n", + " BranchTee_MVGrid_239_35: {'type': 'line', 'line': Line_2390253},\n", + " BranchTee_MVGrid_239_4: {'type': 'line', 'line': Line_2390254}},\n", + " BranchTee_MVGrid_239_21: {LVStation_121879: {'type': 'line',\n", + " 'line': Line_2390099},\n", + " LVStation_122400: {'type': 'line', 'line': 
Line_2390128},\n", + " BranchTee_MVGrid_239_34: {'type': 'line', 'line': Line_2390255}},\n", + " BranchTee_MVGrid_239_22: {LVStation_122077: {'type': 'line',\n", + " 'line': Line_2390115},\n", + " LVStation_122078: {'type': 'line', 'line': Line_2390117},\n", + " LVStation_418237: {'type': 'line', 'line': Line_2390218}},\n", + " BranchTee_MVGrid_239_23: {LVStation_124085: {'type': 'line',\n", + " 'line': Line_2390144},\n", + " BranchTee_MVGrid_239_43: {'type': 'line', 'line': Line_2390256},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390257}},\n", + " BranchTee_MVGrid_239_24: {LVStation_416449: {'type': 'line',\n", + " 'line': Line_2390205},\n", + " BranchTee_MVGrid_239_70: {'type': 'line', 'line': Line_2390258},\n", + " BranchTee_MVGrid_239_85: {'type': 'line', 'line': Line_2390259}},\n", + " BranchTee_MVGrid_239_25: {LVStation_503036: {'type': 'line',\n", + " 'line': Line_2390237},\n", + " BranchTee_MVGrid_239_6: {'type': 'line', 'line': Line_2390260},\n", + " BranchTee_MVGrid_239_76: {'type': 'line', 'line': Line_2390261}},\n", + " BranchTee_MVGrid_239_26: {LVStation_417550: {'type': 'line',\n", + " 'line': Line_2390213},\n", + " LVStation_417936: {'type': 'line', 'line': Line_2390216},\n", + " BranchTee_MVGrid_239_64: {'type': 'line', 'line': Line_2390262}},\n", + " BranchTee_MVGrid_239_27: {LVStation_417909: {'type': 'line',\n", + " 'line': Line_2390215},\n", + " LVStation_417987: {'type': 'line', 'line': Line_2390217},\n", + " BranchTee_MVGrid_239_78: {'type': 'line', 'line': Line_2390263}},\n", + " BranchTee_MVGrid_239_28: {LVStation_122077: {'type': 'line',\n", + " 'line': Line_2390116},\n", + " LVStation_122426: {'type': 'line', 'line': Line_2390132},\n", + " BranchTee_MVGrid_239_54: {'type': 'line', 'line': Line_2390264}},\n", + " BranchTee_MVGrid_239_29: {LVStation_122480: {'type': 'line',\n", + " 'line': Line_2390133},\n", + " BranchTee_MVGrid_239_15: {'type': 'line', 'line': Line_2390246},\n", + " BranchTee_MVGrid_239_18: {'type': 'line', 'line': Line_2390250}},\n", + " BranchTee_MVGrid_239_30: {BranchTee_MVGrid_239_32: {'type': 'line',\n", + " 'line': Line_2390267},\n", + " BranchTee_MVGrid_239_35: {'type': 'line', 'line': Line_2390268},\n", + " BranchTee_MVGrid_239_91: {'type': 'line', 'line': Line_2390269}},\n", + " BranchTee_MVGrid_239_31: {LVStation_139149: {'type': 'line',\n", + " 'line': Line_2390189},\n", + " LVStation_139186: {'type': 'line', 'line': Line_2390198},\n", + " BranchTee_MVGrid_239_34: {'type': 'line', 'line': Line_2390270}},\n", + " BranchTee_MVGrid_239_32: {LVStation_139104: {'type': 'line',\n", + " 'line': Line_2390184},\n", + " BranchTee_MVGrid_239_30: {'type': 'line', 'line': Line_2390267},\n", + " BranchTee_MVGrid_239_33: {'type': 'line', 'line': Line_2390271}},\n", + " BranchTee_MVGrid_239_33: {LVStation_419885: {'type': 'line',\n", + " 'line': Line_2390227},\n", + " BranchTee_MVGrid_239_32: {'type': 'line', 'line': Line_2390271},\n", + " BranchTee_MVGrid_239_86: {'type': 'line', 'line': Line_2390272}},\n", + " BranchTee_MVGrid_239_34: {LVStation_139150: {'type': 'line',\n", + " 'line': Line_2390191},\n", + " BranchTee_MVGrid_239_21: {'type': 'line', 'line': Line_2390255},\n", + " BranchTee_MVGrid_239_31: {'type': 'line', 'line': Line_2390270}},\n", + " BranchTee_MVGrid_239_35: {LVStation_122231: {'type': 'line',\n", + " 'line': Line_2390127},\n", + " BranchTee_MVGrid_239_20: {'type': 'line', 'line': Line_2390253},\n", + " BranchTee_MVGrid_239_30: {'type': 'line', 'line': Line_2390268}},\n", + " BranchTee_MVGrid_239_36: {LVStation_118322: 
{'type': 'line',\n", + " 'line': Line_2390020},\n", + " LVStation_118323: {'type': 'line', 'line': Line_2390021},\n", + " LVStation_124111: {'type': 'line', 'line': Line_2390150}},\n", + " BranchTee_MVGrid_239_37: {LVStation_118324: {'type': 'line',\n", + " 'line': Line_2390023},\n", + " LVStation_515314: {'type': 'line', 'line': Line_2390240}},\n", + " BranchTee_MVGrid_239_38: {LVStation_119612: {'type': 'line',\n", + " 'line': Line_2390024},\n", + " LVStation_119613: {'type': 'line', 'line': Line_2390026},\n", + " LVStation_417898: {'type': 'line', 'line': Line_2390214}},\n", + " BranchTee_MVGrid_239_39: {LVStation_119698: {'type': 'line',\n", + " 'line': Line_2390029}},\n", + " BranchTee_MVGrid_239_40: {LVStation_119701: {'type': 'line',\n", + " 'line': Line_2390032},\n", + " LVStation_119704: {'type': 'line', 'line': Line_2390035}},\n", + " BranchTee_MVGrid_239_41: {LVStation_119891: {'type': 'line',\n", + " 'line': Line_2390040},\n", + " LVStation_119894: {'type': 'line', 'line': Line_2390044},\n", + " LVStation_119895: {'type': 'line', 'line': Line_2390046}},\n", + " BranchTee_MVGrid_239_42: {LVStation_119896: {'type': 'line',\n", + " 'line': Line_2390048},\n", + " LVStation_119900: {'type': 'line', 'line': Line_2390052},\n", + " BranchTee_MVGrid_239_43: {'type': 'line', 'line': Line_2390275}},\n", + " BranchTee_MVGrid_239_43: {LVStation_119901: {'type': 'line',\n", + " 'line': Line_2390053},\n", + " BranchTee_MVGrid_239_23: {'type': 'line', 'line': Line_2390256},\n", + " BranchTee_MVGrid_239_42: {'type': 'line', 'line': Line_2390275}},\n", + " BranchTee_MVGrid_239_44: {LVStation_119892: {'type': 'line',\n", + " 'line': Line_2390041},\n", + " LVStation_119893: {'type': 'line', 'line': Line_2390042},\n", + " LVStation_119895: {'type': 'line', 'line': Line_2390047},\n", + " LVStation_119896: {'type': 'line', 'line': Line_2390049},\n", + " BranchTee_MVGrid_239_93: {'type': 'line', 'line': Line_2390276}},\n", + " BranchTee_MVGrid_239_45: {LVStation_119903: {'type': 'line',\n", + " 'line': Line_2390054},\n", + " LVStation_119904: {'type': 'line', 'line': Line_2390056}},\n", + " BranchTee_MVGrid_239_46: {LVStation_120387: {'type': 'line',\n", + " 'line': Line_2390059},\n", + " LVStation_120390: {'type': 'line', 'line': Line_2390063}},\n", + " BranchTee_MVGrid_239_47: {LVStation_120555: {'type': 'line',\n", + " 'line': Line_2390066},\n", + " LVStation_500916: {'type': 'line', 'line': Line_2390235}},\n", + " BranchTee_MVGrid_239_48: {LVStation_120737: {'type': 'line',\n", + " 'line': Line_2390072},\n", + " LVStation_120738: {'type': 'line', 'line': Line_2390073}},\n", + " BranchTee_MVGrid_239_49: {LVStation_120853: {'type': 'line',\n", + " 'line': Line_2390075},\n", + " LVStation_511325: {'type': 'line', 'line': Line_2390239}},\n", + " BranchTee_MVGrid_239_50: {LVStation_120898: {'type': 'line',\n", + " 'line': Line_2390077},\n", + " LVStation_120899: {'type': 'line', 'line': Line_2390078}},\n", + " BranchTee_MVGrid_239_51: {LVStation_120942: {'type': 'line',\n", + " 'line': Line_2390079},\n", + " LVStation_120943: {'type': 'line', 'line': Line_2390081}},\n", + " BranchTee_MVGrid_239_52: {LVStation_121286: {'type': 'line',\n", + " 'line': Line_2390082},\n", + " LVStation_121287: {'type': 'line', 'line': Line_2390083},\n", + " LVStation_121288: {'type': 'line', 'line': Line_2390085}},\n", + " BranchTee_MVGrid_239_53: {LVStation_121317: {'type': 'line',\n", + " 'line': Line_2390088},\n", + " LVStation_121318: {'type': 'line', 'line': Line_2390090}},\n", + " BranchTee_MVGrid_239_54: 
{LVStation_121742: {'type': 'line',\n", + " 'line': Line_2390093},\n", + " LVStation_121743: {'type': 'line', 'line': Line_2390095},\n", + " BranchTee_MVGrid_239_28: {'type': 'line', 'line': Line_2390264}},\n", + " BranchTee_MVGrid_239_55: {LVStation_121741: {'type': 'line',\n", + " 'line': Line_2390092},\n", + " LVStation_121742: {'type': 'line', 'line': Line_2390094},\n", + " LVStation_122230: {'type': 'line', 'line': Line_2390124},\n", + " LVStation_496409: {'type': 'line', 'line': Line_2390233},\n", + " BranchTee_MVGrid_239_15: {'type': 'line', 'line': Line_2390247}},\n", + " BranchTee_MVGrid_239_56: {LVStation_121879: {'type': 'line',\n", + " 'line': Line_2390100},\n", + " LVStation_121880: {'type': 'line', 'line': Line_2390102}},\n", + " BranchTee_MVGrid_239_57: {LVStation_121915: {'type': 'line',\n", + " 'line': Line_2390105},\n", + " LVStation_121916: {'type': 'line', 'line': Line_2390107},\n", + " LVStation_121919: {'type': 'line', 'line': Line_2390109}},\n", + " BranchTee_MVGrid_239_58: {LVStation_121940: {'type': 'line',\n", + " 'line': Line_2390111},\n", + " LVStation_121941: {'type': 'line', 'line': Line_2390112}},\n", + " BranchTee_MVGrid_239_59: {LVStation_122076: {'type': 'line',\n", + " 'line': Line_2390113},\n", + " LVStation_122078: {'type': 'line', 'line': Line_2390118}},\n", + " BranchTee_MVGrid_239_60: {LVStation_122123: {'type': 'line',\n", + " 'line': Line_2390120},\n", + " LVStation_122124: {'type': 'line', 'line': Line_2390121},\n", + " LVStation_122125: {'type': 'line', 'line': Line_2390123}},\n", + " BranchTee_MVGrid_239_61: {LVStation_122230: {'type': 'line',\n", + " 'line': Line_2390125}},\n", + " BranchTee_MVGrid_239_62: {LVStation_122400: {'type': 'line',\n", + " 'line': Line_2390129},\n", + " LVStation_122401: {'type': 'line', 'line': Line_2390130}},\n", + " BranchTee_MVGrid_239_63: {LVStation_122408: {'type': 'line',\n", + " 'line': Line_2390131},\n", + " LVStation_485974: {'type': 'line', 'line': Line_2390228}},\n", + " BranchTee_MVGrid_239_64: {LVStation_122696: {'type': 'line',\n", + " 'line': Line_2390135},\n", + " BranchTee_MVGrid_239_26: {'type': 'line', 'line': Line_2390262},\n", + " BranchTee_MVGrid_239_66: {'type': 'line', 'line': Line_2390279}},\n", + " BranchTee_MVGrid_239_65: {LVStation_122698: {'type': 'line',\n", + " 'line': Line_2390138},\n", + " LVStation_122699: {'type': 'line', 'line': Line_2390139},\n", + " BranchTee_MVGrid_239_66: {'type': 'line', 'line': Line_2390280}},\n", + " BranchTee_MVGrid_239_66: {LVStation_122697: {'type': 'line',\n", + " 'line': Line_2390136},\n", + " LVStation_418254: {'type': 'line', 'line': Line_2390220},\n", + " BranchTee_MVGrid_239_64: {'type': 'line', 'line': Line_2390279},\n", + " BranchTee_MVGrid_239_65: {'type': 'line', 'line': Line_2390280},\n", + " BranchTee_MVGrid_239_90: {'type': 'line', 'line': Line_2390281}},\n", + " BranchTee_MVGrid_239_67: {LVStation_124010: {'type': 'line',\n", + " 'line': Line_2390141},\n", + " LVStation_124011: {'type': 'line', 'line': Line_2390143}},\n", + " BranchTee_MVGrid_239_68: {LVStation_124085: {'type': 'line',\n", + " 'line': Line_2390145},\n", + " LVStation_124086: {'type': 'line', 'line': Line_2390146}},\n", + " BranchTee_MVGrid_239_69: {LVStation_124109: {'type': 'line',\n", + " 'line': Line_2390147},\n", + " LVStation_124110: {'type': 'line', 'line': Line_2390149},\n", + " LVStation_124111: {'type': 'line', 'line': Line_2390151}},\n", + " BranchTee_MVGrid_239_70: {LVStation_498758: {'type': 'line',\n", + " 'line': Line_2390234},\n", + " BranchTee_MVGrid_239_24: 
{'type': 'line', 'line': Line_2390258},\n", + " BranchTee_MVGrid_239_71: {'type': 'line', 'line': Line_2390282}},\n", + " BranchTee_MVGrid_239_71: {LVStation_124582: {'type': 'line',\n", + " 'line': Line_2390154},\n", + " LVStation_124583: {'type': 'line', 'line': Line_2390156},\n", + " BranchTee_MVGrid_239_70: {'type': 'line', 'line': Line_2390282}},\n", + " BranchTee_MVGrid_239_72: {LVStation_124911: {'type': 'line',\n", + " 'line': Line_2390160}},\n", + " BranchTee_MVGrid_239_73: {LVStation_125016: {'type': 'line',\n", + " 'line': Line_2390163},\n", + " LVStation_500931: {'type': 'line', 'line': Line_2390236},\n", + " BranchTee_MVGrid_239_74: {'type': 'line', 'line': Line_2390283}},\n", + " BranchTee_MVGrid_239_74: {LVStation_125015: {'type': 'line',\n", + " 'line': Line_2390162},\n", + " LVStation_125017: {'type': 'line', 'line': Line_2390165},\n", + " BranchTee_MVGrid_239_73: {'type': 'line', 'line': Line_2390283}},\n", + " BranchTee_MVGrid_239_75: {LVStation_125210: {'type': 'line',\n", + " 'line': Line_2390166},\n", + " LVStation_125211: {'type': 'line', 'line': Line_2390169},\n", + " MVDisconnectingPoint_4: {'line': Line_2390284, 'type': 'line'}},\n", + " BranchTee_MVGrid_239_76: {LVStation_125214: {'type': 'line',\n", + " 'line': Line_2390172},\n", + " LVStation_125215: {'type': 'line', 'line': Line_2390174},\n", + " BranchTee_MVGrid_239_25: {'type': 'line', 'line': Line_2390261}},\n", + " BranchTee_MVGrid_239_77: {LVStation_124911: {'type': 'line',\n", + " 'line': Line_2390161},\n", + " LVStation_125213: {'type': 'line', 'line': Line_2390171},\n", + " LVStation_125214: {'type': 'line', 'line': Line_2390173}},\n", + " BranchTee_MVGrid_239_78: {LVStation_125267: {'type': 'line',\n", + " 'line': Line_2390178},\n", + " LVStation_125268: {'type': 'line', 'line': Line_2390179},\n", + " BranchTee_MVGrid_239_27: {'type': 'line', 'line': Line_2390263}},\n", + " BranchTee_MVGrid_239_79: {LVStation_125268: {'type': 'line',\n", + " 'line': Line_2390180},\n", + " LVStation_125269: {'type': 'line', 'line': Line_2390182}},\n", + " BranchTee_MVGrid_239_80: {LVStation_139104: {'type': 'line',\n", + " 'line': Line_2390185},\n", + " LVStation_139105: {'type': 'line', 'line': Line_2390186},\n", + " LVStation_139106: {'type': 'line', 'line': Line_2390187}},\n", + " BranchTee_MVGrid_239_81: {LVStation_139150: {'type': 'line',\n", + " 'line': Line_2390192}},\n", + " BranchTee_MVGrid_239_82: {LVStation_139183: {'type': 'line',\n", + " 'line': Line_2390195},\n", + " LVStation_488816: {'type': 'line', 'line': Line_2390231},\n", + " LVStation_490253: {'type': 'line', 'line': Line_2390232}},\n", + " BranchTee_MVGrid_239_83: {LVStation_139186: {'type': 'line',\n", + " 'line': Line_2390199},\n", + " LVStation_139187: {'type': 'line', 'line': Line_2390201},\n", + " MVDisconnectingPoint_5: {'line': Line_2390197, 'type': 'line'}},\n", + " BranchTee_MVGrid_239_84: {GeneratorFluctuating_878583: {'type': 'line',\n", + " 'line': Line_2390001},\n", + " LVStation_120738: {'type': 'line', 'line': Line_2390074},\n", + " BranchTee_MVGrid_239_12: {'type': 'line', 'line': Line_2390244}},\n", + " BranchTee_MVGrid_239_85: {GeneratorFluctuating_878963: {'type': 'line',\n", + " 'line': Line_2390002},\n", + " LVStation_416815: {'type': 'line', 'line': Line_2390208},\n", + " BranchTee_MVGrid_239_24: {'type': 'line', 'line': Line_2390259}},\n", + " BranchTee_MVGrid_239_86: {GeneratorFluctuating_839361: {'type': 'line',\n", + " 'line': Line_2390004},\n", + " LVStation_139107: {'type': 'line', 'line': Line_2390188},\n", + " 
BranchTee_MVGrid_239_33: {'type': 'line', 'line': Line_2390272}},\n", + " BranchTee_MVGrid_239_87: {GeneratorFluctuating_839362: {'type': 'line',\n", + " 'line': Line_2390005},\n", + " LVStation_119698: {'type': 'line', 'line': Line_2390030},\n", + " LVStation_119889: {'type': 'line', 'line': Line_2390037}},\n", + " BranchTee_MVGrid_239_88: {GeneratorFluctuating_839363: {'type': 'line',\n", + " 'line': Line_2390006},\n", + " LVStation_125210: {'type': 'line', 'line': Line_2390167},\n", + " BranchTee_MVGrid_239_3: {'type': 'line', 'line': Line_2390273}},\n", + " BranchTee_MVGrid_239_89: {GeneratorFluctuating_878450: {'type': 'line',\n", + " 'line': Line_2390008},\n", + " LVStation_418546: {'type': 'line', 'line': Line_2390221},\n", + " BranchTee_MVGrid_239_3: {'type': 'line', 'line': Line_2390274}},\n", + " BranchTee_MVGrid_239_90: {GeneratorFluctuating_878862: {'type': 'line',\n", + " 'line': Line_2390013},\n", + " LVStation_123655: {'type': 'line', 'line': Line_2390140},\n", + " BranchTee_MVGrid_239_66: {'type': 'line', 'line': Line_2390281}},\n", + " BranchTee_MVGrid_239_91: {GeneratorFluctuating_878863: {'type': 'line',\n", + " 'line': Line_2390014},\n", + " LVStation_122520: {'type': 'line', 'line': Line_2390134},\n", + " BranchTee_MVGrid_239_30: {'type': 'line', 'line': Line_2390269}},\n", + " BranchTee_MVGrid_239_92: {GeneratorFluctuating_878864: {'type': 'line',\n", + " 'line': Line_2390015},\n", + " LVStation_119897: {'type': 'line', 'line': Line_2390051},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390286}},\n", + " BranchTee_MVGrid_239_93: {GeneratorFluctuating_878865: {'type': 'line',\n", + " 'line': Line_2390016},\n", + " LVStation_120038: {'type': 'line', 'line': Line_2390057},\n", + " BranchTee_MVGrid_239_44: {'type': 'line', 'line': Line_2390276}},\n", + " BranchTee_MVGrid_239_94: {GeneratorFluctuating_878866: {'type': 'line',\n", + " 'line': Line_2390017},\n", + " LVStation_124910: {'type': 'line', 'line': Line_2390159},\n", + " LVStation_125216: {'type': 'line', 'line': Line_2390175}},\n", + " LVStation_122408: {BranchTee_MVGrid_239_63: {'type': 'line',\n", + " 'line': Line_2390131}},\n", + " LVStation_485974: {BranchTee_MVGrid_239_63: {'type': 'line',\n", + " 'line': Line_2390228},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390229}},\n", + " LVStation_138585: {LVStation_139149: {'type': 'line', 'line': Line_2390183}},\n", + " LVStation_119895: {LVStation_119899: {'type': 'line', 'line': Line_2390045},\n", + " BranchTee_MVGrid_239_41: {'type': 'line', 'line': Line_2390046},\n", + " BranchTee_MVGrid_239_44: {'type': 'line', 'line': Line_2390047}},\n", + " LVStation_119896: {BranchTee_MVGrid_239_42: {'type': 'line',\n", + " 'line': Line_2390048},\n", + " BranchTee_MVGrid_239_44: {'type': 'line', 'line': Line_2390049}},\n", + " LVStation_119889: {LVStation_119891: {'type': 'line', 'line': Line_2390036},\n", + " BranchTee_MVGrid_239_87: {'type': 'line', 'line': Line_2390037}},\n", + " LVStation_119890: {LVStation_119893: {'type': 'line', 'line': Line_2390038},\n", + " LVStation_416175: {'type': 'line', 'line': Line_2390039}},\n", + " LVStation_119891: {LVStation_119889: {'type': 'line', 'line': Line_2390036},\n", + " BranchTee_MVGrid_239_41: {'type': 'line', 'line': Line_2390040}},\n", + " LVStation_119892: {LVStation_119697: {'type': 'line', 'line': Line_2390027},\n", + " BranchTee_MVGrid_239_44: {'type': 'line', 'line': Line_2390041}},\n", + " LVStation_119893: {LVStation_119890: {'type': 'line', 'line': Line_2390038},\n", + " BranchTee_MVGrid_239_44: 
{'type': 'line', 'line': Line_2390042}},\n", + " LVStation_119894: {LVStation_119898: {'type': 'line', 'line': Line_2390043},\n", + " BranchTee_MVGrid_239_41: {'type': 'line', 'line': Line_2390044}},\n", + " LVStation_119897: {BranchTee_MVGrid_239_1: {'type': 'line',\n", + " 'line': Line_2390050},\n", + " BranchTee_MVGrid_239_92: {'type': 'line', 'line': Line_2390051}},\n", + " LVStation_119898: {LVStation_119894: {'type': 'line', 'line': Line_2390043}},\n", + " LVStation_119899: {LVStation_119895: {'type': 'line', 'line': Line_2390045}},\n", + " LVStation_119900: {BranchTee_MVGrid_239_42: {'type': 'line',\n", + " 'line': Line_2390052}},\n", + " LVStation_119901: {BranchTee_MVGrid_239_43: {'type': 'line',\n", + " 'line': Line_2390053}},\n", + " LVStation_417530: {BranchTee_MVGrid_239_14: {'type': 'line',\n", + " 'line': Line_2390211}},\n", + " LVStation_419885: {BranchTee_MVGrid_239_12: {'type': 'line',\n", + " 'line': Line_2390226},\n", + " BranchTee_MVGrid_239_33: {'type': 'line', 'line': Line_2390227}},\n", + " LVStation_121940: {BranchTee_MVGrid_239_18: {'type': 'line',\n", + " 'line': Line_2390110},\n", + " BranchTee_MVGrid_239_58: {'type': 'line', 'line': Line_2390111}},\n", + " LVStation_121941: {BranchTee_MVGrid_239_58: {'type': 'line',\n", + " 'line': Line_2390112}},\n", + " LVStation_122426: {BranchTee_MVGrid_239_28: {'type': 'line',\n", + " 'line': Line_2390132}},\n", + " LVStation_122480: {BranchTee_MVGrid_239_29: {'type': 'line',\n", + " 'line': Line_2390133}},\n", + " LVStation_418254: {LVStation_124543: {'type': 'line', 'line': Line_2390152},\n", + " BranchTee_MVGrid_239_66: {'type': 'line', 'line': Line_2390220}},\n", + " LVStation_419605: {LVStation_125217: {'type': 'line', 'line': Line_2390176}},\n", + " LVStation_416441: {BranchTee_MVGrid_239_13: {'type': 'line',\n", + " 'line': Line_2390204}},\n", + " LVStation_418546: {BranchTee_MVGrid_239_89: {'type': 'line',\n", + " 'line': Line_2390221}},\n", + " LVStation_416244: {LVStation_119697: {'type': 'line', 'line': Line_2390028}},\n", + " LVStation_417898: {LVStation_417550: {'type': 'line', 'line': Line_2390212},\n", + " BranchTee_MVGrid_239_38: {'type': 'line', 'line': Line_2390214}},\n", + " LVStation_419795: {BranchTee_MVGrid_239_8: {'type': 'line',\n", + " 'line': Line_2390225}},\n", + " LVStation_120737: {LVStation_120736: {'type': 'line', 'line': Line_2390070},\n", + " BranchTee_MVGrid_239_48: {'type': 'line', 'line': Line_2390072}},\n", + " LVStation_120736: {LVStation_120737: {'type': 'line', 'line': Line_2390070},\n", + " BranchTee_MVGrid_239_5: {'type': 'line', 'line': Line_2390071}},\n", + " LVStation_120738: {BranchTee_MVGrid_239_48: {'type': 'line',\n", + " 'line': Line_2390073},\n", + " BranchTee_MVGrid_239_84: {'type': 'line', 'line': Line_2390074}},\n", + " LVStation_120942: {BranchTee_MVGrid_239_51: {'type': 'line',\n", + " 'line': Line_2390079}},\n", + " LVStation_120943: {BranchTee_MVGrid_239_12: {'type': 'line',\n", + " 'line': Line_2390080},\n", + " BranchTee_MVGrid_239_51: {'type': 'line', 'line': Line_2390081}},\n", + " LVStation_122230: {BranchTee_MVGrid_239_55: {'type': 'line',\n", + " 'line': Line_2390124},\n", + " BranchTee_MVGrid_239_61: {'type': 'line', 'line': Line_2390125}},\n", + " LVStation_122231: {LVStation_139192: {'type': 'line', 'line': Line_2390126},\n", + " BranchTee_MVGrid_239_35: {'type': 'line', 'line': Line_2390127}},\n", + " LVStation_418237: {BranchTee_MVGrid_239_22: {'type': 'line',\n", + " 'line': Line_2390218}},\n", + " LVStation_416449: {BranchTee_MVGrid_239_24: {'type': 
'line',\n", + " 'line': Line_2390205}},\n", + " LVStation_417550: {LVStation_417898: {'type': 'line', 'line': Line_2390212},\n", + " BranchTee_MVGrid_239_26: {'type': 'line', 'line': Line_2390213}},\n", + " LVStation_139107: {BranchTee_MVGrid_239_86: {'type': 'line',\n", + " 'line': Line_2390188}},\n", + " LVStation_120585: {LVStation_124583: {'type': 'line', 'line': Line_2390068},\n", + " BranchTee_MVGrid_239_9: {'type': 'line', 'line': Line_2390069}},\n", + " LVStation_417276: {BranchTee_MVGrid_239_16: {'type': 'line',\n", + " 'line': Line_2390210}},\n", + " LVStation_122520: {BranchTee_MVGrid_239_91: {'type': 'line',\n", + " 'line': Line_2390134}},\n", + " LVStation_419726: {BranchTee_MVGrid_239_7: {'type': 'line',\n", + " 'line': Line_2390224}},\n", + " LVStation_121776: {BranchTee_MVGrid_239_15: {'type': 'line',\n", + " 'line': Line_2390096}},\n", + " LVStation_419327: {LVStation_124910: {'type': 'line', 'line': Line_2390158}},\n", + " LVStation_417734: {LVStation_139189: {'type': 'line', 'line': Line_2390203}},\n", + " LVStation_125015: {BranchTee_MVGrid_239_74: {'type': 'line',\n", + " 'line': Line_2390162}},\n", + " LVStation_125016: {LVStation_120389: {'type': 'line', 'line': Line_2390062},\n", + " BranchTee_MVGrid_239_73: {'type': 'line', 'line': Line_2390163}},\n", + " LVStation_125017: {LVStation_417909: {'type': 'line', 'line': Line_2390164},\n", + " BranchTee_MVGrid_239_74: {'type': 'line', 'line': Line_2390165}},\n", + " LVStation_500931: {BranchTee_MVGrid_239_73: {'type': 'line',\n", + " 'line': Line_2390236}},\n", + " LVStation_418244: {LVStation_122698: {'type': 'line', 'line': Line_2390137},\n", + " BranchTee_MVGrid_239_11: {'type': 'line', 'line': Line_2390219}},\n", + " LVStation_120411: {BranchTee_MVGrid_239_6: {'type': 'line',\n", + " 'line': Line_2390064}},\n", + " LVStation_121317: {BranchTee_MVGrid_239_13: {'type': 'line',\n", + " 'line': Line_2390087},\n", + " BranchTee_MVGrid_239_53: {'type': 'line', 'line': Line_2390088}},\n", + " LVStation_121318: {LVStation_121918: {'type': 'line', 'line': Line_2390089},\n", + " BranchTee_MVGrid_239_53: {'type': 'line', 'line': Line_2390090}},\n", + " LVStation_416815: {BranchTee_MVGrid_239_17: {'type': 'line',\n", + " 'line': Line_2390207},\n", + " BranchTee_MVGrid_239_85: {'type': 'line', 'line': Line_2390208}},\n", + " LVStation_139104: {BranchTee_MVGrid_239_32: {'type': 'line',\n", + " 'line': Line_2390184},\n", + " BranchTee_MVGrid_239_80: {'type': 'line', 'line': Line_2390185}},\n", + " LVStation_139105: {BranchTee_MVGrid_239_80: {'type': 'line',\n", + " 'line': Line_2390186}},\n", + " LVStation_139106: {BranchTee_MVGrid_239_80: {'type': 'line',\n", + " 'line': Line_2390187}},\n", + " LVStation_139192: {LVStation_122231: {'type': 'line', 'line': Line_2390126}},\n", + " LVStation_119903: {BranchTee_MVGrid_239_45: {'type': 'line',\n", + " 'line': Line_2390054}},\n", + " LVStation_119904: {BranchTee_MVGrid_239_4: {'type': 'line',\n", + " 'line': Line_2390055},\n", + " BranchTee_MVGrid_239_45: {'type': 'line', 'line': Line_2390056}},\n", + " LVStation_418449: {LVStation_121741: {'type': 'line', 'line': Line_2390091}},\n", + " LVStation_419079: {BranchTee_MVGrid_239_20: {'type': 'line',\n", + " 'line': Line_2390223}},\n", + " LVStation_119697: {LVStation_119892: {'type': 'line', 'line': Line_2390027},\n", + " LVStation_416244: {'type': 'line', 'line': Line_2390028}},\n", + " LVStation_119698: {BranchTee_MVGrid_239_39: {'type': 'line',\n", + " 'line': Line_2390029},\n", + " BranchTee_MVGrid_239_87: {'type': 'line', 'line': 
Line_2390030}},\n", + " LVStation_122076: {BranchTee_MVGrid_239_59: {'type': 'line',\n", + " 'line': Line_2390113},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390114}},\n", + " LVStation_122077: {BranchTee_MVGrid_239_22: {'type': 'line',\n", + " 'line': Line_2390115},\n", + " BranchTee_MVGrid_239_28: {'type': 'line', 'line': Line_2390116}},\n", + " LVStation_122078: {BranchTee_MVGrid_239_22: {'type': 'line',\n", + " 'line': Line_2390117},\n", + " BranchTee_MVGrid_239_59: {'type': 'line', 'line': Line_2390118}},\n", + " LVStation_122124: {LVStation_121878: {'type': 'line', 'line': Line_2390097},\n", + " BranchTee_MVGrid_239_60: {'type': 'line', 'line': Line_2390121}},\n", + " LVStation_122125: {LVStation_490253: {'type': 'line', 'line': Line_2390122},\n", + " BranchTee_MVGrid_239_60: {'type': 'line', 'line': Line_2390123}},\n", + " LVStation_122122: {LVStation_122123: {'type': 'line', 'line': Line_2390119}},\n", + " LVStation_122123: {LVStation_122122: {'type': 'line', 'line': Line_2390119},\n", + " BranchTee_MVGrid_239_60: {'type': 'line', 'line': Line_2390120}},\n", + " LVStation_124543: {LVStation_418254: {'type': 'line', 'line': Line_2390152}},\n", + " LVStation_124911: {BranchTee_MVGrid_239_72: {'type': 'line',\n", + " 'line': Line_2390160},\n", + " BranchTee_MVGrid_239_77: {'type': 'line', 'line': Line_2390161}},\n", + " LVStation_124910: {LVStation_419327: {'type': 'line', 'line': Line_2390158},\n", + " BranchTee_MVGrid_239_94: {'type': 'line', 'line': Line_2390159}},\n", + " LVStation_139183: {LVStation_139184: {'type': 'line', 'line': Line_2390194},\n", + " BranchTee_MVGrid_239_82: {'type': 'line', 'line': Line_2390195}},\n", + " LVStation_139184: {LVStation_139183: {'type': 'line', 'line': Line_2390194},\n", + " LVStation_139185: {'type': 'line', 'line': Line_2390196}},\n", + " LVStation_139185: {LVStation_139184: {'type': 'line', 'line': Line_2390196}},\n", + " LVStation_139186: {BranchTee_MVGrid_239_31: {'type': 'line',\n", + " 'line': Line_2390198},\n", + " BranchTee_MVGrid_239_83: {'type': 'line', 'line': Line_2390199}},\n", + " LVStation_139187: {LVStation_139188: {'type': 'line', 'line': Line_2390200},\n", + " BranchTee_MVGrid_239_83: {'type': 'line', 'line': Line_2390201}},\n", + " LVStation_139188: {LVStation_139187: {'type': 'line', 'line': Line_2390200},\n", + " LVStation_139189: {'type': 'line', 'line': Line_2390202}},\n", + " LVStation_139189: {LVStation_139188: {'type': 'line', 'line': Line_2390202},\n", + " LVStation_417734: {'type': 'line', 'line': Line_2390203}},\n", + " LVStation_488816: {LVStation_490252: {'type': 'line', 'line': Line_2390230},\n", + " BranchTee_MVGrid_239_82: {'type': 'line', 'line': Line_2390231}},\n", + " LVStation_490252: {LVStation_488816: {'type': 'line', 'line': Line_2390230}},\n", + " LVStation_490253: {LVStation_122125: {'type': 'line', 'line': Line_2390122},\n", + " BranchTee_MVGrid_239_82: {'type': 'line', 'line': Line_2390232}},\n", + " LVStation_118322: {BranchTee_MVGrid_239_36: {'type': 'line',\n", + " 'line': Line_2390020}},\n", + " LVStation_118323: {BranchTee_MVGrid_239_36: {'type': 'line',\n", + " 'line': Line_2390021},\n", + " MVDisconnectingPoint_3: {'line': Line_2390022, 'type': 'line'}},\n", + " LVStation_118324: {BranchTee_MVGrid_239_37: {'type': 'line',\n", + " 'line': Line_2390023}},\n", + " LVStation_515314: {LVStation_119702: {'type': 'line', 'line': Line_2390033},\n", + " BranchTee_MVGrid_239_37: {'type': 'line', 'line': Line_2390240}},\n", + " LVStation_120387: {LVStation_120388: {'type': 'line', 'line': 
Line_2390058},\n", + " BranchTee_MVGrid_239_46: {'type': 'line', 'line': Line_2390059}},\n", + " LVStation_120388: {LVStation_120387: {'type': 'line', 'line': Line_2390058},\n", + " LVStation_121287: {'type': 'line', 'line': Line_2390060}},\n", + " LVStation_120389: {LVStation_120390: {'type': 'line', 'line': Line_2390061},\n", + " LVStation_125016: {'type': 'line', 'line': Line_2390062}},\n", + " LVStation_120390: {LVStation_120389: {'type': 'line', 'line': Line_2390061},\n", + " BranchTee_MVGrid_239_46: {'type': 'line', 'line': Line_2390063}},\n", + " LVStation_120853: {BranchTee_MVGrid_239_49: {'type': 'line',\n", + " 'line': Line_2390075}},\n", + " LVStation_511325: {BranchTee_MVGrid_239_10: {'type': 'line',\n", + " 'line': Line_2390238},\n", + " BranchTee_MVGrid_239_49: {'type': 'line', 'line': Line_2390239}},\n", + " LVStation_120470: {BranchTee_MVGrid_239_7: {'type': 'line',\n", + " 'line': Line_2390065}},\n", + " LVStation_417987: {BranchTee_MVGrid_239_27: {'type': 'line',\n", + " 'line': Line_2390217}},\n", + " LVStation_119612: {GeneratorFluctuating_839364: {'type': 'line',\n", + " 'line': Line_2390007},\n", + " BranchTee_MVGrid_239_38: {'type': 'line', 'line': Line_2390024}},\n", + " LVStation_119613: {LVStation_119703: {'type': 'line', 'line': Line_2390025},\n", + " BranchTee_MVGrid_239_38: {'type': 'line', 'line': Line_2390026}},\n", + " LVStation_119701: {LVStation_119702: {'type': 'line', 'line': Line_2390031},\n", + " BranchTee_MVGrid_239_40: {'type': 'line', 'line': Line_2390032}},\n", + " LVStation_119702: {LVStation_119701: {'type': 'line', 'line': Line_2390031},\n", + " LVStation_515314: {'type': 'line', 'line': Line_2390033}},\n", + " LVStation_119703: {LVStation_119613: {'type': 'line', 'line': Line_2390025},\n", + " LVStation_119704: {'type': 'line', 'line': Line_2390034}},\n", + " LVStation_119704: {LVStation_119703: {'type': 'line', 'line': Line_2390034},\n", + " BranchTee_MVGrid_239_40: {'type': 'line', 'line': Line_2390035}},\n", + " LVStation_120038: {BranchTee_MVGrid_239_93: {'type': 'line',\n", + " 'line': Line_2390057}},\n", + " LVStation_120555: {BranchTee_MVGrid_239_47: {'type': 'line',\n", + " 'line': Line_2390066},\n", + " BranchTee_MVGrid_239_9: {'type': 'line', 'line': Line_2390067}},\n", + " LVStation_500916: {BranchTee_MVGrid_239_47: {'type': 'line',\n", + " 'line': Line_2390235}},\n", + " LVStation_418547: {LVStation_496409: {'type': 'line', 'line': Line_2390222}},\n", + " LVStation_121286: {BranchTee_MVGrid_239_52: {'type': 'line',\n", + " 'line': Line_2390082}},\n", + " LVStation_121287: {LVStation_120388: {'type': 'line', 'line': Line_2390060},\n", + " BranchTee_MVGrid_239_52: {'type': 'line', 'line': Line_2390083}},\n", + " LVStation_121288: {LVStation_121289: {'type': 'line', 'line': Line_2390084},\n", + " BranchTee_MVGrid_239_52: {'type': 'line', 'line': Line_2390085}},\n", + " LVStation_121289: {LVStation_121288: {'type': 'line', 'line': Line_2390084},\n", + " BranchTee_MVGrid_239_14: {'type': 'line', 'line': Line_2390086}},\n", + " LVStation_121741: {LVStation_418449: {'type': 'line', 'line': Line_2390091},\n", + " BranchTee_MVGrid_239_55: {'type': 'line', 'line': Line_2390092}},\n", + " LVStation_121742: {BranchTee_MVGrid_239_54: {'type': 'line',\n", + " 'line': Line_2390093},\n", + " BranchTee_MVGrid_239_55: {'type': 'line', 'line': Line_2390094}},\n", + " LVStation_121743: {BranchTee_MVGrid_239_54: {'type': 'line',\n", + " 'line': Line_2390095}},\n", + " LVStation_496409: {LVStation_418547: {'type': 'line', 'line': Line_2390222},\n", + " 
BranchTee_MVGrid_239_55: {'type': 'line', 'line': Line_2390233}},\n", + " LVStation_416983: {BranchTee_MVGrid_239_17: {'type': 'line',\n", + " 'line': Line_2390209}},\n", + " LVStation_121878: {LVStation_122124: {'type': 'line', 'line': Line_2390097},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390098}},\n", + " LVStation_121879: {BranchTee_MVGrid_239_21: {'type': 'line',\n", + " 'line': Line_2390099},\n", + " BranchTee_MVGrid_239_56: {'type': 'line', 'line': Line_2390100}},\n", + " LVStation_121880: {LVStation_496410: {'type': 'line', 'line': Line_2390101},\n", + " BranchTee_MVGrid_239_56: {'type': 'line', 'line': Line_2390102},\n", + " MVStation_239: {'type': 'line', 'line': Line_2390103}},\n", + " LVStation_496410: {LVStation_121880: {'type': 'line', 'line': Line_2390101}},\n", + " LVStation_121915: {LVStation_121918: {'type': 'line', 'line': Line_2390104},\n", + " BranchTee_MVGrid_239_57: {'type': 'line', 'line': Line_2390105}},\n", + " LVStation_121916: {LVStation_121917: {'type': 'line', 'line': Line_2390106},\n", + " BranchTee_MVGrid_239_57: {'type': 'line', 'line': Line_2390107}},\n", + " LVStation_121917: {LVStation_121916: {'type': 'line', 'line': Line_2390106}},\n", + " LVStation_121918: {LVStation_121318: {'type': 'line', 'line': Line_2390089},\n", + " LVStation_121915: {'type': 'line', 'line': Line_2390104}},\n", + " LVStation_121919: {BranchTee_MVGrid_239_19: {'type': 'line',\n", + " 'line': Line_2390108},\n", + " BranchTee_MVGrid_239_57: {'type': 'line', 'line': Line_2390109}},\n", + " LVStation_416589: {BranchTee_MVGrid_239_19: {'type': 'line',\n", + " 'line': Line_2390206}},\n", + " LVStation_122400: {BranchTee_MVGrid_239_21: {'type': 'line',\n", + " 'line': Line_2390128},\n", + " BranchTee_MVGrid_239_62: {'type': 'line', 'line': Line_2390129}},\n", + " LVStation_122401: {BranchTee_MVGrid_239_62: {'type': 'line',\n", + " 'line': Line_2390130}},\n", + " LVStation_122696: {BranchTee_MVGrid_239_64: {'type': 'line',\n", + " 'line': Line_2390135}},\n", + " LVStation_122697: {BranchTee_MVGrid_239_66: {'type': 'line',\n", + " 'line': Line_2390136}},\n", + " LVStation_122698: {LVStation_418244: {'type': 'line', 'line': Line_2390137},\n", + " BranchTee_MVGrid_239_65: {'type': 'line', 'line': Line_2390138}},\n", + " LVStation_122699: {BranchTee_MVGrid_239_65: {'type': 'line',\n", + " 'line': Line_2390139}},\n", + " LVStation_123655: {BranchTee_MVGrid_239_90: {'type': 'line',\n", + " 'line': Line_2390140}},\n", + " LVStation_124010: {BranchTee_MVGrid_239_67: {'type': 'line',\n", + " 'line': Line_2390141}},\n", + " LVStation_124011: {LVStation_124109: {'type': 'line', 'line': Line_2390142},\n", + " BranchTee_MVGrid_239_67: {'type': 'line', 'line': Line_2390143}},\n", + " LVStation_124109: {LVStation_124011: {'type': 'line', 'line': Line_2390142},\n", + " BranchTee_MVGrid_239_69: {'type': 'line', 'line': Line_2390147}},\n", + " LVStation_124110: {BranchTee_MVGrid_239_17: {'type': 'line',\n", + " 'line': Line_2390148},\n", + " BranchTee_MVGrid_239_69: {'type': 'line', 'line': Line_2390149}},\n", + " LVStation_124111: {BranchTee_MVGrid_239_36: {'type': 'line',\n", + " 'line': Line_2390150},\n", + " BranchTee_MVGrid_239_69: {'type': 'line', 'line': Line_2390151}},\n", + " LVStation_417936: {LVStation_124902: {'type': 'line', 'line': Line_2390157},\n", + " BranchTee_MVGrid_239_26: {'type': 'line', 'line': Line_2390216}},\n", + " LVStation_124902: {LVStation_417936: {'type': 'line', 'line': Line_2390157}},\n", + " LVStation_416175: {LVStation_119890: {'type': 'line', 'line': 
Line_2390039}},\n", + " LVStation_125210: {BranchTee_MVGrid_239_75: {'type': 'line',\n", + " 'line': Line_2390166},\n", + " BranchTee_MVGrid_239_88: {'type': 'line', 'line': Line_2390167}},\n", + " LVStation_125211: {LVStation_125212: {'type': 'line', 'line': Line_2390168},\n", + " BranchTee_MVGrid_239_75: {'type': 'line', 'line': Line_2390169}},\n", + " LVStation_125212: {LVStation_125211: {'type': 'line', 'line': Line_2390168}},\n", + " LVStation_125213: {LVStation_125216: {'type': 'line', 'line': Line_2390170},\n", + " BranchTee_MVGrid_239_77: {'type': 'line', 'line': Line_2390171}},\n", + " LVStation_125214: {BranchTee_MVGrid_239_76: {'type': 'line',\n", + " 'line': Line_2390172},\n", + " BranchTee_MVGrid_239_77: {'type': 'line', 'line': Line_2390173}},\n", + " LVStation_125215: {BranchTee_MVGrid_239_76: {'type': 'line',\n", + " 'line': Line_2390174}},\n", + " LVStation_125216: {LVStation_125213: {'type': 'line', 'line': Line_2390170},\n", + " BranchTee_MVGrid_239_94: {'type': 'line', 'line': Line_2390175}},\n", + " LVStation_125217: {LVStation_419605: {'type': 'line', 'line': Line_2390176},\n", + " LVStation_503036: {'type': 'line', 'line': Line_2390177}},\n", + " LVStation_503036: {LVStation_125217: {'type': 'line', 'line': Line_2390177},\n", + " BranchTee_MVGrid_239_25: {'type': 'line', 'line': Line_2390237}},\n", + " LVStation_125269: {BranchTee_MVGrid_239_3: {'type': 'line',\n", + " 'line': Line_2390181},\n", + " BranchTee_MVGrid_239_79: {'type': 'line', 'line': Line_2390182}},\n", + " LVStation_125267: {BranchTee_MVGrid_239_78: {'type': 'line',\n", + " 'line': Line_2390178}},\n", + " LVStation_125268: {BranchTee_MVGrid_239_78: {'type': 'line',\n", + " 'line': Line_2390179},\n", + " BranchTee_MVGrid_239_79: {'type': 'line', 'line': Line_2390180}},\n", + " LVStation_120898: {BranchTee_MVGrid_239_11: {'type': 'line',\n", + " 'line': Line_2390076},\n", + " BranchTee_MVGrid_239_50: {'type': 'line', 'line': Line_2390077}},\n", + " LVStation_120899: {BranchTee_MVGrid_239_50: {'type': 'line',\n", + " 'line': Line_2390078}},\n", + " LVStation_139149: {LVStation_138585: {'type': 'line', 'line': Line_2390183},\n", + " BranchTee_MVGrid_239_31: {'type': 'line', 'line': Line_2390189}},\n", + " LVStation_139150: {LVStation_139151: {'type': 'line', 'line': Line_2390190},\n", + " BranchTee_MVGrid_239_34: {'type': 'line', 'line': Line_2390191},\n", + " BranchTee_MVGrid_239_81: {'type': 'line', 'line': Line_2390192}},\n", + " LVStation_139151: {LVStation_139150: {'type': 'line', 'line': Line_2390190},\n", + " LVStation_139152: {'type': 'line', 'line': Line_2390193}},\n", + " LVStation_139152: {LVStation_139151: {'type': 'line', 'line': Line_2390193}},\n", + " LVStation_417909: {LVStation_125017: {'type': 'line', 'line': Line_2390164},\n", + " BranchTee_MVGrid_239_27: {'type': 'line', 'line': Line_2390215}},\n", + " LVStation_124085: {BranchTee_MVGrid_239_23: {'type': 'line',\n", + " 'line': Line_2390144},\n", + " BranchTee_MVGrid_239_68: {'type': 'line', 'line': Line_2390145}},\n", + " LVStation_124086: {BranchTee_MVGrid_239_68: {'type': 'line',\n", + " 'line': Line_2390146}},\n", + " LVStation_124581: {LVStation_124582: {'type': 'line', 'line': Line_2390153}},\n", + " LVStation_124582: {LVStation_124581: {'type': 'line', 'line': Line_2390153},\n", + " BranchTee_MVGrid_239_71: {'type': 'line', 'line': Line_2390154}},\n", + " LVStation_124583: {LVStation_120585: {'type': 'line', 'line': Line_2390068},\n", + " LVStation_124584: {'type': 'line', 'line': Line_2390155},\n", + " BranchTee_MVGrid_239_71: 
{'type': 'line', 'line': Line_2390156}},\n", + " LVStation_124584: {LVStation_124583: {'type': 'line', 'line': Line_2390155}},\n", + " LVStation_498758: {BranchTee_MVGrid_239_70: {'type': 'line',\n", + " 'line': Line_2390234}},\n", + " MVStation_239: {GeneratorFluctuating_878609: {'type': 'line',\n", + " 'line': Line_2390009},\n", + " GeneratorFluctuating_878611: {'type': 'line', 'line': Line_2390010},\n", + " GeneratorFluctuating_878614: {'type': 'line', 'line': Line_2390011},\n", + " GeneratorFluctuating_878615: {'type': 'line', 'line': Line_2390012},\n", + " GeneratorFluctuating_878875: {'type': 'line', 'line': Line_2390018},\n", + " GeneratorFluctuating_878950: {'type': 'line', 'line': Line_2390019},\n", + " LVStation_121878: {'type': 'line', 'line': Line_2390098},\n", + " LVStation_121880: {'type': 'line', 'line': Line_2390103},\n", + " LVStation_122076: {'type': 'line', 'line': Line_2390114},\n", + " LVStation_485974: {'type': 'line', 'line': Line_2390229},\n", + " BranchTee_MVGrid_239_11: {'type': 'line', 'line': Line_2390243},\n", + " BranchTee_MVGrid_239_14: {'type': 'line', 'line': Line_2390245},\n", + " BranchTee_MVGrid_239_16: {'type': 'line', 'line': Line_2390249},\n", + " BranchTee_MVGrid_239_23: {'type': 'line', 'line': Line_2390257},\n", + " BranchTee_MVGrid_239_2: {'type': 'line', 'line': Line_2390266},\n", + " BranchTee_MVGrid_239_92: {'type': 'line', 'line': Line_2390286}},\n", + " MVDisconnectingPoint_1: {BranchTee_MVGrid_239_1: {'line': Line_2390252,\n", + " 'type': 'line'}},\n", + " MVDisconnectingPoint_2: {BranchTee_MVGrid_239_2: {'line': Line_2390265,\n", + " 'type': 'line'}},\n", + " MVDisconnectingPoint_3: {LVStation_118323: {'line': Line_2390022,\n", + " 'type': 'line'}},\n", + " MVDisconnectingPoint_4: {BranchTee_MVGrid_239_75: {'line': Line_2390284,\n", + " 'type': 'line'}},\n", + " MVDisconnectingPoint_5: {BranchTee_MVGrid_239_83: {'line': Line_2390197,\n", + " 'type': 'line'}}}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# get a dictionary of all lines in the mv grid\n", + "edisgo.network.mv_grid.graph.edge" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The dictionary you got should look something like this:\n", + "```python\n", + "{Generator_x: {BranchTee_y: {'type': 'line', 'line': Line_1}},\n", + " BranchTee_y: {\n", + " Generator_x: {'type': 'line', 'line': Line_1},\n", + " BranchTee_z: {'type': 'line', 'line': Line_2}}}\n", + "```\n", + "\n", + "That means that Generator_x is connected to BranchTee_y by Line_1 and BranchTee_y is also connected to BranchTee_z by Line_2. Line_1 and Line_2 are [Line](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.components.Line) objects containing all important information about the line, such as length, equipment type, and geometry. Accessing this information can, for example, be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.3681789122707058" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "edge_dictionary = edisgo.network.mv_grid.graph.edge\n", + "# get random line\n", + "line = edge_dictionary.popitem()[1].popitem()[1]['line']\n", + "# get line length\n", + "line.length" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's have a look at all the **nodes**."
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[GeneratorFluctuating_839361,\n", + " GeneratorFluctuating_839362,\n", + " GeneratorFluctuating_839363,\n", + " GeneratorFluctuating_839364,\n", + " GeneratorFluctuating_878450,\n", + " GeneratorFluctuating_878583,\n", + " GeneratorFluctuating_878609,\n", + " GeneratorFluctuating_878611,\n", + " GeneratorFluctuating_878614,\n", + " GeneratorFluctuating_878615]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# get a list of all nodes (stations, generators, loads, branch tees)\n", + "# here, only the first 10 nodes are displayed\n", + "edisgo.network.mv_grid.graph.nodes()[:10]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also filter for certain kinds of nodes, e.g. generators..." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[GeneratorFluctuating_839361,\n", + " GeneratorFluctuating_839362,\n", + " GeneratorFluctuating_839363,\n", + " GeneratorFluctuating_839364,\n", + " GeneratorFluctuating_878450,\n", + " GeneratorFluctuating_878583,\n", + " GeneratorFluctuating_878609,\n", + " GeneratorFluctuating_878611,\n", + " GeneratorFluctuating_878614,\n", + " GeneratorFluctuating_878615,\n", + " GeneratorFluctuating_878862,\n", + " GeneratorFluctuating_878863,\n", + " GeneratorFluctuating_878864,\n", + " GeneratorFluctuating_878865,\n", + " GeneratorFluctuating_878866,\n", + " GeneratorFluctuating_878867,\n", + " GeneratorFluctuating_878875,\n", + " GeneratorFluctuating_878950,\n", + " GeneratorFluctuating_878963]" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# get a list of all generators in the mv grid\n", + "edisgo.network.mv_grid.graph.nodes_by_attribute('generator')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "... or get a list of all lv grids." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[LVGrid_122408,\n", + " LVGrid_485974,\n", + " LVGrid_138585,\n", + " LVGrid_119895,\n", + " LVGrid_119896,\n", + " LVGrid_119889,\n", + " LVGrid_119890,\n", + " LVGrid_119891,\n", + " LVGrid_119892,\n", + " LVGrid_119893]" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# get a list of all lv grids\n", + "# here, only the first 10 lv grids are displayed\n", + "list(edisgo.network.mv_grid.lv_grids)[:10]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Future generator capacities \n", + "\n", + "In the open_eGo project we developed two future scenarios, the 'NEP 2035' and the 'ego 100' scenario. The 'NEP 2035' scenario closely follows the B2-Scenario 2035 from the German network development plan (Netzentwicklungsplan NEP) 2015. The share of renewables is 65.8% and electricity demand is assumed to stay the same as in the status quo. The 'ego 100' scenario is based on the e-Highway 2050 scenario X-7 and assumes a share of renewables of 100% and, again, an electricity demand equal to that of the status quo.\n", + "\n", + "As mentioned earlier, ding0 grids represent status quo networks and generator capacities. In order to analyse future scenarios, the future generator park has to be imported."
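+     ,
+     "\n",
+     "\n",
+     "To see what the import changes, it can be helpful to compare the installed generator capacity in the MV grid before and after the import. The following is only a minimal sketch; it assumes that each Generator object exposes a `nominal_capacity` attribute (see the eDisGo documentation for the exact attribute names and units):\n",
+     "\n",
+     "```python\n",
+     "# minimal sketch: total installed capacity of all generators in the MV grid\n",
+     "# (assumes Generator objects provide a `nominal_capacity` attribute)\n",
+     "gens = edisgo.network.mv_grid.graph.nodes_by_attribute('generator')\n",
+     "total_capacity = sum(g.nominal_capacity for g in gens)\n",
+     "total_capacity\n",
+     "```"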
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:Right now only solar and wind generators can be imported from the oedb.\n" + ] + }, + { + "ename": "ProgrammingError", + "evalue": "(psycopg2.ProgrammingError) relation \"model_draft.ego_supply_res_powerplant_nep2035_mview\" does not exist\nLINE 2: FROM model_draft.ego_supply_res_powerplant_nep2035_mview \n ^\n [SQL: 'SELECT model_draft.ego_supply_res_powerplant_nep2035_mview.id, model_draft.ego_supply_res_powerplant_nep2035_mview.subst_id, model_draft.ego_supply_res_powerplant_nep2035_mview.la_id, model_draft.ego_supply_res_powerplant_nep2035_mview.mvlv_subst_id, model_draft.ego_supply_res_powerplant_nep2035_mview.electrical_capacity, model_draft.ego_supply_res_powerplant_nep2035_mview.generation_type, model_draft.ego_supply_res_powerplant_nep2035_mview.generation_subtype, model_draft.ego_supply_res_powerplant_nep2035_mview.voltage_level, ST_AsText(ST_Transform(model_draft.ego_supply_res_powerplant_nep2035_mview.rea_geom_new, %(ST_Transform_1)s)) AS geom, ST_AsText(ST_Transform(model_draft.ego_supply_res_powerplant_nep2035_mview.geom, %(ST_Transform_2)s)) AS geom_em \\nFROM model_draft.ego_supply_res_powerplant_nep2035_mview \\nWHERE model_draft.ego_supply_res_powerplant_nep2035_mview.subst_id = %(subst_id_1)s AND model_draft.ego_supply_res_powerplant_nep2035_mview.generation_type IN (%(generation_type_1)s, %(generation_type_2)s) AND model_draft.ego_supply_res_powerplant_nep2035_mview.voltage_level IN (%(voltage_level_1)s, %(voltage_level_2)s)'] [parameters: {'ST_Transform_1': 4326, 'ST_Transform_2': 4326, 'subst_id_1': 239, 'generation_type_1': 'solar', 'generation_type_2': 'wind', 'voltage_level_1': 4, 'voltage_level_2': 5}] (Background on this error at: http://sqlalche.me/e/f405)", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mProgrammingError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_execute_context\u001b[0;34m(self, dialect, constructor, statement, parameters, *args)\u001b[0m\n\u001b[1;32m 1192\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1193\u001b[0;31m context)\n\u001b[0m\u001b[1;32m 1194\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/default.py\u001b[0m in \u001b[0;36mdo_execute\u001b[0;34m(self, cursor, statement, parameters, context)\u001b[0m\n\u001b[1;32m 506\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdo_execute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcursor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 507\u001b[0;31m \u001b[0mcursor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mparameters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 508\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mProgrammingError\u001b[0m: relation \"model_draft.ego_supply_res_powerplant_nep2035_mview\" does not exist\nLINE 2: FROM model_draft.ego_supply_res_powerplant_nep2035_mview \n ^\n", + "\nThe above exception was the direct cause of the following exception:\n", + "\u001b[0;31mProgrammingError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# Import generators\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mscenario\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'nep2035'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0medisgo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_generators\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgenerator_scenario\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mscenario\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/git_repos/eDisGo/edisgo/grid/network.py\u001b[0m in \u001b[0;36mimport_generators\u001b[0;34m(self, generator_scenario)\u001b[0m\n\u001b[1;32m 326\u001b[0m \u001b[0mdata_source\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'oedb'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 327\u001b[0m import_generators(network=self.network,\n\u001b[0;32m--> 328\u001b[0;31m data_source=data_source)\n\u001b[0m\u001b[1;32m 329\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 330\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0manalyze\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/git_repos/eDisGo/edisgo/data/import_data.py\u001b[0m in \u001b[0;36mimport_generators\u001b[0;34m(network, data_source, file)\u001b[0m\n\u001b[1;32m 984\u001b[0m logging.warning('Right now only solar and wind generators can be '\n\u001b[1;32m 985\u001b[0m 'imported from the oedb.')\n\u001b[0;32m--> 986\u001b[0;31m \u001b[0m_import_genos_from_oedb\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnetwork\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnetwork\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 987\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mdata_source\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'pypsa'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 988\u001b[0m \u001b[0m_import_genos_from_pypsa\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnetwork\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnetwork\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfile\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/git_repos/eDisGo/edisgo/data/import_data.py\u001b[0m in \u001b[0;36m_import_genos_from_oedb\u001b[0;34m(network)\u001b[0m\n\u001b[1;32m 1844\u001b[0m \u001b[0;31m#generators_conv_mv = _import_conv_generators()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1845\u001b[0m generators_res_mv, generators_res_lv = _import_res_generators(\n\u001b[0;32m-> 1846\u001b[0;31m types_condition)\n\u001b[0m\u001b[1;32m 1847\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1848\u001b[0m \u001b[0;31m#generators_mv = 
generators_conv_mv.append(generators_res_mv)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/git_repos/eDisGo/edisgo/data/import_data.py\u001b[0m in \u001b[0;36m_import_res_generators\u001b[0;34m(types_filter)\u001b[0m\n\u001b[1;32m 1089\u001b[0m generators_mv = pd.read_sql_query(generators_mv_sqla.statement,\n\u001b[1;32m 1090\u001b[0m \u001b[0msession\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1091\u001b[0;31m index_col='id')\n\u001b[0m\u001b[1;32m 1092\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1093\u001b[0m \u001b[0;31m# define generators with unknown subtype as 'unknown'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/pandas/io/sql.py\u001b[0m in \u001b[0;36mread_sql_query\u001b[0;34m(sql, con, index_col, coerce_float, params, parse_dates, chunksize)\u001b[0m\n\u001b[1;32m 330\u001b[0m return pandas_sql.read_query(\n\u001b[1;32m 331\u001b[0m \u001b[0msql\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex_col\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mindex_col\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcoerce_float\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcoerce_float\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 332\u001b[0;31m parse_dates=parse_dates, chunksize=chunksize)\n\u001b[0m\u001b[1;32m 333\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 334\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/pandas/io/sql.py\u001b[0m in \u001b[0;36mread_query\u001b[0;34m(self, sql, index_col, coerce_float, parse_dates, params, chunksize)\u001b[0m\n\u001b[1;32m 1085\u001b[0m \u001b[0margs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_convert_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msql\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1086\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1087\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1088\u001b[0m \u001b[0mcolumns\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1089\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/pandas/io/sql.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 976\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 977\u001b[0m \u001b[0;34m\"\"\"Simple passthrough to SQLAlchemy connectable\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 978\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconnectable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 979\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 980\u001b[0m def read_table(self, table_name, index_col=None, coerce_float=True,\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, statement, *multiparams, **params)\u001b[0m\n\u001b[1;32m 2073\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2074\u001b[0m \u001b[0mconnection\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontextual_connect\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mclose_with_result\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2075\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconnection\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2076\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2077\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mscalar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, object, *multiparams, **params)\u001b[0m\n\u001b[1;32m 946\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mObjectNotExecutableError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobject\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 947\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 948\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mmeth\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 949\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 950\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_execute_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/sql/elements.py\u001b[0m in \u001b[0;36m_execute_on_connection\u001b[0;34m(self, connection, multiparams, params)\u001b[0m\n\u001b[1;32m 267\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_execute_on_connection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconnection\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 268\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msupports_execution\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 269\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconnection\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_execute_clauseelement\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmultiparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 270\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 271\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mObjectNotExecutableError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_execute_clauseelement\u001b[0;34m(self, elem, multiparams, params)\u001b[0m\n\u001b[1;32m 1058\u001b[0m \u001b[0mcompiled_sql\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1059\u001b[0m \u001b[0mdistilled_params\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1060\u001b[0;31m \u001b[0mcompiled_sql\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdistilled_params\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1061\u001b[0m )\n\u001b[1;32m 1062\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_has_events\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_has_events\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_execute_context\u001b[0;34m(self, dialect, constructor, statement, parameters, *args)\u001b[0m\n\u001b[1;32m 1198\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1199\u001b[0m \u001b[0mcursor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1200\u001b[0;31m context)\n\u001b[0m\u001b[1;32m 1201\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1202\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_has_events\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_has_events\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_handle_dbapi_exception\u001b[0;34m(self, e, statement, parameters, cursor, context)\u001b[0m\n\u001b[1;32m 1411\u001b[0m util.raise_from_cause(\n\u001b[1;32m 1412\u001b[0m \u001b[0msqlalchemy_exception\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1413\u001b[0;31m \u001b[0mexc_info\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1414\u001b[0m )\n\u001b[1;32m 1415\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/util/compat.py\u001b[0m in \u001b[0;36mraise_from_cause\u001b[0;34m(exception, exc_info)\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0mexc_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexc_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexc_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mexc_info\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 202\u001b[0m \u001b[0mcause\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mexc_value\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mexc_value\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mexception\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 203\u001b[0;31m \u001b[0mreraise\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexception\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexception\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtb\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mexc_tb\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcause\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcause\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 204\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mpy3k\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/util/compat.py\u001b[0m in \u001b[0;36mreraise\u001b[0;34m(tp, value, tb, cause)\u001b[0m\n\u001b[1;32m 184\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__cause__\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcause\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mtb\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 186\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 187\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 188\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/base.py\u001b[0m in \u001b[0;36m_execute_context\u001b[0;34m(self, dialect, constructor, statement, parameters, *args)\u001b[0m\n\u001b[1;32m 1191\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1192\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1193\u001b[0;31m context)\n\u001b[0m\u001b[1;32m 1194\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1195\u001b[0m self._handle_dbapi_exception(\n", + "\u001b[0;32m~/virtualenvs/open_ego_notebook/lib/python3.6/site-packages/sqlalchemy/engine/default.py\u001b[0m in \u001b[0;36mdo_execute\u001b[0;34m(self, cursor, statement, parameters, context)\u001b[0m\n\u001b[1;32m 505\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 506\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdo_execute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mcursor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 507\u001b[0;31m \u001b[0mcursor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 508\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 509\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdo_execute_no_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcursor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatement\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mProgrammingError\u001b[0m: (psycopg2.ProgrammingError) relation \"model_draft.ego_supply_res_powerplant_nep2035_mview\" does not exist\nLINE 2: FROM model_draft.ego_supply_res_powerplant_nep2035_mview \n ^\n [SQL: 'SELECT model_draft.ego_supply_res_powerplant_nep2035_mview.id, model_draft.ego_supply_res_powerplant_nep2035_mview.subst_id, model_draft.ego_supply_res_powerplant_nep2035_mview.la_id, model_draft.ego_supply_res_powerplant_nep2035_mview.mvlv_subst_id, model_draft.ego_supply_res_powerplant_nep2035_mview.electrical_capacity, model_draft.ego_supply_res_powerplant_nep2035_mview.generation_type, model_draft.ego_supply_res_powerplant_nep2035_mview.generation_subtype, model_draft.ego_supply_res_powerplant_nep2035_mview.voltage_level, ST_AsText(ST_Transform(model_draft.ego_supply_res_powerplant_nep2035_mview.rea_geom_new, %(ST_Transform_1)s)) AS geom, ST_AsText(ST_Transform(model_draft.ego_supply_res_powerplant_nep2035_mview.geom, %(ST_Transform_2)s)) AS geom_em \\nFROM model_draft.ego_supply_res_powerplant_nep2035_mview \\nWHERE model_draft.ego_supply_res_powerplant_nep2035_mview.subst_id = %(subst_id_1)s AND model_draft.ego_supply_res_powerplant_nep2035_mview.generation_type IN (%(generation_type_1)s, %(generation_type_2)s) AND model_draft.ego_supply_res_powerplant_nep2035_mview.voltage_level IN (%(voltage_level_1)s, %(voltage_level_2)s)'] [parameters: {'ST_Transform_1': 4326, 'ST_Transform_2': 4326, 'subst_id_1': 239, 'generation_type_1': 'solar', 'generation_type_2': 'wind', 'voltage_level_1': 4, 'voltage_level_2': 5}] (Background on this error at: http://sqlalche.me/e/f405)" + ] + } + ], + "source": [ + "# Import generators\n", + "scenario = 'nep2035'\n", + "edisgo.import_generators(generator_scenario=scenario)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can have a look at all generators again and compare it to the list of generators created earlier before the import of new generators." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "edisgo.network.mv_grid.graph.nodes_by_attribute('generator')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Grid reinforcement \n", + "\n", + "Now we can finally calculate grid expansion costs.\n", + "\n", + "The grid expansion methodology is based on the distribution grid study of dena [[1]](#[1]) and Baden-Wuerttemberg [[2]](#[2]). For now only a combined analysis of MV and LV grids is possible. 
The order grid expansion measures are conducted is as follows:\n", + "\n", + "* Reinforce transformers and lines due to over-loading issues\n", + "* Reinforce lines in MV grid due to over-voltage issues\n", + "* Reinforce lines in LV grid due to over-loading issues\n", + "* Reinforce transformers and lines due to over-loading issues\n", + "\n", + "Reinforcement of transformers and lines due to over-loading issues is performed twice, once in the beginning and again after fixing over-voltage problems, because the changed power flows after reinforcing the grid may lead to new over-loading issues. (For further explanation see the [documentation](http://edisgo.readthedocs.io/en/dev/features_in_detail.html#automatic-grid-expansion).)\n", + "\n", + "After each reinforcement step a non-linear power flow analyses is conducted using PyPSA. Let's do a power flow analysis before the reinforcement to see how many over-voltage issues there are." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:pypsa.pf:Slack bus for sub-network 0 is Bus_MVStation_239\n", + "INFO:pypsa.pf:Performing non-linear load-flow on AC sub-network SubNetwork 0 for snapshots DatetimeIndex(['1970-01-01'], dtype='datetime64[ns]', freq='H')\n", + "INFO:pypsa.pf:Newton-Raphson solved in 3 iterations with error of 0.000001 in 0.519885 seconds\n" + ] + } + ], + "source": [ + "# Do non-linear power flow analysis with PyPSA\n", + "edisgo.analyze()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Voltage levels for ['GeneratorFluctuating_839361', 'GeneratorFluctuating_839362', 'GeneratorFluctuating_839363', 'GeneratorFluctuating_839364', 'GeneratorFluctuating_878450', 'GeneratorFluctuating_878583', 'GeneratorFluctuating_878609', 'GeneratorFluctuating_878611', 'GeneratorFluctuating_878614', 'GeneratorFluctuating_878615', 'GeneratorFluctuating_878862', 'GeneratorFluctuating_878863', 'GeneratorFluctuating_878864', 'GeneratorFluctuating_878865', 'GeneratorFluctuating_878866', 'GeneratorFluctuating_878867', 'GeneratorFluctuating_878875', 'GeneratorFluctuating_878950', 'GeneratorFluctuating_878963', 'BranchTee_MVGrid_239_1', 'BranchTee_MVGrid_239_2', 'BranchTee_MVGrid_239_3', 'BranchTee_MVGrid_239_4', 'BranchTee_MVGrid_239_5', 'BranchTee_MVGrid_239_6', 'BranchTee_MVGrid_239_7', 'BranchTee_MVGrid_239_8', 'BranchTee_MVGrid_239_9', 'BranchTee_MVGrid_239_10', 'BranchTee_MVGrid_239_11', 'BranchTee_MVGrid_239_12', 'BranchTee_MVGrid_239_13', 'BranchTee_MVGrid_239_14', 'BranchTee_MVGrid_239_15', 'BranchTee_MVGrid_239_16', 'BranchTee_MVGrid_239_17', 'BranchTee_MVGrid_239_18', 'BranchTee_MVGrid_239_19', 'BranchTee_MVGrid_239_20', 'BranchTee_MVGrid_239_21', 'BranchTee_MVGrid_239_22', 'BranchTee_MVGrid_239_23', 'BranchTee_MVGrid_239_24', 'BranchTee_MVGrid_239_25', 'BranchTee_MVGrid_239_26', 'BranchTee_MVGrid_239_27', 'BranchTee_MVGrid_239_28', 'BranchTee_MVGrid_239_29', 'BranchTee_MVGrid_239_30', 'BranchTee_MVGrid_239_31', 'BranchTee_MVGrid_239_32', 'BranchTee_MVGrid_239_33', 'BranchTee_MVGrid_239_34', 'BranchTee_MVGrid_239_35', 'BranchTee_MVGrid_239_36', 'BranchTee_MVGrid_239_37', 'BranchTee_MVGrid_239_38', 'BranchTee_MVGrid_239_39', 'BranchTee_MVGrid_239_40', 'BranchTee_MVGrid_239_41', 'BranchTee_MVGrid_239_42', 'BranchTee_MVGrid_239_43', 'BranchTee_MVGrid_239_44', 'BranchTee_MVGrid_239_45', 'BranchTee_MVGrid_239_46', 
'BranchTee_MVGrid_239_47', 'BranchTee_MVGrid_239_48', 'BranchTee_MVGrid_239_49', 'BranchTee_MVGrid_239_50', 'BranchTee_MVGrid_239_51', 'BranchTee_MVGrid_239_52', 'BranchTee_MVGrid_239_53', 'BranchTee_MVGrid_239_54', 'BranchTee_MVGrid_239_55', 'BranchTee_MVGrid_239_56', 'BranchTee_MVGrid_239_57', 'BranchTee_MVGrid_239_58', 'BranchTee_MVGrid_239_59', 'BranchTee_MVGrid_239_60', 'BranchTee_MVGrid_239_61', 'BranchTee_MVGrid_239_62', 'BranchTee_MVGrid_239_63', 'BranchTee_MVGrid_239_64', 'BranchTee_MVGrid_239_65', 'BranchTee_MVGrid_239_66', 'BranchTee_MVGrid_239_67', 'BranchTee_MVGrid_239_68', 'BranchTee_MVGrid_239_69', 'BranchTee_MVGrid_239_70', 'BranchTee_MVGrid_239_71', 'BranchTee_MVGrid_239_72', 'BranchTee_MVGrid_239_73', 'BranchTee_MVGrid_239_74', 'BranchTee_MVGrid_239_75', 'BranchTee_MVGrid_239_76', 'BranchTee_MVGrid_239_77', 'BranchTee_MVGrid_239_78', 'BranchTee_MVGrid_239_79', 'BranchTee_MVGrid_239_80', 'BranchTee_MVGrid_239_81', 'BranchTee_MVGrid_239_82', 'BranchTee_MVGrid_239_83', 'BranchTee_MVGrid_239_84', 'BranchTee_MVGrid_239_85', 'BranchTee_MVGrid_239_86', 'BranchTee_MVGrid_239_87', 'BranchTee_MVGrid_239_88', 'BranchTee_MVGrid_239_89', 'BranchTee_MVGrid_239_90', 'BranchTee_MVGrid_239_91', 'BranchTee_MVGrid_239_92', 'BranchTee_MVGrid_239_93', 'BranchTee_MVGrid_239_94', 'LVStation_122408', 'LVStation_485974', 'LVStation_138585', 'LVStation_119895', 'LVStation_119896', 'LVStation_119889', 'LVStation_119890', 'LVStation_119891', 'LVStation_119892', 'LVStation_119893', 'LVStation_119894', 'LVStation_119897', 'LVStation_119898', 'LVStation_119899', 'LVStation_119900', 'LVStation_119901', 'LVStation_417530', 'LVStation_419885', 'LVStation_121940', 'LVStation_121941', 'LVStation_122426', 'LVStation_122480', 'LVStation_418254', 'LVStation_419605', 'LVStation_416441', 'LVStation_418546', 'LVStation_416244', 'LVStation_417898', 'LVStation_419795', 'LVStation_120737', 'LVStation_120736', 'LVStation_120738', 'LVStation_120942', 'LVStation_120943', 'LVStation_122230', 'LVStation_122231', 'LVStation_418237', 'LVStation_416449', 'LVStation_417550', 'LVStation_139107', 'LVStation_120585', 'LVStation_417276', 'LVStation_122520', 'LVStation_419726', 'LVStation_121776', 'LVStation_419327', 'LVStation_417734', 'LVStation_125015', 'LVStation_125016', 'LVStation_125017', 'LVStation_500931', 'LVStation_418244', 'LVStation_120411', 'LVStation_121317', 'LVStation_121318', 'LVStation_416815', 'LVStation_139104', 'LVStation_139105', 'LVStation_139106', 'LVStation_139192', 'LVStation_119903', 'LVStation_119904', 'LVStation_418449', 'LVStation_419079', 'LVStation_119697', 'LVStation_119698', 'LVStation_122076', 'LVStation_122077', 'LVStation_122078', 'LVStation_122124', 'LVStation_122125', 'LVStation_122122', 'LVStation_122123', 'LVStation_124543', 'LVStation_124911', 'LVStation_124910', 'LVStation_139183', 'LVStation_139184', 'LVStation_139185', 'LVStation_139186', 'LVStation_139187', 'LVStation_139188', 'LVStation_139189', 'LVStation_488816', 'LVStation_490252', 'LVStation_490253', 'LVStation_118322', 'LVStation_118323', 'LVStation_118324', 'LVStation_515314', 'LVStation_120387', 'LVStation_120388', 'LVStation_120389', 'LVStation_120390', 'LVStation_120853', 'LVStation_511325', 'LVStation_120470', 'LVStation_417987', 'LVStation_119612', 'LVStation_119613', 'LVStation_119701', 'LVStation_119702', 'LVStation_119703', 'LVStation_119704', 'LVStation_120038', 'LVStation_120555', 'LVStation_500916', 'LVStation_418547', 'LVStation_121286', 'LVStation_121287', 'LVStation_121288', 'LVStation_121289', 
'LVStation_121741', 'LVStation_121742', 'LVStation_121743', 'LVStation_496409', 'LVStation_416983', 'LVStation_121878', 'LVStation_121879', 'LVStation_121880', 'LVStation_496410', 'LVStation_121915', 'LVStation_121916', 'LVStation_121917', 'LVStation_121918', 'LVStation_121919', 'LVStation_416589', 'LVStation_122400', 'LVStation_122401', 'LVStation_122696', 'LVStation_122697', 'LVStation_122698', 'LVStation_122699', 'LVStation_123655', 'LVStation_124010', 'LVStation_124011', 'LVStation_124109', 'LVStation_124110', 'LVStation_124111', 'LVStation_417936', 'LVStation_124902', 'LVStation_416175', 'LVStation_125210', 'LVStation_125211', 'LVStation_125212', 'LVStation_125213', 'LVStation_125214', 'LVStation_125215', 'LVStation_125216', 'LVStation_125217', 'LVStation_503036', 'LVStation_125269', 'LVStation_125267', 'LVStation_125268', 'LVStation_120898', 'LVStation_120899', 'LVStation_139149', 'LVStation_139150', 'LVStation_139151', 'LVStation_139152', 'LVStation_417909', 'LVStation_124085', 'LVStation_124086', 'LVStation_124581', 'LVStation_124582', 'LVStation_124583', 'LVStation_124584', 'LVStation_498758', 'MVStation_239', 'MVDisconnectingPoint_1', 'MVDisconnectingPoint_2', 'MVDisconnectingPoint_3', 'MVDisconnectingPoint_4', 'MVDisconnectingPoint_5'] are not returned from PFA\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
1970-01-01
\n", + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: []\n", + "Index: [1970-01-01 00:00:00]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# get voltage at each node from power-flow analysis results\n", + "v_mag_pu_pfa = edisgo.network.results.v_res(nodes=edisgo.network.mv_grid.graph.nodes())\n", + "# set maximum allowed voltage deviation to 10%\n", + "max_v_dev = 0.1\n", + "# find all nodes with a node voltage deviation greater the allowed voltage deviation\n", + "v_mag_pu_pfa[(v_mag_pu_pfa > (1 + max_v_dev))] - 1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Reinforcement is invoked doing the following:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Do grid reinforcement\n", + "edisgo.reinforce()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check for over-voltage issues again:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# get voltage at each node from power-flow analysis results\n", + "v_mag_pu_pfa = edisgo.network.results.v_res(nodes=edisgo.network.mv_grid.graph.nodes())\n", + "# set maximum allowed voltage deviation to 10%\n", + "max_v_dev = 0.1\n", + "# find all nodes with a node voltage deviation greater the allowed voltage deviation\n", + "v_mag_pu_pfa[(v_mag_pu_pfa > (1 + max_v_dev))] - 1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluate results \n", + "\n", + "Above we already saw how to access voltage results from the power flow analysis. All results are stored in the [Results](http://edisgo.readthedocs.io/en/dev/api/edisgo.grid.html#edisgo.grid.network.Results) object and can be accessed through\n", + "```python\n", + "edisgo.network.results\n", + "```\n", + "\n", + "All changes in the grid conducted during the grid reinforcement, such as removed and new lines and new transformers, can be viewed as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "edisgo.network.results.equipment_changes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also retrieve grid expansion costs through:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "costs = edisgo.network.results.grid_expansion_costs" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you are interested in accumulated costs you could group them like that:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# group costs by type\n", + "costs_grouped = costs.groupby(['type']).sum()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "An overview of the assumptions used to calculate grid expansion costs can be found in the [documentation]( http://edisgo.readthedocs.io/en/dev/features_in_detail.html#grid-expansion-costs)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's compare the grid expansion costs for the 'NEP 2035' scenario with grid expansion costs for the 'ego 100' scenario. Therefore, we first have to setup the new scenario and calculate grid expansion costs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize new EDisGo object with 'ego 100' scenario\n", + "edisgo_ego100 = EDisGo(ding0_grid=ding0_grid,\n", + " worst_case_analysis=worst_case_analysis,\n", + " generator_scenario='ego100')\n", + "# conduct grid reinforcement\n", + "edisgo_ego100.reinforce()\n", + "# get grouped costs\n", + "costs_grouped_ego100 = edisgo_ego100.network.results.grid_expansion_costs.groupby(['type']).sum()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# plot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## References \n", + "\n", + " [1] A.C. Agricola et al.: dena-Verteilnetzstudie: Ausbau- und Innovationsbedarf der Stromverteilnetze in Deutschland bis 2030. 2012.\n", + "\n", + " [2] C. Rehtanz et al.: Verteilnetzstudie für das Land Baden-Württemberg, ef.Ruhr GmbH, 2017." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/ego/examples/tutorials/etrago_OpenMod_Zuerich18.ipynb b/ego/examples/tutorials/etrago_OpenMod_Zuerich18.ipynb new file mode 100644 index 00000000..0c7f115f --- /dev/null +++ b/ego/examples/tutorials/etrago_OpenMod_Zuerich18.ipynb @@ -0,0 +1,706 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"EUF\"\n", + "\"HSF\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "__copyright__ = \"Zentrum für nachhaltige Energiesysteme Flensburg\"\n", + "__license__ = \"GNU Affero General Public License Version 3 (AGPL-3.0)\"\n", + "__url__ = \"https://github.com/openego/data_processing/blob/master/LICENSE\"\n", + "__author__ = \"wolfbunke, ulfmueller\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "#
OpenMod Workshop Zürich 2018
\n", + "\n", + "## Open source tools for cross-grid-level electricity grid optimization developed in the open_eGo project - Learn-a-thon\n", + "\n", + "****\n", + "\n", + "### Learn more about\n", + "\n", + "\n", + "* __[open_eGo Project Webpage](https://openegoproject.wordpress.com/)__\n", + "* __[eTraGo Tool Documentation](http://etrago.readthedocs.io/en/latest/index.html)__ \n", + "* __[oedb Tutorials](http://oep.iks.cs.ovgu.de/dataedit/)__ How to use the OpenEnergy Database\n", + "* __[OpenMod Forum](https://forum.openmod-initiative.org/t/learn-a-thon-using-tools-for-cross-grid-level-electricity-grid-optimization-developed-in-the-open-ego-project/856)__ " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"HSF\"\n", + "## Table of Contents \n", + "\n", + "\n", + "* [Getting started with eTraGo](#started)\n", + "* [LOPF Calculation of Germany and neighbours with 10 notes](#d-kmean10)\n", + "* [LOPF Calculation of Schleswig-Holstein](#shcalc)\n", + "* [Using snapshot clustering](#snapshot)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Getting started with eTraGo \n", + "\n", + "\n", + "The python package eTraGo provides a optimization of flexibility options for transmission grids based on PyPSA. A speciality in this context is that transmission grids are described by the 380, 220 and 110 kV in Germany. Conventionally the 110kV grid is part of the distribution grid. The integration of the transmission and ‘upper’ distribution grid is part of eTraGo.\n", + "\n", + "The focus of optimization are flexibility options with a special focus on energy storages and grid expansion measures.\n", + "\n", + "\n", + "\n", + "\n", + "## Installation \n", + "\n", + "Please, find more information on the [README.md](https://github.com/openego/eGo/tree/features/tutorial/ego/examples/tutorials#etrago). \n", + "\n", + "\n", + "## Import eTraGo packages\n", + "\n", + "We are importing the [main function](https://github.com/openego/eTraGo/blob/dev/etrago/appl.py) of eTraGo and its database and plotting functions. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "from numpy import genfromtxt\n", + "np.random.seed()\n", + "import time\n", + "import oedialect\n", + "\n", + "import os\n", + "\n", + "if not 'READTHEDOCS' in os.environ:\n", + " # Sphinx does not run this code.\n", + " # Do not import internal packages directly \n", + " from etrago.tools.io import NetworkScenario, results_to_oedb\n", + " from etrago.tools.plot import (plot_line_loading, plot_stacked_gen,\n", + " add_coordinates, curtailment, gen_dist,\n", + " storage_distribution,storage_expansion)\n", + " from etrago.tools.utilities import (load_shedding, data_manipulation_sh,\n", + " results_to_csv, parallelisation, pf_post_lopf, \n", + " loading_minimization, calc_line_losses, group_parallel_lines)\n", + " from etrago.cluster.networkclustering import busmap_from_psql, cluster_on_extra_high_voltage, kmean_clustering\n", + " from egoio.tools import db\n", + " from sqlalchemy.orm import sessionmaker\n", + " from etrago.appl import etrago\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# enable jupyter interactiv plotting\n", + "%matplotlib notebook\n", + "from ipywidgets import *\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# LOPF Calculation of Germany and neighbours with 30 nodes \n", + "\n", + "\n", + "
\n", + "[top](#toc)
\n", + "\n", + "In this section we start our eTraGo calulation with an __Status Quo__ scenario of Germany an its electrical neighbours. For time and performents reasons we are useing the [k-mean](https://de.wikipedia.org/wiki/K-Means-Algorithmus) clustering [functionality of eTraGo](http://etrago.readthedocs.io/en/latest/api/etrago.cluster.html#etrago.cluster.networkclustering.kmean_clustering) and use $k=30$ nodes. For the same reason we choose the time period of __start_snapshot__ and __end_snapshot__ for a day with 24 hours of the scenario year. \n", + "\n", + "\n", + "### Make your calulation settings\n", + "\n", + "A detailed discription of the args python dictionary can be found under . \n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "args_k10 = {# Setup and Configuration:\n", + " 'db': 'oedb', # db session\n", + " 'gridversion': \"v0.2.11\", # None for model_draft or Version number (e.g. v0.2.11) for grid schema\n", + " 'method': 'lopf', # lopf or pf\n", + " 'pf_post_lopf': False, # state whether you want to perform a pf after a lopf simulation\n", + " 'start_snapshot': 4393, # 2.07.\n", + " 'end_snapshot' : 4400,\n", + " 'scn_name': 'NEP 2035', # state which scenario you want to run: Status Quo, NEP 2035, eGo100\n", + " 'solver': 'glpk', # glpk, cplex or gurobi\n", + " # Export options:\n", + " 'lpfile': False, # state if and where you want to save pyomo's lp file: False or /path/tofolder\n", + " 'results': False, # state if and where you want to save results as csv: False or /path/tofolder\n", + " 'export': False, # state if you want to export the results back to the database\n", + " # Settings: \n", + " 'storage_extendable':True, # state if you want storages to be installed at each node if necessary.\n", + " 'generator_noise':True, # state if you want to apply a small generator noise \n", + " 'reproduce_noise': False, # state if you want to use a predefined set of random noise for the given scenario. \n", + " # if so, provide path, e.g. 'noise_values.csv'\n", + " 'minimize_loading':False,\n", + " # Clustering:\n", + " 'k_mean_clustering': 30, # state if you want to perform a k-means clustering on the given network. \n", + " # State False or the value k (e.g. 
20).\n", + " 'network_clustering': False, # state if you want to perform a clustering of HV buses to EHV buses.\n", + " # Simplifications:\n", + " 'parallelisation':False, # state if you want to run snapshots parallely.\n", + " 'skip_snapshots':False,\n", + " 'line_grouping': False, # state if you want to group lines running between the same buses.\n", + " 'branch_capacity_factor': 0.7, # globally extend or lower branch capacities\n", + " 'load_shedding':False, # meet the demand at very high cost; for debugging purposes.\n", + " 'comments':None }" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Start eTraGo calculation with args setting\n", + "# create network object which incluedes all input and output data\n", + "network = etrago(args_k10)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# plot generation distripution\n", + "gen_dist(network, techs=None, snapshot=1,n_cols=3,gen_size=0.02)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# plot stacked sum of nominal power for each generator type and timestep\n", + "#fig.set_size_inches(14,14)\n", + "# fix error in .../eGo/ego/examples/tutorials/src/etrago/etrago/tools/plot.py\n", + "# 'wind_offshore':'skyblue', wind_onshore':'skyblue',\n", + "plot_stacked_gen(network, resolution=\"MW\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# make a line loading plot\n", + "# basemade einbauen für hintergrund länder\n", + "fig,ax = plt.subplots(1,1)\n", + "fig.set_size_inches(8,8)\n", + "plot_line_loading(network)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# plot to show extendable storages, if expanded\n", + "storage_expansion(network)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# get storage sizes in MW\n", + "network.storage_units.p_nom_opt.groupby(network.storage_units.carrier, axis=0).sum()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Marginal price per bus node" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# get snapshot\n", + "now = network.snapshots[2]\n", + "\n", + "fig,ax = plt.subplots(1,1)\n", + "fig.set_size_inches(6,4)\n", + "\n", + "network.plot(ax=ax,line_widths=pd.Series(0.5,network.lines.index))\n", + "plt.hexbin(network.buses.x, network.buses.y, \n", + " gridsize=20,\n", + " C=network.buses_t.marginal_price.loc[now],\n", + " cmap=plt.cm.jet)\n", + "\n", + "#for some reason the colorbar only works with graphs plt.plot\n", + "#and must be attached plt.colorbar\n", + "\n", + "cb = plt.colorbar()\n", + "cb.set_label('Locational Marginal Price (EUR/MWh)') " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Change the scnario \n", + "\n", + "* Set 'scn_name': to 'NEP 2035' and recalculate. 
\n", + "\n", + "\n", + "\n", + "****\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# delete eTraGo object\n", + "#del network" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# LOPF Calculation of Schleswig-Holstein \n", + "\n", + "\n", + "
\n", + "[top](#toc)
\n", + "\n", + "\n", + "### Case Schleswig-Holstein\n", + "\n", + "The data set of Schleswing-Holstein is an extract of the main data set and works as an island. The power production and flows of the adjacent network areas are neglected. Therefore, the installed capacity and power production is very high.\n", + "\n", + "For our analysis we used serveral plotting options of eTraGo of [etrago.tools.plot](http://etrago.readthedocs.io/en/latest/api/etrago.tools.html#module-etrago.tools.plot).\n", + "\n", + "\n", + "### Make your settings\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "args_sh = {# Setup and Configuration:\n", + " 'db': 'oedb', # db session\n", + " 'gridversion': \"v0.2.11\", # None for model_draft or Version number (e.g. v0.2.11) for grid schema\n", + " 'method': 'lopf', # lopf or pf\n", + " 'pf_post_lopf': False, # state whether you want to perform a pf after a lopf simulation\n", + " 'start_snapshot': 4393, # 6552\n", + " 'end_snapshot' : 4394,\n", + " 'scn_name': 'SH NEP 2035', # state which scenario you want to run: Status Quo, NEP 2035, eGo100\n", + " 'solver': 'glpk', # glpk, cplex or gurobi\n", + " # Export options:\n", + " 'lpfile': False, # state if and where you want to save pyomo's lp file: False or /path/tofolder\n", + " 'results': False, # state if and where you want to save results as csv: False or /path/tofolder\n", + " 'export': False, # state if you want to export the results back to the database\n", + " # Settings: \n", + " 'storage_extendable':False, # state if you want storages to be installed at each node if necessary.\n", + " 'generator_noise':True, # state if you want to apply a small generator noise \n", + " 'reproduce_noise': False, # state if you want to use a predefined set of random noise for the given.\n", + " # scenario. if so, provide path, e.g. 'noise_values.csv'\n", + " 'minimize_loading':False,\n", + " # Clustering:\n", + " 'k_mean_clustering': False, # state if you want to perform a k-means clustering on the given network. \n", + " # State False or the value k (e.g. 
20).\n", + " 'network_clustering': False, # state if you want to perform a clustering of HV buses to EHV buses.\n", + " # Simplifications:\n", + " 'parallelisation':False, # state if you want to run snapshots parallely.\n", + " 'skip_snapshots':False,\n", + " 'line_grouping': False, # state if you want to group lines running between the same buses.\n", + " 'branch_capacity_factor': 0.7, # globally extend or lower branch capacities\n", + " 'load_shedding':False, # meet the demand at very high cost; for debugging purposes.\n", + " 'comments':None }" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# Start eTraGo calculation with args setting\n", + "# create network object \n", + "network = etrago(args_sh)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# plot generation distripution\n", + "gen_dist(network, techs=None, snapshot=1,n_cols=3,gen_size=0.02)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# plot stacked sum of nominal power for each generator type and timestep\n", + "plot_stacked_gen(network, resolution=\"MW\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# plots\n", + "# make a line loading plot\n", + "fig,ax = plt.subplots(1,1)\n", + "fig.set_size_inches(12,10)\n", + "plot_line_loading(network)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# plot to show extendable storages\n", + "storage_expansion(network)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# curtailment per carrier / energy source\n", + "curtailment(network, carrier='wind_onshore')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# delete network object\n", + "del network" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using pf after lopf Calculation of Germany and neighbours with 30 nodes \n", + "\n", + "
\n", + "[top](#toc)
\n", + "\n", + "\n", + "In order to compute the grid losses we add an power flow calculation after our liniar opf calculation by setting *pf_post_lopf = True*. \n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "args_lopf_pf = {# Setup and Configuration:\n", + " 'db': 'oedb', # db session\n", + " 'gridversion': \"v0.2.11\", # None for model_draft or Version number (e.g. v0.2.11) for grid schema\n", + " 'method': 'lopf', # lopf or pf\n", + " 'pf_post_lopf': True, # state whether you want to perform a pf after a lopf simulation\n", + " 'start_snapshot': 4393, \n", + " 'end_snapshot' : 4417,\n", + " 'scn_name': 'NEP 2035', # state which scenario you want to run: Status Quo, NEP 2035, eGo100\n", + " 'solver': 'glpk', # glpk, cplex or gurobi\n", + " # Export options:\n", + " 'lpfile': False, # state if and where you want to save pyomo's lp file: False or /path/tofolder\n", + " 'results': False, # state if and where you want to save results as csv: False or /path/tofolder\n", + " 'export': False, # state if you want to export the results back to the database\n", + " # Settings: \n", + " 'storage_extendable':False, # state if you want storages to be installed at each node if necessary.\n", + " 'generator_noise':True, # state if you want to apply a small generator noise \n", + " 'reproduce_noise': False, # state if you want to use a predefined set of random noise for the given.\n", + " # scenario. if so, provide path, e.g. 'noise_values.csv'\n", + " 'minimize_loading':False,\n", + " # Clustering:\n", + " 'k_mean_clustering': 30, # state if you want to perform a k-means clustering on the given network. \n", + " # State False or the value k (e.g. 20).\n", + " 'network_clustering': False, # state if you want to perform a clustering of HV buses to EHV buses.\n", + " # Simplifications:\n", + " 'parallelisation':False, # state if you want to run snapshots parallely.\n", + " 'skip_snapshots':False,\n", + " 'line_grouping': False, # state if you want to group lines running between the same buses.\n", + " 'branch_capacity_factor': 0.7, # globally extend or lower branch capacities\n", + " 'load_shedding':False, # meet the demand at very high cost; for debugging purposes.\n", + " 'comments':None }" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# Start eTraGo calculation with args setting\n", + "# create network object\n", + "network = etrago(args_lopf_pf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# plot stacked sum of nominal power for each generator type and timestep\n", + "plot_stacked_gen(network, resolution=\"MW\")\n", + "#plt.close()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# plots\n", + "# make a line loading plot\n", + "fig,ax = plt.subplots(1,1)\n", + "fig.set_size_inches(8,8)\n", + "\n", + "plot_line_loading(network)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Calculate grid losses\n", + "\n", + "We are using the function [calc_line_losses(network)](http://etrago.readthedocs.io/en/latest/_modules/etrago/tools/utilities.html#calc_line_losses)." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "\n",
+    "# calculate total grid losses\n",
+    "calc_line_losses(network)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Plot line losses"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Line losses\n",
+    "# calculate apparent power S = sqrt(p² + q²) [in MVA]\n",
+    "s0_lines = ((network.lines_t.p0**2 + network.lines_t.q0**2).\\\n",
+    "            apply(np.sqrt)) \n",
+    "\n",
+    "# calculate current I = S / U [in A]\n",
+    "i0_lines = np.multiply(s0_lines, 1000000) / np.multiply(network.lines.v_nom, 1000) \n",
+    "\n",
+    "# calculate losses per line and timestep network.lines_t.line_losses = I² * R [in MW]\n",
+    "network.lines_t.losses = np.divide(i0_lines**2 * network.lines.r, 1000000)\n",
+    "\n",
+    "# calculate total losses per line [in MW]\n",
+    "network.lines = network.lines.assign(losses=np.sum(network.lines_t.losses).values)\n",
+    "\n",
+    "# prepare plotting\n",
+    "timestep =1\n",
+    "cmap = plt.cm.jet\n",
+    "\n",
+    "fig,ax = plt.subplots(1,1)\n",
+    "fig.set_size_inches(6,4)\n",
+    "\n",
+    "# do the plotting\n",
+    "lc= network.plot(line_colors=network.lines.losses, line_cmap=cmap,\n",
+    "                 title=\"Line losses\", line_widths=0.55)\n",
+    "\n",
+    "cb = plt.colorbar(lc[1])\n",
+    "cb.set_label('Line losses in MW')\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Plot the reactive power"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# get snapshot\n",
+    "now = network.snapshots[2]\n",
+    "#plot the reactive power\n",
+    "fig,ax = plt.subplots(1,1)\n",
+    "\n",
+    "fig.set_size_inches(6,6)\n",
+    "\n",
+    "q = network.buses_t.q.sum()#.loc[now]\n",
+    "\n",
+    "bus_colors = pd.Series(\"r\",network.buses.index)\n",
+    "bus_colors[q< 0.] 
= \"b\"\n", + "\n", + "\n", + "network.plot(bus_sizes=abs(q)*0.005,ax=ax,bus_colors=bus_colors,title=\"Reactive power feed-in (red=+ve, blue=-ve)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Have a look into the Programm on Github\n", + "\n", + "* " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/ego/examples/tutorials/grid_1476.png b/ego/examples/tutorials/grid_1476.png new file mode 100644 index 00000000..cbef8115 Binary files /dev/null and b/ego/examples/tutorials/grid_1476.png differ diff --git a/ego/examples/tutorials/requirements.yml b/ego/examples/tutorials/requirements.yml new file mode 100644 index 00000000..a672c934 --- /dev/null +++ b/ego/examples/tutorials/requirements.yml @@ -0,0 +1,28 @@ +name: openMod_Zuerich2018 + +channels: + - conda-forge + - anaconda + +dependencies: + - python=3.5 + - notebook # jupyter notebook + - numpy=1.14.3 + - pandas=0.20.3 + - pyyaml=3.12 + - requests + - sqlalchemy + - Rtree=0.8.3 + - scikit-learn + - scipy + - ipywidgets + - pip: + - ding0==0.1.4 + - "--editable=git+git@github.com:openego/eGo.git@dev#egg=eGo --process-dependency-links" + - "--editable=git+git@github.com:openego/oedialect.git@master#egg=oedialect" + - "--editable=git+git@github.com:openego/ego.io.git@v0.4.0#egg=egoio" + - "--editable=git+git@github.com:openego/PyPSA.git@dev#egg=pypsa" + - "--editable=git+git@github.com:openego/eTraGo.git@0.5.1#egg=etrago" # eTraGo==0.5.1 + - "--editable=git+git@github.com:openego/tsam.git@master#egg=tsam" + - "--editable=git+git@github.com:openego/eDisGo.git@dev#egg=edisgo" + diff --git a/ego/run_test.py b/ego/run_test.py new file mode 100644 index 00000000..6bc8755c --- /dev/null +++ b/ego/run_test.py @@ -0,0 +1,46 @@ +from datetime import datetime +from tools.io import eGo +import sys +from pycallgraph import PyCallGraph +from pycallgraph.output import GraphvizOutput +import pandas as pd + + +def main(): + graphviz = GraphvizOutput() + graphviz.output_file = 'basic.png' + date = str(datetime.now()) + print(date) + with PyCallGraph(output=graphviz): + + ego = eGo(jsonpath='scenario_setting_solver_option.json') + + print(ego.etrago.storage_charges) + + print(ego.etrago.storage_investment_costs) + + pd.DataFrame(ego.etrago.storage_investment_costs)\ + .to_csv(date+'__etrago_storage_costs.csv') + print(ego.etrago.grid_investment_costs) + etg_gic = pd.DataFrame(ego.etrago.grid_investment_costs) + etg_gic.to_csv(date+'__etrago_grid_costs.csv') + # test eTraGo plot and functions + + print(ego.edisgo.grid_investment_costs) + edg_gic = pd.DataFrame(ego.edisgo.grid_investment_costs) + edg_gic.to_csv(date+'__edisgo_gridscosts.csv') + + ego.etrago_line_loading() + ego.etrago_stacked_gen() + ego.etrago_gen_dist() + ego.etrago_storage_distribution() + ego.etrago_voltage() + + # object size + print(sys.getsizeof(ego)) + + print(str(datetime.now())) + + +if __name__ == '__main__': + main() diff --git a/ego/scenario_setting.json b/ego/scenario_setting.json index bb19ccdf..d3c1fd7d 100644 --- a/ego/scenario_setting.json +++ b/ego/scenario_setting.json @@ -3,38 +3,47 @@ "eTraGo": true, "eDisGo": true, "db": "oedb", - "result_id": 359, - 
"recover": false, - "gridversion": null + "recover": true, + "result_id": 26, + "gridversion": "v0.4.2" }, "eTraGo": { - "db": "oedb", - "gridversion": null, "method": "lopf", "pf_post_lopf": false, - "start_snapshot": 1, - "end_snapshot" : 5, - "scn_name": "SH NEP 2035", + "start_snapshot": 720, + "end_snapshot" : 730, "solver": "gurobi", + "solver_options":{"threads":4, + "method":2, + "crossover":1, + "BarConvTol":"1.e-5", + "FeasibilityTol":"1.e-5"}, + "scn_name": "NEP 2035", + "scn_extension": null, + "scn_decommissioning": null, + "add_Belgium_Norway": false, "lpfile": false, "results": false, "export": false, - "storage_extendable": false, - "generator_noise": true, + "extendable": "[]", + "generator_noise": false, "reproduce_noise": false, "minimize_loading": false, - "k_mean_clustering": false, - "network_clustering": false, + "network_clustering_kmeans": false, + "load_cluster": false, + "network_clustering_ehv": false, + "snapshot_clustering": false, "parallelisation": false, "skip_snapshots": false, "line_grouping": false, - "branch_capacity_factor": 1, + "branch_capacity_factor": 0.8, "load_shedding": false, - "comments": null + "comments": "eDisGo integration" }, "eDisGo": { - "direct_specs": false, - "specs": true, - "comments": "make a comment, SH SQ" + "ding0_files": "data/MV_grids/20180713110719", + "choice_mode": "manual", + "manual_grids": [1729], + "no_grids": null } } diff --git a/ego/tools/__init__.py b/ego/tools/__init__.py index 3ecdf3c8..c28ee935 100644 --- a/ego/tools/__init__.py +++ b/ego/tools/__init__.py @@ -1,3 +1,6 @@ +""" +""" + __copyright__ = "Europa-Universität Flensburg, Centre for Sustainable Energy Systems" __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "wolf_bunke" diff --git a/ego/tools/economics.py b/ego/tools/economics.py index 09d1b89a..06c7c18c 100644 --- a/ego/tools/economics.py +++ b/ego/tools/economics.py @@ -1,18 +1,46 @@ -""" -Module to collect useful functions for economic calculation of eGo - -Todo: - 1) Investment costs of eTrago and eDisGo - 2) Total system costs +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# File description +"""This module collects useful functions for economic calculation of eGo which can +mainly distinguished in operational and investment costs. 
""" + import io -import pandas as pd import os +import logging +logger = logging.getLogger('ego') + +if not 'READTHEDOCS' in os.environ: + import pandas as pd + import numpy as np + from ego.tools.utilities import get_time_steps + +__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität"\ + "Flensburg, Centre for Sustainable Energy Systems" +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolfbunke" + # calculate annuity per time step or periode -def annuity_per_period(capex, n, wacc, t): - """ +def annuity_per_period(capex, n, wacc, t, p): + """ Calculate per given period + Parameters ---------- capex : float @@ -21,14 +49,369 @@ def annuity_per_period(capex, n, wacc, t): Number of years that the investment is used (economic lifetime) wacc : float Weighted average cost of capital - - ToDo - ---- t : int Timesteps in hours - i : float - interest rate - ... + p : float + interest rate + + """ + + # ToDo change formular to hourly annuity costs + return capex * (wacc * (1 + wacc) ** n) / ((1 + wacc) ** n - 1) + + +def edisgo_convert_capital_costs(overnight_cost, t, p, json_file): + """ Get scenario and calculation specific annuity cost by given capital + costs and lifetime. + + + Parameters + ---------- + json_file : :obj:dict + Dictionary of the ``scenario_setting.json`` file + _start_snapshot : int + Start point of calculation from ``scenario_setting.json`` file + _end_snapshot : int + End point of calculation from ``scenario_setting.json`` file + _p : numeric + interest rate of investment + _T : int + lifetime of investment + + Returns + ------- + annuity_cost : numeric + Scenario and calculation specific annuity cost by given capital + costs and lifetime + + Examples + -------- + .. math:: + + PVA = (1 / p) - (1 / (p*(1 + p)^T)) + + """ + # Based on eTraGo calculation in + # https://github.com/openego/eTraGo/blob/dev/etrago/tools/utilities.py#L651 + + # Calculate present value of an annuity (PVA) + PVA = (1 / p) - (1 / (p*(1 + p) ** t)) + + year = 8760 + # get period of calculation + period = (json_file['eTraGo']['start_snapshot'] + - json_file['eTraGo']['start_snapshot']) + + # calculation of capital_cost + annuity_cost = (overnight_cost / (PVA * (year/(period+1)))) + + return annuity_cost + + +def etrago_operating_costs(network): + """ Function to get all operating costs of eTraGo. + + Parameters + ---------- + network_etrago: :class:`etrago.tools.io.NetworkScenario` + eTraGo network object compiled by :meth:`etrago.appl.etrago` + + Returns + ------- + power_price : :pandas:`pandas.Dataframe` + DataFrame with aggregate operational costs per component and voltage + level in [EUR] per calculated time steps. + + Example + ------- + + .. code-block:: python + + >>> from ego.tools.io import eGo + >>> ego = eGo(jsonpath='scenario_setting.json') + >>> ego.etrago.operating_costs + + +-------------+-------------------+------------------+ + | component |operation_costs | voltage_level | + +=============+===================+==================+ + |biomass | 27.0 | | + +-------------+-------------------+------------------+ + |line losses | 0.0 | | + +-------------+-------------------+------------------+ + |wind_onshore | 0.0 | | + +-------------+-------------------+------------------+ + """ + # TODO - change naming and function structure + # TODO - seperate operation costs in other functions ? 
+ # - losses + # - grid losses : amount and costs + # - use calc_line_losses(network) from etrago pf_post_lopf + + etg = network + + # groupby v_nom + power_price = etg.generators_t.p[etg.generators[etg.generators. + control != 'Slack'].index] * etg.generators.\ + marginal_cost[etg.generators[etg.generators. + control != 'Slack'].index] # without Slack + + power_price = power_price.groupby( + etg.generators.carrier, axis=1).sum().sum() + power_price + + etg.buses_t.marginal_price + etg.buses_t['p'].sum().sum() + + # active power x nodel price / + etg.lines_t['p0'].sum().sum() + etg.lines_t['p1'].sum().sum() + # Reactive power + etg.lines_t['q0'].sum().sum() + etg.lines_t['q1'].sum().sum() + + # currency/MVA ? wie berechnen + + etg.lines_t['mu_lower'].sum().sum() + + etg.lines['s_nom'].sum() + + etg.lines_t['mu_upper'].sum().sum() + + return power_price + + +def etrago_grid_investment(network, json_file): + """ Function to get grid expantion costs form etrago + + Parameters + ---------- + + network_etrago: :class:`etrago.tools.io.NetworkScenario` + eTraGo network object compiled by :meth:`etrago.appl.etrago` + json_file : :obj:dict + Dictionary of the ``scenario_setting.json`` file + + Returns + ------- + grid_investment_costs : :pandas:`pandas.Dataframe` + Dataframe with ``voltage_level``, ``number_of_expansion`` and + ``capital_cost`` per calculated time steps + + Example + ------- + + .. code-block:: python + + >>> from ego.tools.io import eGo + >>> ego = eGo(jsonpath='scenario_setting.json') + >>> ego.etrago.grid_investment_costs + + +--------------+-------------------+--------------+ + | voltage_level|number_of_expansion| capital_cost| + +==============+===================+==============+ + | ehv | 27.0 | 31514.1305 | + +--------------+-------------------+--------------+ + | hv | 0.0 | 0.0 | + +--------------+-------------------+--------------+ + """ + + # check settings for extendable + if 'network' not in json_file['eTraGo']['extendable']: + print("The optimizition was not using parameter 'extendable': network") + print("No grid expantion costs from etrago") + + if 'network' in json_file['eTraGo']['extendable']: + + lines = network.lines[['v_nom', 'capital_cost', 's_nom', + 's_nom_min', 's_nom_opt']].reset_index() + + lines['s_nom_expansion'] = lines.s_nom_opt.subtract( + lines.s_nom, axis='index') + lines['capital_cost'] = lines.s_nom_expansion.multiply( + lines.capital_cost, axis='index') + lines['number_of_expansion'] = lines.s_nom_expansion > 0.0 + lines['time_step'] = get_time_steps(json_file) + + # add v_level + lines['voltage_level'] = 'unknown' + + ix_ehv = lines[lines['v_nom'] >= 380].index + lines.set_value(ix_ehv, 'voltage_level', 'ehv') + + ix_hv = lines[(lines['v_nom'] <= 220) & (lines['v_nom'] >= 110)].index + lines.set_value(ix_hv, 'voltage_level', 'hv') + + # based on eTraGo Function: + # https://github.com/openego/eTraGo/blob/dev/etrago/tools/utilities.py#L651 + # Definition https://pypsa.org/doc/components.html#line + + # get costs of transfomers + trafos = network.transformers[['v_nom0', 'v_nom1', 'capital_cost', + 's_nom_extendable', 's_nom', + 's_nom_opt']] + trafos.columns.name = "" + trafos.index.name = "" + trafos.reset_index() + + trafos['s_nom_extendable'] = trafos.s_nom_opt.subtract( + trafos.s_nom, axis='index') + trafos['capital_cost'] = trafos.s_nom_extendable.multiply( + trafos.capital_cost, axis='index') + trafos['number_of_expansion'] = trafos.s_nom_extendable > 0.0 + trafos['time_step'] = get_time_steps(json_file) + + # add v_level + 
trafos['voltage_level'] = 'unknown' + + # TODO check + ix_ehv = trafos[trafos['v_nom0'] >= 380].index + trafos.set_value(ix_ehv, 'voltage_level', 'ehv') + + ix_hv = trafos[(trafos['v_nom0'] <= 220) & + (trafos['v_nom0'] >= 110)].index + trafos.set_value(ix_hv, 'voltage_level', 'hv') + + # aggregate lines and trafo + line = lines[['voltage_level', + 'capital_cost']].groupby('voltage_level').sum().reset_index() + + trafo = trafos[['voltage_level', + 'capital_cost']].groupby('voltage_level').sum().reset_index() + + # merge trafos and line + frames = [line, trafo] + + grid_investment_costs = pd.concat(frames) + + return grid_investment_costs + + # ToDo: add .agg({'number_of_expansion':lambda x: x.count(), + # 's_nom_expansion': np.sum, + # 'grid_costs': np.sum}) <- time_step + pass + + +def edisgo_grid_investment(edisgo_networks, json_file): + """ + Function aggregates all costs, based on all calculated eDisGo + grids and their weightings + + Parameters + ---------- + edisgo_networks : :class:`ego.tools.edisgo_integration.EDisGoNetworks` + Contains multiple eDisGo networks + + Returns + ------- + None or :pandas:`pandas.DataFrame` + Dataframe containing annuity costs per voltage level + + """ + etrago_args = json_file['eTraGo'] + scn_name = etrago_args['scn_name'] + + if scn_name == 'Status Quo': + logger.info('No eDisGo grid investment in Status Quo scenario') + return None + + t = 40 + p = 0.05 + logger.warning('For all components T={} and p={} is used'.format(t, p)) + + annuity_costs = pd.DataFrame(columns=['voltage_level', 'annuity_costs']) + + for key, value in edisgo_networks.edisgo_grids.items(): + + if value is None: + logger.warning('No results available for grid {}'.format(key)) + continue + + costs_single = value.network.results.grid_expansion_costs + + if (costs_single['total_costs'].sum() == 0.): + logger.info('No expansion costs for grid {}'.format(key)) + continue + +# costs_single = costs_single.rename( +# columns={'voltage_level': 'voltage_level'} +# ) + + choice = edisgo_networks.grid_choice + weighting = choice.loc[ + choice['the_selected_network_id'] == key + ][ + 'no_of_points_per_cluster' + ].values[0] + + costs_single['annuity_costs'] = edisgo_convert_capital_costs( + costs_single['total_costs'], + t=t, + p=p, + json_file=json_file) + + costs_single['annuity_costs'] = ( + costs_single['annuity_costs'] * weighting) + + costs_single = costs_single[['voltage_level', 'annuity_costs']] + + annuity_costs = annuity_costs.append(costs_single, ignore_index=True) + + if len(annuity_costs) == 0: + logger.info('No expansion costs in any MV grid') + return None + + else: + aggr_capital_costs = annuity_costs.groupby( + ['voltage_level']).sum().reset_index() + aggr_capital_costs = aggr_capital_costs.rename( + columns={'annuity_costs': 'capital_cost'} + ) + aggr_capital_costs['capital_cost'] = ( + aggr_capital_costs['capital_cost'] + * 1000) # In eDisGo all costs are in kEuro, however + # eGo only takes Euro + + return aggr_capital_costs + + +def get_generator_investment(network, scn_name): + """ Get investment costs per carrier/ generator. + + """ + # TODO - change values in csv + # - add values to database + # work around later db table -> check capital_cost as cost input?!? 
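    # Sketch of the computation below: scenario-specific investment costs are
    # read from investment_costs.csv and joined with the installed capacity
    # per carrier; assuming the CSV values are given per kW, the result is
    # scaled to EUR via
    #
    #   carrier_costs = specific_cost * p_nom * 1000    # p_nom in MW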
+ + etg = network + + # TODO change it to utilities function + try: + dirname = os.path.dirname(__file__) + filename = 'investment_costs.csv' + path = os.path.join(dirname, filename) + invest = pd.DataFrame.from_csv(path + '~/data/'+filename) + except FileNotFoundError: + path = os.getcwd() + filename = 'investment_costs.csv' + invest = pd.DataFrame.from_csv(path + '/data/'+filename) + + if scn_name in ['SH Status Quo', 'Status Quo']: + invest_scn = 'Status Quo' + + if scn_name in ['SH NEP 2035', 'NEP 2035']: + invest_scn = 'NEP 2035' + + if scn_name in ['SH eGo 100', 'eGo 100']: + invest_scn = 'eGo 100' + + gen_invest = pd.concat([invest[invest_scn], + etg.generators.groupby('carrier')['p_nom'].sum()], + axis=1, join='inner') + + gen_invest = pd.concat([invest[invest_scn], etg.generators.groupby('carrier') + ['p_nom'].sum()], axis=1, join='inner') + gen_invest['carrier_costs'] = gen_invest[invest_scn] * \ + gen_invest['p_nom'] * 1000 # in MW - return capex * (wacc * (1 + wacc) ** n) / ((1 + wacc) ** n - 1) # ToDo change formular to hourly annuity costs + return gen_invest diff --git a/ego/tools/edisgo_integration.py b/ego/tools/edisgo_integration.py new file mode 100644 index 00000000..d1c1f2b8 --- /dev/null +++ b/ego/tools/edisgo_integration.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description +""" +This file is part of the the eGo toolbox. +It contains the class definition for multiple eDisGo networks. 
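
The central class :class:`EDisGoNetworks` is normally instantiated by
:class:`ego.tools.io.eDisGoResults`. A minimal stand-alone sketch (assuming
``json_file`` is the dictionary read from ``scenario_setting.json`` and
``etrago_network`` a solved eTraGo network) could look like:

.. code-block:: python

    from ego.tools.edisgo_integration import EDisGoNetworks

    edisgo_networks = EDisGoNetworks(json_file=json_file,
                                     etrago_network=etrago_network)

    edisgo_networks.grid_choice    # chosen MV grids and their weighting
    edisgo_networks.edisgo_grids   # dict of eDisGo objects, keyed by MV grid ID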
+""" +__copyright__ = ("Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems") +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolf_bunke, maltesc" + +# Import +import os +import logging +if not 'READTHEDOCS' in os.environ: + from egoio.db_tables import model_draft, grid + from egoio.tools import db + from edisgo.grid.network import Results, TimeSeriesControl + from edisgo.tools.edisgo_run import ( + run_edisgo_basic + ) + from edisgo.grid import tools + from ego.tools.specs import ( + get_etragospecs_direct + ) + from ego.tools.mv_cluster import ( + analyze_attributes, + cluster_mv_grids) + + import pandas as pd + from sqlalchemy.orm import sessionmaker + + +# Logging +logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) +logger = logging.getLogger('ego') + +class EDisGoNetworks: + """ + Performs multiple eDisGo runs and stores the resulting edisgo_grids + + Parameters + ---------- + json_file : :obj:dict + Dictionary of the ``scenario_setting.json`` file + etrago_network: :class:`etrago.tools.io.NetworkScenario` + eTraGo network object compiled by :meth:`etrago.appl.etrago` + + """ + + def __init__(self, json_file, etrago_network): + + conn = db.connection(section='oedb') + Session = sessionmaker(bind=conn) + self._session = Session() + + # Genral Json Inputs + self._json_file = json_file + self._grid_version = self._json_file['global']['gridversion'] + + # eTraGo args + self._etrago_args = self._json_file['eTraGo'] + self._scn_name = self._etrago_args['scn_name'] + + # eDisGo args + self._edisgo_args = self._json_file['eDisGo'] + self._ding0_files = self._edisgo_args['ding0_files'] + self._choice_mode = self._edisgo_args['choice_mode'] + + ## Scenario translation + if self._scn_name == 'Status Quo': + self._generator_scn = None + elif self._scn_name == 'NEP 2035': + self._generator_scn = 'nep2035' + elif self._scn_name == 'eGo 100': + self._generator_scn = 'ego100' + + ## Versioning + if self._grid_version is not None: + self._versioned = True + else: + self._versioned = False + + # eTraGo Results (Input) + self._etrago_network = etrago_network + + # eDisGo Results + self._edisgo_grids = {} + + ## Execute Functions + self._set_grid_choice() + self._run_edisgo_pool() + + @property + def edisgo_grids(self): + """ + Container for eDisGo grids, including all results + + Returns + ------- + :obj:`dict` of :class:`edisgo.grid.network.EDisGo` + Dictionary of eDisGo objects, keyed by MV grid ID + + """ + return self._edisgo_grids + + @property + def grid_choice(self): + """ + Container for the choice of MV grids, including their weighting + + Returns + ------- + :pandas:`pandas.DataFrame` + Dataframe containing the chosen grids and their weightings + + """ + return self._grid_choice + + def _analyze_cluster_attributes(self): + """ + Analyses the attributes wind and solar capacity and farthest node + for clustering. 
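
        The results are written to ``attributes.csv`` in the ding0 files
        folder and reused by :meth:`_cluster_mv_grids`.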
+ """ + analyze_attributes(self._ding0_files) + + def _cluster_mv_grids(self, no_grids): + """ + Clusters the MV grids based on the attributes, for a given number + of MV grids + + Parameters + ---------- + no_grids : int + Desired number of clusters (of MV grids) + + Returns + ------- + :pandas:`pandas.DataFrame` + Dataframe containing the clustered MV grids and their weightings + + """ + attributes_path = self._ding0_files + '/attributes.csv' + + if not os.path.isfile(attributes_path): + logger.info('Attributes file is missing') + logger.info('Attributes will be calculated') + self._analyze_cluster_attributes() + + return cluster_mv_grids(self._ding0_files, no_grids) + + def _check_available_mv_grids(self): + """ + Checks all available MV grids in the given folder (from the settings) + + Returns + ------- + :obj:`list` + List of MV grid ID's + + """ + mv_grids = [] + for file in os.listdir(self._ding0_files): + if file.endswith('.pkl'): + mv_grids.append( + int(file.replace( + 'ding0_grids__', '' + ).replace('.pkl', ''))) + + return mv_grids + + def _set_grid_choice(self): + """ + Sets the grid choice based on the settings file + + """ + if self._choice_mode == 'cluster': + no_grids = self._edisgo_args['no_grids'] + logger.info('Clustering to {} MV grids'.format(no_grids)) + cluster = self._cluster_mv_grids(no_grids) + + elif self._choice_mode == 'manual': + man_grids = self._edisgo_args['manual_grids'] + cluster = pd.DataFrame( + man_grids, + columns=['the_selected_network_id']) + cluster['no_of_points_per_cluster'] = 1 + logger.info( + 'Calculating manually chosen MV grids {}'.format(man_grids) + ) + + elif self._choice_mode == 'all': + mv_grids = self._check_available_mv_grids() + cluster = pd.DataFrame( + mv_grids, + columns=['the_selected_network_id']) + cluster['no_of_points_per_cluster'] = 1 + no_grids = len(mv_grids) + logger.info( + 'Calculating all available {} MV grids'.format(no_grids) + ) + + self._grid_choice = cluster + + def _run_edisgo_pool(self): + """ + Runs eDisGo for the chosen grids + + """ + logger.warning('Parallelization not implemented yet') + no_grids = len(self._grid_choice) + count = 0 + for idx, row in self._grid_choice.iterrows(): + prog = '%.1f' % (count / no_grids * 100) + logger.info( + '{} % Calculated by eDisGo'.format(prog) + ) + + mv_grid_id = int(row['the_selected_network_id']) + logger.info( + 'MV grid {}'.format(mv_grid_id) + ) + try: + edisgo_grid = self._run_edisgo(mv_grid_id) + self._edisgo_grids[ + mv_grid_id + ] = edisgo_grid + except Exception: + self._edisgo_grids[mv_grid_id] = None + logger.exception( + 'MV grid {} failed: \n'.format(mv_grid_id) + ) + count += 1 + + def _run_edisgo(self, mv_grid_id, apply_curtailment=True): + + """ + Performs a single eDisGo run + + Parameters + ---------- + mv_grid_id : int + MV grid ID of the ding0 grid + + Returns + ------- + :class:`edisgo.grid.network.EDisGo` + Returns the complete eDisGo container, also including results + """ + + logger.info('Calculating interface values') + bus_id = self._get_bus_id_from_mv_grid(mv_grid_id) + + specs = get_etragospecs_direct( + self._session, + bus_id, + self._etrago_network, + self._scn_name) + + ding0_filepath = ( + self._ding0_files + + '/ding0_grids__' + + str(mv_grid_id) + + '.pkl') + + if not os.path.isfile(ding0_filepath): + msg = 'Not MV grid file for MV grid ID: ' + str(mv_grid_id) + logger.error(msg) + raise Exception(msg) + + logger.info('Initial MV grid reinforcement (worst-case anaylsis)') + edisgo_grid = run_edisgo_basic( + 
ding0_filepath=ding0_filepath, + generator_scenario=None, + analysis='worst-case')[0] # only the edisgo_grid is returned + + logger.info('eTraGo feed-in case') + edisgo_grid.network.results = Results() + + if self._generator_scn: + logger.info( + 'Importing generators for scenario {}'.format( + self._scn_name) + ) + edisgo_grid.import_generators( + generator_scenario=self._generator_scn) + else: + logger.info( + 'No generators imported for scenario {}'.format( + self._scn_name) + ) + edisgo_grid.network.pypsa = None + + logger.info('Updating eDisGo timeseries with eTraGo values') + edisgo_grid.network.timeseries = TimeSeriesControl( + network=edisgo_grid.network, + timeseries_generation_fluctuating=specs['potential'], + timeseries_generation_dispatchable=specs['conv_dispatch'], + timeseries_load='demandlib', + timeindex=specs['conv_dispatch'].index).timeseries + + if apply_curtailment: + logger.info('Including Curtailment') + gens_df = tools.get_gen_info(edisgo_grid.network) + solar_wind_capacities = gens_df.groupby( + by=['type', 'weather_cell_id'] + )['nominal_capacity'].sum() + + curt_abs = pd.DataFrame(columns=specs['curtailment'].columns) + for col in curt_abs: + curt_abs[col] = ( + specs['curtailment'][col] + * solar_wind_capacities[col]) + + edisgo_grid.curtail(curtailment_methodology='curtail_all', + timeseries_curtailment=curt_abs) + # Think about the other curtailment functions!!!! + + edisgo_grid.analyze() + + edisgo_grid.reinforce() + + return edisgo_grid + + def _get_mv_grid_from_bus_id(self, bus_id): + """ + Queries the MV grid ID for a given eTraGo bus + + Parameters + ---------- + bus_id : int + eTraGo bus ID + + Returns + ------- + int + MV grid (ding0) ID + + """ + + if self._versioned is True: + ormclass_hvmv_subst = grid.__getattribute__( + 'EgoDpHvmvSubstation' + ) + subst_id = self._session.query( + ormclass_hvmv_subst.subst_id + ).filter( + ormclass_hvmv_subst.otg_id == bus_id, + ormclass_hvmv_subst.version == self._grid_version + ).scalar() + + if self._versioned is False: + ormclass_hvmv_subst = model_draft.__getattribute__( + 'EgoGridHvmvSubstation' + ) + subst_id = self._session.query( + ormclass_hvmv_subst.subst_id + ).filter( + ormclass_hvmv_subst.otg_id == bus_id + ).scalar() + + return subst_id + + def _get_bus_id_from_mv_grid(self, subst_id): + """ + Queries the eTraGo bus ID for given MV grid (ding0) ID + + Parameters + ---------- + subst_id : int + MV grid (ding0) ID + + Returns + ------- + int + eTraGo bus ID + + """ + if self._versioned is True: + ormclass_hvmv_subst = grid.__getattribute__( + 'EgoDpHvmvSubstation' + ) + bus_id = self._session.query( + ormclass_hvmv_subst.otg_id + ).filter( + ormclass_hvmv_subst.subst_id == subst_id, + ormclass_hvmv_subst.version == self._grid_version + ).scalar() + + if self._versioned is False: + ormclass_hvmv_subst = model_draft.__getattribute__( + 'EgoGridHvmvSubstation' + ) + bus_id = self._session.query( + ormclass_hvmv_subst.otg_id + ).filter( + ormclass_hvmv_subst.subst_id == subst_id + ).scalar() + + return bus_id \ No newline at end of file diff --git a/ego/tools/io.py b/ego/tools/io.py index b16ed583..843f6e3e 100644 --- a/ego/tools/io.py +++ b/ego/tools/io.py @@ -1,103 +1,469 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free 
Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description +"""This file contains the eGo main class as well as input & output functions +of eGo in order to build the eGo application container. """ -Input & output functions of eGo - -""" -__copyright__ = "ZNES" -__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__url__ = "https://github.com/openego/data_processing/blob/master/LICENSE" -__author__ = "wolfbunke" - import sys import os +import logging +logger = logging.getLogger('ego') +import pandas as pd +import numpy as np + if not 'READTHEDOCS' in os.environ: import pyproj as proj + #import geopandas as gpd + from shapely.geometry import Polygon, Point, MultiPolygon - from sqlalchemy import MetaData, create_engine, func + from sqlalchemy import MetaData, create_engine, and_, func from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.automap import automap_base - from geoalchemy2 import Geometry, shape # Geometry type used by SQLA from geoalchemy2 import * - import geopandas as gpd + + from egoio.tools import db + from etrago.tools.io import load_config_file + from egoio.db_tables.model_draft import EgoGridPfHvSource as Source,\ + EgoGridPfHvTempResolution as TempResolution + from ego.tools.results import (create_etrago_results) + from ego.tools.storages import (etrago_storages_investment, etrago_storages) + from ego.tools.economics import ( + etrago_operating_costs, + etrago_grid_investment, + edisgo_grid_investment, + get_generator_investment) + from ego.tools.utilities import get_scenario_setting, get_time_steps + from ego.tools.edisgo_integration import EDisGoNetworks from egoio.db_tables.model_draft import RenpassGisParameterRegion from egoio.db_tables import model_draft, grid - from sqlalchemy import and_, func - import pandas as pd - from egoio.tools import db + from etrago.tools.plot import (plot_line_loading, plot_stacked_gen, + curtailment, gen_dist, storage_distribution, + plot_voltage, plot_residual_load, + plot_line_loading_diff, full_load_hours, + extension_overlay_network) + from etrago.appl import etrago + from importlib import import_module + import pypsa + import re +__copyright__ = ("Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems") +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolf_bunke,maltesc" -def geolocation_buses(network, session): - """ - Use Geometries of buses x/y (lon/lat) and Polygons - of Countries from RenpassGisParameterRegion - in order to locate the buses + +class egoBasic(object): + """The eGo basic class select and creates based on your + ``scenario_setting.json`` file your definde eTraGo and + eDisGo results container. And contains the session for the + database connection. Parameters ---------- - network : Network - eTraGo Network - session : sqlalchemy - session to oedb + jsonpath : :obj:`json` + Path to ``scenario_setting.json`` file. 
- ToDo - ---- - - check eTrago stack generation plots and other in order of adaptation + Returns + ------- + json_file : :obj:dict + Dictionary of the ``scenario_setting.json`` file + session : :sqlalchemy:`sqlalchemy.orm.session.Session` + SQLAlchemy session to the OEDB """ - # Start db connetion - # get renpassG!S scenario data - - meta = MetaData() - meta.bind = session.bind - conn = meta.bind - # get db table - meta.reflect(bind=conn, schema='model_draft', - only=['renpass_gis_parameter_region']) - - # map to classes - Base = automap_base(metadata=meta) - Base.prepare() - RenpassGISRegion = \ - Base.classes.renpass_gis_parameter_region - - # Define regions - region_id = ['DE','DK', 'FR', 'BE', 'LU', \ - 'NO', 'PL', 'CH', 'CZ', 'SE', 'NL'] - - query = session.query(RenpassGISRegion.gid, RenpassGISRegion.u_region_id, - RenpassGISRegion.stat_level, RenpassGISRegion.geom, - RenpassGISRegion.geom_point) - - # get regions by query and filter - Regions = [(gid, u_region_id, stat_level, shape.to_shape(geom),\ - shape.to_shape(geom_point)) for gid, u_region_id, stat_level,\ - geom, geom_point in query.filter(RenpassGISRegion.u_region_id.\ - in_(region_id)).all()] - - crs = {'init': 'epsg:4326'} - # transform lon lat to shapely Points and create GeoDataFrame - points = [Point(xy) for xy in zip( network.buses.x, network.buses.y)] - bus = gpd.GeoDataFrame(network.buses, crs=crs, geometry=points) - # Transform Countries Polygons as Regions - region = pd.DataFrame(Regions, columns=['id','country','stat_level','Polygon','Point']) - re = gpd.GeoDataFrame(region, crs=crs, geometry=region['Polygon']) - # join regions and buses by geometry which intersects - busC = gpd.sjoin(bus, re, how='inner', op='intersects') - #busC - # Drop non used columns - busC = busC.drop(['index_right', 'Point', 'id', 'Polygon', 'stat_level','geometry'], axis=1) - # add busC to eTraGo.buses - network.buses['country_code'] = busC['country'] - - # close session - session.close() - return network + def __init__(self, + jsonpath, *args, **kwargs): + + self.jsonpath = 'scenario_setting.json' + self.json_file = get_scenario_setting(self.jsonpath) + + # Database connection from json_file + try: + conn = db.connection(section=self.json_file['global']['db']) + Session = sessionmaker(bind=conn) + self.session = Session() + logger.info('Connected to Database') + except: + logger.error('Failed connection to Database', exc_info=True) + + # get scn_name + self.scn_name = self.json_file['eTraGo']['scn_name'] + + pass + + pass -def results_to_excel(results): +class eTraGoResults(egoBasic): + """The ``eTraGoResults`` class create and contains all results + of eTraGo and it's network container for eGo. 
+ + Returns + ------- + network_etrago: :class:`etrago.tools.io.NetworkScenario` + eTraGo network object compiled by :meth:`etrago.appl.etrago` + etrago: :pandas:`pandas.Dataframe` + DataFrame which collects several eTraGo results """ - Wirte results to excel + def __init__(self, jsonpath, *args, **kwargs): + """ + """ + super(eTraGoResults, self).__init__(self, jsonpath, + *args, **kwargs) + + self.etrago_network = None + + logger.info('eTraGoResults startet') + + if self.json_file['global']['recover'] is True: + + # Delete arguments from scenario_setting + logger.info('Remove given eTraGo settings from scenario_setting') + + try: + self.json_file['global']['eTraGo'] = False + + for i in self.json_file['eTraGo'].keys(): + + self.json_file['eTraGo'][i] = 'removed by recover' + + # ToDo add scenario_setting for results + self.json_file['eTraGo']['db'] = self.json_file['global']['db'] + logger.info( + 'Add eTraGo scenario_setting from oedb result') + # To do .... + _prefix = 'EgoGridPfHvResult' + schema = 'model_draft' + packagename = 'egoio.db_tables' + _pkg = import_module(packagename + '.' + schema) + + # get metadata + # version = json_file['global']['gridversion'] + + orm_meta = getattr(_pkg, _prefix + 'Meta') + self.jsonpath = recover_resultsettings(self.session, + self.json_file, + orm_meta, + self.json_file['global'] + ['result_id']) + + except KeyError: + pass + + logger.info('Create eTraGo network from oedb result') + self.etrago_network = etrago_from_oedb(self.session, self.json_file) + + # create eTraGo NetworkScenario network + if self.json_file['global']['eTraGo'] is True: + logger.info('Create eTraGo network') + self.etrago_network = etrago(self.json_file['eTraGo']) + + # add selected results to Results container + + self.etrago = pd.DataFrame() + self.etrago.storage_investment_costs = etrago_storages_investment( + self.etrago_network, self.json_file) + self.etrago.storage_charges = etrago_storages(self.etrago_network) + self.etrago.operating_costs = etrago_operating_costs( + self.etrago_network) + self.etrago.generator = create_etrago_results(self.etrago_network, + self.scn_name) + self.etrago.grid_investment_costs = etrago_grid_investment(self. + etrago_network, + self.json_file) + # + + # add functions direct + # self.etrago_network.etrago_line_loading = etrago_line_loading + + pass + + if not 'READTHEDOCS' in os.environ: + # include eTraGo functions and methods + def etrago_line_loading(self, **kwargs): + """ + Integrate and use function from eTraGo. + For more information see: + """ + # add if time_step <1 -> plot + return plot_line_loading(network=self.etrago_network, **kwargs) + + def etrago_stacked_gen(self, **kwargs): + """ + Integrate function from eTraGo. + For more information see: + """ + return plot_stacked_gen(network=self.etrago_network, **kwargs) + + def etrago_curtailment(self, **kwargs): + """ + Integrate function from eTraGo. + For more information see: + """ + return curtailment(network=self.etrago_network, **kwargs) + + def etrago_gen_dist(self, **kwargs): + """ + Integrate function from eTraGo. + For more information see: + """ + return gen_dist(network=self.etrago_network, **kwargs) + + def etrago_storage_distribution(self, **kwargs): + """ + Integrate function from eTraGo. + For more information see: + """ + return storage_distribution(network=self.etrago_network, **kwargs) + + def etrago_voltage(self, **kwargs): + """ + Integrate function from eTraGo. 
+ For more information see: + """ + return plot_voltage(network=self.etrago_network, **kwargs) + + def etrago_residual_load(self, **kwargs): + """ + Integrate function from eTraGo. + For more information see: + """ + return plot_residual_load(network=self.etrago_network, **kwargs) + + def etrago_line_loading_diff(self, networkB, **kwargs): + """ + Integrate function from eTraGo. + For more information see: + """ + return plot_line_loading_diff(networkA=self.etrago_network, + networkB=networkB, **kwargs) + + def etrago_extension_overlay_network(self, **kwargs): + """ + Integrate function from eTraGo. + For more information see: + """ + return extension_overlay_network(network=self.etrago_network, + **kwargs) + + def etrago_full_load_hours(self, **kwargs): + """ + Integrate function from eTraGo. + For more information see: + """ + return full_load_hours(network=self.etrago_network, **kwargs) + + # add other methods from eTraGo here + + +class eDisGoResults(eTraGoResults): + """The ``eDisGoResults`` class create and contains all results + of eDisGo and its network containers. + + """ + + def __init__(self, jsonpath, *args, **kwargs): + super(eDisGoResults, self).__init__(self, jsonpath, *args, **kwargs) + + self._edisgo = None + self._edisgo_networks = None + + if self.json_file['global']['eDisGo'] is True: + logger.info('Create eDisGo networks') + + self._edisgo = pd.DataFrame() + + self._edisgo_networks = EDisGoNetworks( + json_file=self.json_file, + etrago_network=self.etrago_network) + + self._edisgo.grid_investment_costs = edisgo_grid_investment( + self._edisgo_networks, + self.json_file + ) + + @property + def edisgo_networks(self): + """ + Container for eDisGo grids, including all results + + Returns + ------- + :obj:`dict` of :class:`edisgo.grid.network.EDisGo` + Dictionary of eDisGo objects, keyed by MV grid ID + + """ + return self._edisgo_networks + + @property + def edisgo(self): + """ + Contains basic informations about eDisGo + + Returns + ------- + :pandas:`pandas.DataFrame` + + """ + return self._edisgo + + +class eGo(eDisGoResults): + """Main eGo module which including all results and main functionalities. 
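
    A typical interactive session, sketched here under the assumption that a
    prepared ``scenario_setting.json`` with both eTraGo and eDisGo enabled is
    available in the working directory:

    .. code-block:: python

        >>> from ego.tools.io import eGo
        >>> ego = eGo(jsonpath='scenario_setting.json')
        >>> ego.etrago.operating_costs        # eTraGo operational costs
        >>> ego.edisgo.grid_investment_costs  # aggregated eDisGo expansion costs
        >>> ego.total_investment_costs()      # combines grid and storage costs
        >>> ego.plot_total_investment_costs() # bar plot of the investments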
+ + + Returns + ------- + network_etrago: :class:`etrago.tools.io.NetworkScenario` + eTraGo network object compiled by :meth:`etrago.appl.etrago` + edisgo_networks : :class:`ego.tools.edisgo_integration.EDisGoNetworks` + Contains multiple eDisGo networks + edisgo : :pandas:`pandas.Dataframe` + aggregated results of eDisGo + etrago : :pandas:`pandas.Dataframe` + aggregated results of eTraGo + + + """ + + def __init__(self, jsonpath, *args, **kwargs): + super(eGo, self).__init__(self, jsonpath, + *args, **kwargs) + + # super().__init__(eDisGo) + self.total = pd.DataFrame() + # add total results here + #self.total_investment_costs = pd.DataFrame() + # self.total_operation_costs = pd.DataFrame() # TODO + + def total_investment_costs(self): + """ Get total investment costs of all voltage level for storages + and grid expansion + """ + + _grid_ehv = self.etrago.grid_investment_costs.capital_cost.sum() + + _storage = self.etrago.storage_investment_costs.capital_cost.sum() + + self._total_inv_cost = pd.DataFrame(columns=['component', + 'capital_cost']) + self._total_inv_cost = self._total_inv_cost.append({'component': 'ehv-hv grid', + 'capital_cost': _grid_ehv}, + ignore_index=True) + self._total_inv_cost = self._total_inv_cost.append({'component': 'storage', + 'capital_cost': _storage}, + ignore_index=True) + + if self.json_file['global']['eDisGo'] is True: + + _grid_mv_lv = self.edisgo.grid_investment_costs.capital_cost.sum() + + self._total_inv_cost = self._total_inv_cost.\ + append({'component': 'mv-lv grid', + 'capital_cost': _grid_mv_lv}, + ignore_index=True) + self.total_investment_cost = self._total_inv_cost + + def plot_total_investment_costs(self): + """ Plot total investment costs + """ + self.total_investment_costs() + + return self.total_investment_cost.plot.bar(x='component', + y='capital_cost', rot=1) + + # write_results_to_db(): + logging.info('Initialisation of eGo Results') + + +# def geolocation_buses(network, session): +# """ +# Use Geometries of buses x/y(lon/lat) and Polygons +# of Countries from RenpassGisParameterRegion +# in order to locate the buses +# +# Parameters +# ---------- +# network_etrago: : class: `etrago.tools.io.NetworkScenario` +# eTraGo network object compiled by: meth: `etrago.appl.etrago` +# session: : sqlalchemy: `sqlalchemy.orm.session.Session < orm/session_basics.html >` +# SQLAlchemy session to the OEDB + # + # """ + # # ToDo: check eTrago stack generation plots and other in order of adaptation + # # Start db connetion + # # get renpassG!S scenario data + # + # meta = MetaData() + # meta.bind = session.bind + # conn = meta.bind + # # get db table + # meta.reflect(bind=conn, schema='model_draft', + # only=['renpass_gis_parameter_region']) + # + # # map to classes + # Base = automap_base(metadata=meta) + # Base.prepare() + # RenpassGISRegion = Base.classes.renpass_gis_parameter_region + # + # # Define regions + # region_id = ['DE', 'DK', 'FR', 'BE', 'LU', + # 'NO', 'PL', 'CH', 'CZ', 'SE', 'NL'] + # + # query = session.query(RenpassGISRegion.gid, RenpassGISRegion.u_region_id, + # RenpassGISRegion.stat_level, RenpassGISRegion.geom, + # RenpassGISRegion.geom_point) + # + # # get regions by query and filter + # Regions = [(gid, u_region_id, stat_level, shape.to_shape(geom), + # shape.to_shape(geom_point)) for gid, u_region_id, stat_level, + # geom, geom_point in query.filter(RenpassGISRegion.u_region_id. 
+ # in_(region_id)).all()] + # + # crs = {'init': 'epsg:4326'} + # # transform lon lat to shapely Points and create GeoDataFrame + # points = [Point(xy) for xy in zip(network.buses.x, network.buses.y)] + # bus = gpd.GeoDataFrame(network.buses, crs=crs, geometry=points) + # # Transform Countries Polygons as Regions + # region = pd.DataFrame( + # Regions, columns=['id', 'country', 'stat_level', 'Polygon', 'Point']) + # re = gpd.GeoDataFrame(region, crs=crs, geometry=region['Polygon']) + # # join regions and buses by geometry which intersects + # busC = gpd.sjoin(bus, re, how='inner', op='intersects') + # # busC + # # Drop non used columns + # busC = busC.drop(['index_right', 'Point', 'id', 'Polygon', + # 'stat_level', 'geometry'], axis=1) + # # add busC to eTraGo.buses + # network.buses['country_code'] = busC['country'] + # + # # close session + # session.close() + # + # return network + + +def results_to_excel(ego): + """ + Wirte results to excel """ # Write the results as xlsx file # ToDo add time of calculation to file name @@ -105,46 +471,36 @@ def results_to_excel(results): writer = pd.ExcelWriter('open_ego_results.xlsx', engine='xlsxwriter') # write results of installed Capacity by fuels - results.total.to_excel(writer, index=False, sheet_name='Total Calculation') + ego.total.to_excel(writer, index=False, sheet_name='Total Calculation') # write orgininal data in second sheet - results.to_excel(writer, index=True, sheet_name='Results by carriers') - #add plots + ego.to_excel(writer, index=True, sheet_name='Results by carriers') + # add plots # Close the Pandas Excel writer and output the Excel file. writer.save() # buses -def etrago_from_oedb(session, args): - """ - Function with import eTraGo results for the Database. +def etrago_from_oedb(session, json_file): + """Function which import eTraGo results for the Database by + ``result_id`` and if ``recover`` is set to ``true``. Parameters ---------- - session (obj): - sqlalchemy session to the OEDB + session : :sqlalchemy:`sqlalchemy.orm.session.Session` + SQLAlchemy session to the OEDB + json_file : :obj:`dict` + Dictionary of the ``scenario_setting.json`` file - args (dict): - args from eGo scenario_setting.json + Returns + ------- + network_etrago: :class:`etrago.tools.io.NetworkScenario` + eTraGo network object compiled by :meth:`etrago.appl.etrago` - ToDo - ---- - add Mapping for grid schema - make it more generic -> class? 
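
    Example
    -------
    A sketch, assuming an open OEDB session and a settings dictionary whose
    ``['global']['result_id']`` refers to a stored eTraGo result:

    .. code-block:: python

        >>> network = etrago_from_oedb(session, json_file)
        >>> network.buses.head()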
""" - result_id = args['global']['result_id'] - # modules from model_draft - from egoio.db_tables.model_draft import EgoGridPfHvSource as Source,\ - EgoGridPfHvTempResolution as TempResolution - from etrago.tools.io import loadcfg - from importlib import import_module - import pypsa - import re - import logging - logging.basicConfig(level=logging.INFO) - logger = logging.getLogger(__name__) + result_id = json_file['global']['result_id'] # functions def map_ormclass(name): @@ -159,71 +515,98 @@ def map_ormclass(name): return _mapped + def id_to_source(query): - def dataframe_results( name, session, result_id, ormclass): - """ - Function to get pandas DataFrames by the result_id - """ + # ormclass = map_ormclass(name) + # query = session.query(ormclass).filter(ormclass.result_id == result_id) - query = session.query(ormclass).filter(ormclass.result_id == result_id) + # TODO column naming in database + return {k.source_id: k.name for k in query.all()} - if name == 'Transformer': - name = 'Trafo' + def dataframe_results(name, session, result_id, ormclass): + """ + Function to get pandas DataFrames by the result_id - df = pd.read_sql(query.statement, - session.bind, - index_col=name.lower() + '_id') + Parameters + ---------- + session : :sqlalchemy:`sqlalchemy.orm.session.Session` + SQLAlchemy session to the OEDB + """ - if str(ormclass)[:-2].endswith('T'): - df = pd.Dataframe() + query = session.query(ormclass).filter(ormclass.result_id == result_id) - return df + if name == 'Transformer': + name = 'Trafo' + df = pd.read_sql(query.statement, + session.bind, + index_col=name.lower() + '_id') - def series_results(name, column, session, meta_args, result_id, ormclass): - """ - Function to get Time Series as pandas DataFrames by the result_id + if name == 'Link': + df['bus0'] = df.bus0.astype(int) + df['bus1'] = df.bus1.astype(int) - ToDo - ---- - - check index of bus_t and soon is wrong! + if 'source' in df: - """ - # TODO: pls make more robust - id_column = re.findall(r'[A-Z][^A-Z]*', name)[0] + '_' + 'id' - id_column = id_column.lower() + source_orm = Source - query = session.query( - getattr(ormclass, id_column), - getattr(ormclass, column). - label(column)).filter(and_( - ormclass.result_id == result_id - )) + source_query = session.query(source_orm) - df = pd.io.sql.read_sql(query.statement, - session.bind, - columns=[column], - index_col=id_column) + df.source = df.source.map(id_to_source(source_query)) - df.index = df.index.astype(str) + if str(ormclass)[:-2].endswith('T'): + df = pd.Dataframe() - # change of format to fit pypsa - df = df[column].apply(pd.Series).transpose() + return df - try: - assert not df.empty - df.index = timeindex - except AssertionError: - print("No data for %s in column %s." % (name, column)) + def series_results(name, column, session, result_id, ormclass): + """ + Function to get Time Series as pandas DataFrames by the result_id + + Parameters + ---------- + session: : sqlalchemy: `sqlalchemy.orm.session.Session < orm/session_basics.html >` + SQLAlchemy session to the OEDB + """ + + # TODO - check index of bus_t and soon is wrong! + # TODO: pls make more robust + + id_column = re.findall(r'[A-Z][^A-Z]*', name)[0] + '_' + 'id' + id_column = id_column.lower() + + query = session.query( + getattr(ormclass, id_column), + getattr(ormclass, column). 
+ label(column)).filter(and_( + ormclass.result_id == result_id + )) - return df + df = pd.io.sql.read_sql(query.statement, + session.bind, + columns=[column], + index_col=id_column) + + df.index = df.index.astype(str) + + # change of format to fit pypsa + df = df[column].apply(pd.Series).transpose() + + try: + assert not df.empty + df.index = timeindex + except AssertionError: + print("No data for %s in column %s." % (name, column)) + + return df # create config for results path = os.getcwd() - config = loadcfg(path+'/tools/config.json')['results'] # add meta_args with args of results + # add meta_args with args of results + config = load_config_file(path+'/tools/config.json')['results'] # map and Database settings of etrago_from_oedb() - _prefix= 'EgoGridPfHvResult' + _prefix = 'EgoGridPfHvResult' schema = 'model_draft' packagename = 'egoio.db_tables' _pkg = import_module(packagename + '.' + schema) @@ -231,52 +614,36 @@ def series_results(name, column, session, meta_args, result_id, ormclass): carr_ormclass = 'Source' _mapped = {} - - # get metadata - version = args['global']['gridversion'] + # version = json_file['global']['gridversion'] - orm_meta = getattr(_pkg, _prefix + 'Meta') + orm_meta = getattr(_pkg, _prefix + 'Meta') # check result_id - result_id_in = session.query(orm_meta.result_id).filter(orm_meta.\ - result_id==result_id).all() + result_id_in = session.query( + orm_meta.result_id).filter(orm_meta. + result_id == result_id).all() if result_id_in: - logger.info('Choosen result_id %s found in DB',result_id) + logger.info('Choosen result_id %s found in DB', result_id) else: logger.info('Error: result_id not found in DB') - # get meta data as args - meta = session.query(orm_meta.result_id,orm_meta.scn_name,orm_meta.calc_date, - orm_meta.user_name ,orm_meta.method, orm_meta.start_snapshot, - orm_meta.end_snapshot, orm_meta.solver, orm_meta.settings - ).filter(orm_meta.result_id== result_id) - - meta_df = pd.read_sql(meta.statement, meta.session.bind, index_col='result_id') - - meta_args = dict(meta_df.settings[result_id]) - meta_args['scn_name'] = meta_df.scn_name[result_id] - meta_args['method'] = meta_df.method[result_id] - meta_args['start_snapshot'] = meta_df.start_snapshot[result_id] - meta_args['end_snapshot'] = meta_df.end_snapshot[result_id] - meta_args['solver'] = meta_df.solver[result_id] + meta_args = recover_resultsettings(session, json_file, orm_meta, result_id) # get TempResolution - temp = TempResolution + temp = TempResolution - tr = session.query(temp.temp_id,temp.timesteps, - temp.resolution, temp.start_time).one() + tr = session.query(temp.temp_id, temp.timesteps, + temp.resolution, temp.start_time).one() timeindex = pd.DatetimeIndex(start=tr.start_time, periods=tr.timesteps, freq=tr.resolution) - timeindex = timeindex[meta_args['start_snapshot'] - 1: meta_args['end_snapshot'] ] - - meta_args['temp_id'] = tr.temp_id - + timeindex = timeindex[meta_args['eTraGo']['start_snapshot'] - + 1: meta_args['eTraGo']['end_snapshot']] # create df for PyPSA network @@ -287,27 +654,27 @@ def series_results(name, column, session, meta_args, result_id, ormclass): if pypsa.__version__ == '0.11.0': old_to_new_name = {'Generator': - {'p_min_pu_fixed': 'p_min_pu', - 'p_max_pu_fixed': 'p_max_pu', - 'source': 'carrier', - 'dispatch': 'former_dispatch'}, - 'Bus': - {'current_type': 'carrier'}, - 'Transformer': - {'trafo_id': 'transformer_id'}, - 'Storage': - {'p_min_pu_fixed': 'p_min_pu', - 'p_max_pu_fixed': 'p_max_pu', - 'soc_cyclic': 'cyclic_state_of_charge', - 'soc_initial': 
'state_of_charge_initial', - 'source': 'carrier'}} + {'p_min_pu_fixed': 'p_min_pu', + 'p_max_pu_fixed': 'p_max_pu', + 'source': 'carrier', + 'dispatch': 'former_dispatch'}, + 'Bus': + {'current_type': 'carrier'}, + 'Transformer': + {'trafo_id': 'transformer_id'}, + 'Storage': + {'p_min_pu_fixed': 'p_min_pu', + 'p_max_pu_fixed': 'p_max_pu', + 'soc_cyclic': 'cyclic_state_of_charge', + 'soc_initial': 'state_of_charge_initial', + 'source': 'carrier'}} timevarying_override = True else: old_to_new_name = {'Storage': - {'soc_cyclic': 'cyclic_state_of_charge', - 'soc_initial': 'state_of_charge_initial'}} + {'soc_cyclic': 'cyclic_state_of_charge', + 'soc_initial': 'state_of_charge_initial'}} # get data into dataframes logger.info('Start building eTraGo results network') @@ -334,14 +701,15 @@ def series_results(name, column, session, meta_args, result_id, ormclass): name = name[:-1] pypsa_comp_name = name - if name == 'Storage': + if name == 'Storage': pypsa_comp_name = 'StorageUnit' - if name == 'Transformer': + if name == 'Transformer': name = 'Trafo' for col in columns: - df_series = series_results(name, col, session, meta_args, result_id, ormclass) + df_series = series_results( + name, col, session, result_id, ormclass) # TODO: VMagPuSet? if timevarying_override and comp == 'Generator': @@ -359,13 +727,49 @@ def series_results(name, column, session, meta_args, result_id, ormclass): except (ValueError, AttributeError): print("Series %s of component %s could not be " - "imported" % (col, pypsa_comp_name)) - + "imported" % (col, pypsa_comp_name)) print('Done') logger.info('Imported eTraGo results of id = %s ', result_id) return network +def recover_resultsettings(session, json_file, orm_meta, result_id): + """ Recover scenario_setting from database + """ + + # check result_id + result_id_in = session.query( + orm_meta.result_id).filter(orm_meta. + result_id == result_id).all() + + # get meta data as json_file + meta = session.query(orm_meta.result_id, orm_meta.scn_name, orm_meta.calc_date, + orm_meta.user_name, orm_meta.method, orm_meta.start_snapshot, + orm_meta.end_snapshot, orm_meta.solver, orm_meta.settings + ).filter(orm_meta.result_id == result_id) + + meta_df = pd.read_sql( + meta.statement, meta.session.bind, index_col='result_id') + + # update json_file with main data by result_id + json_file['eTraGo']['scn_name'] = meta_df.scn_name[result_id] + json_file['eTraGo']['method'] = meta_df.method[result_id] + json_file['eTraGo']['start_snapshot'] = meta_df.start_snapshot[result_id] + json_file['eTraGo']['end_snapshot'] = meta_df.end_snapshot[result_id] + json_file['eTraGo']['solver'] = meta_df.solver[result_id] + + # update json_file with specific data by result_id + meta_set = dict(meta_df.settings[result_id]) + + for key in json_file['eTraGo'].keys(): + try: + json_file['eTraGo'][key] = meta_set[key] + except KeyError: + pass + + return json_file + + if __name__ == '__main__': pass diff --git a/ego/tools/mv_cluster.py b/ego/tools/mv_cluster.py new file mode 100644 index 00000000..0eca5734 --- /dev/null +++ b/ego/tools/mv_cluster.py @@ -0,0 +1,357 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description +""" +This file contains all functions regarding the clustering of MV grids +""" +__copyright__ = ("Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems") +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolf_bunke, maltesc" + +# Import +#from __future__ import print_function +import os +if not 'READTHEDOCS' in os.environ: + import pickle + + import pandas as pd + + from sklearn.cluster import KMeans + import numpy as np + + #import matplotlib.pyplot as plt + #from mpl_toolkits.mplot3d import Axes3D + #import matplotlib.cm as cm + + +def analyze_attributes(ding0_files): + """ + Calculates the attributes wind and solar capacity and farthest node + for all files in ding0_files. Results are written to ding0_files + + Parameters + ---------- + ding0_files : :obj:`str` + Path to ding0 files + + """ + base_path = ding0_files + + not_found = [] + tccs = [] # Total Cumulative Capacity of Solar + tccw = [] # Total Cumulative Capacity of Wind + fnlvmv = [] # the Farthest Node in both networks (lv and mv) + MV_id_list = [] # Distrct id list + + for district_number in list(range(1, 4000)): # 3608 MVGDs + + # To bypass not found error + try: + pickle_name = 'ding0_grids__{}.pkl'.format( + district_number) # To open the pickle files + # assign the data to a variable + nd = pickle.load(open(os.path.join(base_path, pickle_name), 'rb')) + print('District no.', district_number, 'found!') + except: + # append not found network files id to a list + not_found.append(district_number) + # print('District no.', district_number, 'NOT found') # print not found ids + continue + + MV_id = 0 + MV_id = nd._mv_grid_districts[0].id_db + + mv_cum_solar_MV = 0 # Solar cumulative capacity in MV + mv_cum_wind_MV = 0 # Solar cumulative capacity in MV + + # cumulative capacity of solar and wind in MV + for geno in nd._mv_grid_districts[0].mv_grid.generators(): + if geno.type == 'solar': + mv_cum_solar_MV += geno.capacity + if geno.type == 'wind': + mv_cum_wind_MV += geno.capacity + + lvg = 0 +# lvg_type = [] +# counter_lv = [{}] + mv_cum_solar_LV = 0 + mv_cum_wind_LV = 0 + + # cumulative capacity of solar and wind in LV + for lvgs in nd._mv_grid_districts[0].lv_load_areas(): + for lvgs1 in lvgs.lv_grid_districts(): + lvg += len(list(lvgs1.lv_grid.generators())) # No. 
of DGs in lv + for deno in lvgs1.lv_grid.generators(): + if deno.type == 'solar': + mv_cum_solar_LV += deno.capacity + if deno.type == 'wind': + mv_cum_wind_LV += deno.capacity + + # Total solar cumulative capacity in lv and mv + total_cum_solar = mv_cum_solar_MV + mv_cum_solar_LV + # Total wind cumulative capacity in lv and mv + total_cum_wind = mv_cum_wind_MV + mv_cum_wind_LV + + # append to lists + tccs.append(total_cum_solar) + tccw.append(total_cum_wind) + + # The farthest node length from MV substation + from ding0.core.network.stations import LVStationDing0 + + tot_dist = [] + max_length = 0 + max_length_list = [] + max_of_max = 0 + + # make CB open (normal operation case) + nd.control_circuit_breakers(mode='open') + # setting the root to measure the path from + root_mv = nd._mv_grid_districts[0].mv_grid.station() + # 1st from MV substation to LV station node + # Iteration through nodes + for node2 in nd._mv_grid_districts[0].mv_grid._graph.nodes(): + # select only LV station nodes + if isinstance(node2, LVStationDing0) and not node2.lv_load_area.is_aggregated: + + length_from_MV_to_LV_station = 0 + # Distance from MV substation to LV station node + length_from_MV_to_LV_station = nd._mv_grid_districts[0].mv_grid.graph_path_length( + node_source=node2, node_target=root_mv) / 1000 + # 2nd from LV station node to the longest distance node + + # Iteration through lv load areas + for lvgs in nd._mv_grid_districts[0].lv_load_areas(): + for lvgs1 in lvgs.lv_grid_districts(): # Iteration through lv grid districts + # In order to measure the distance between the LV station and the nodes that belong to it and not from other stations + if lvgs1.lv_grid._station == node2: + root_lv = node2 # setting a new root + for node1 in lvgs1.lv_grid._graph.nodes(): # iteration of all nodes in LV grid + + length_from_LV_staion_to_LV_node = 0 + # Distance from LV station to LV nodes + length_from_LV_staion_to_LV_node = lvgs1.lv_grid.graph_path_length( + node_source=node1, node_target=root_lv) / 1000 + + length_from_LV_node_to_MV_substation = 0 + # total distances in both grids MV and LV + length_from_LV_node_to_MV_substation = length_from_MV_to_LV_station + \ + length_from_LV_staion_to_LV_node + + # append the total distance to a list + tot_dist.append( + length_from_LV_node_to_MV_substation) + if any(tot_dist): # to make sure the list is not empty + # to pick up the max length within this grid + max_length = max(tot_dist) + # append max lengths of all grids to a list + max_length_list.append(max_length) + if any(max_length_list): # to make sure the list is not empty + # to pick up max of max + max_of_max = max(max_length_list) + + fnlvmv.append(max_of_max) # append to a new list + MV_id_list.append(MV_id) # append the network id to a new list + + # export results to dataframes + d = {'id': MV_id_list, 'Solar_cumulative_capacity': tccs, + 'Wind_cumulative_capacity': tccw, + 'The_Farthest_node': fnlvmv} # assign lists to columns + # not founded networks + are_not_found = {'District_files_that_are_not_found': not_found} + + df = pd.DataFrame(d) # dataframe for results + + # dataframe for not found files id + df_are_not_found = pd.DataFrame(are_not_found) + + # Exporting dataframe to CSV files + df.to_csv(base_path + '/' + 'attributes.csv', sep=',') + df_are_not_found.to_csv(base_path + '/' + 'Not_found_grids.csv', sep=',') + + # 3d scatter plotting + #from mpl_toolkits.mplot3d import Axes3D + #import matplotlib.pyplot as plt + # + #fig = plt.figure() + #ax = fig.add_subplot(111, projection='3d') + # + #X = 
tccs + #Y = tccw + #Z = fnlvmv + # + #ax.scatter(X, Y, Z) + # + #ax.set_xlim(0, max(tccs)/1000) + #ax.set_ylim(0, max(tccw)/1000) + #ax.set_zlim(0, max(fnlvmv)) + # + #ax.set_xlabel('\nSolar cumulative capacity (MW)', linespacing=2) + #ax.set_ylabel('\nWind cumulative capacity (MW)', linespacing=2) + #ax.set_zlabel('\nThe farthest node (km)', linespacing=2) + # + # plt.show() + + +def cluster_mv_grids(ding0_files, no_grids): + """ + Clusters the MV grids based on the attributes, for a given number + of MV grids + + Parameters + ---------- + ding0_files : :obj:`str` + Path to ding0 files + no_grids : int + Desired number of clusters (of MV grids) + + Returns + ------- + :pandas:`pandas.DataFrame` + Dataframe containing the clustered MV grids and their weightings + + """ + # import CSV data file that exported from Networks_analysis_solar_wind_farthest-node.py and assign it to a data frame + df = pd.read_csv(ding0_files + '/attributes.csv') + + # extract each column to a variable + x = df.Solar_cumulative_capacity # Solar capacity in MV and LV + y = df.Wind_cumulative_capacity # Wind capacity in MV and LV + # The farthest node (the length between HV/MV substation to the farthest node in LV networks) + z = df.The_Farthest_node + id_ = df.id # Network id + + # Addressing the max value of each column + max_solar = max(x) + max_wind = max(y) + max_farthest = max(z) + + # Converting data to perunit scale + solar_pu = x / max_solar + wind_pu = y / max_wind + distances_pu = z / max_farthest + + # Converting from vectors to coordinates array + m = [] + for r, s, t in zip(solar_pu, wind_pu, distances_pu): + f = [r, s, t] + m.append(f) + X = np.array(m) + + # Initialize KMeans clustering by Sklearn pkg + no_clusters = no_grids # no. of clusters + + # random state should be given in order to have same results with every run of the script + # it acts as a seed where the algorihm define the starting clustering point, 1808 shows good results + ran_state = 1808 + + # Starting KMeans clustering + kmeans = KMeans(n_clusters=no_clusters, random_state=ran_state) + + # Return a label for each point which indicates to which cluster each point is assigned + cluster_labels = kmeans.fit_predict(X) + + # Centers of clusters + centroids = kmeans.cluster_centers_ + + id_clus_dist = {} + + # Iterate through each point in dataset array X + for i in range(len(X)): + clus = cluster_labels[i] # point's cluster id + cent = centroids[cluster_labels[i]] # Cluster's center coordinates + + # Distance from that point to cluster's center (3d coordinates) + dist = ((X[i][0] - centroids[clus][0]) ** 2 + (X[i][1] - centroids[clus] + [1]) ** 2 + (X[i][2] - centroids[clus][2]) ** 2) ** (1 / 2) + + # three results are appended to a list (cluster's id, point's (MVGD) id and distance from that point to cluster's center) + #id_clus_dist = [clus, id_[i], dist] + # list_id_clus_dist.append(id_clus_dist) + + # three results are appended to a dictionary (cluster's id, point's (MVGD) id and distance from that point to cluster's center) + id_clus_dist.setdefault(clus, []).append({id_[i]: dist}) + + cluster_id = [] + cluster_points = [] + clus_percentage = [] + closest_point = [] + + # Iterating through the clusters dictionary (key represents cluster's id , value represents another disctionary with network's id and distance of that point to cluster's center) + for key, value in id_clus_dist.items(): + no_points_clus = sum(1 for v in value if v) # How many points/cluster + # percentage of points per cluster + clus_perc = (no_points_clus / len(X)) 
* 100 + + # in the last dict "id_clus_dist" each key (cluster's id) has several dicts that contain information of network's id and distance, + # the below code just to split the sub dicts and merge them as items in anothe single dict + id_dist = {} + for value_1 in value: + id_dist.update(value_1) + + # returns the shortest distance point (selected network) to cluster's center + short_dist_net_id_dist = min(id_dist.items(), key=lambda x: x[1]) + + # Exporting CSV sheet for every cluster that contains the assigned points (networks) and siatance from each to cluster's center + daf = pd.DataFrame() + daf['Network_id'] = id_dist.keys() + daf['Distance_to_cluster_center'] = id_dist.values() +# daf.to_csv('Cluster_No_{}.csv'.format(key), sep=',') + + # export to lists + cluster_id.append(key) # cluster id + cluster_points.append(no_points_clus) # No of points / cluster + # Percentage of points per cluster, # round(), two digits after comma + clus_percentage.append(round(clus_perc, 2)) + + # the nearest point to cluster center (represented network), there is [0] because it is a tuple + closest_point.append(short_dist_net_id_dist[0]) + + # exporting results to CSV file that contains cluster's id, the no. of assigned points (networks) and the selected network + d = {'CLuster_id': cluster_id, 'no_of_points_per_cluster': cluster_points, + 'cluster_percentage': clus_percentage, + 'the_selected_network_id': closest_point} + df = pd.DataFrame(d) +# df.to_csv('Selected_networks_{}_clusters_dec.csv'.format(no_clusters), sep=',') + + return df + +# # Initiation of 3d graph for scattering plot +# fig = plt.figure() +# fig.suptitle('KMeans clustering', fontsize=20) +# ax = fig.add_subplot(111, projection='3d') +# +# # 3d scatter Plot +# colors = cm.spectral(cluster_labels.astype(float) / no_clusters) +# ax.scatter(X[:, 0], X[:, 1], X[:, 2], alpha=0.3, c=colors) +# +# # Clusters centers 3d scatter plot +# ax.scatter(centroids[:, 0], centroids[:, 1], centroids[:, 2], marker='x', alpha=1, s=50, linewidths=5, zorder=10,c='r') +# +# #limiting the axes scale +# ax.set_xlim(0, 1) +# ax.set_ylim(0, 1) +# ax.set_zlim(0, 1) +# +# # show 3d scatter clustering +# plt.show() +# +# # as for this example where no. of clusters is 20, the results should show 21 CSV files (20 for clusters and 1 for the selected netorks) diff --git a/ego/tools/plots.py b/ego/tools/plots.py index 45ad147b..4d57175d 100644 --- a/ego/tools/plots.py +++ b/ego/tools/plots.py @@ -1,371 +1,393 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description +"""Module which collects useful functions for plotting eTraGo, eDisGo and +eGo results. """ -Module to collect useful functions for plotting results of eGo - -ToDo -- histogram -- etc. 
-- Implement plotly -""" -__copyright__ = "tba" -__license__ = "tba" -__author__ = "tba" +# TODO - Implement plotly for iplot import numpy as np import pandas as pd import os if not 'READTHEDOCS' in os.environ: from etrago.tools.plot import (plot_line_loading, plot_stacked_gen, - add_coordinates, curtailment, gen_dist, - storage_distribution, - plot_voltage,plot_residual_load) + add_coordinates, curtailment, gen_dist, + storage_distribution, + plot_voltage, plot_residual_load) import pyproj as proj from shapely.geometry import Polygon, Point, MultiPolygon from geoalchemy2 import * - import geopandas as gpd + # import geopandas as gpd import folium from folium import plugins import branca.colormap as cm import webbrowser from egoio.db_tables.model_draft import EgoGridMvGriddistrict from egoio.db_tables.grid import EgoDpMvGriddistrict - from tools.results import eGo + from ego.tools.io import eGo import matplotlib.pyplot as plt import logging logger = logging.getLogger('ego') +__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität"\ + "Flensburg, Centre for Sustainable Energy Systems" +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolfbunke" + # plot colore of Carriers def carriers_colore(): - """ - Return matplotlib colores per pypsa carrier of eTraGo + """ Return matplotlib colore set per carrier (technologies of + generators) of eTraGo. - Returns + Returns ------- - :obj:`dict` : List of carriers and matplotlib colores - - """ - - colors = {'biomass':'green', - 'coal':'k', - 'gas':'orange', - 'eeg_gas':'olive', - 'geothermal':'purple', - 'lignite':'brown', - 'oil':'darkgrey', - 'other_non_renewable':'pink', - 'reservoir':'navy', - 'run_of_river':'aqua', - 'pumped_storage':'steelblue', - 'solar':'yellow', - 'uranium':'lime', - 'waste':'sienna', - 'wind':'skyblue', - 'slack':'pink', - 'load shedding': 'red', - 'nan':'m', - 'imports':'salmon', - '':'m'} - - return colors + colors : :obj:`dict` + List of carriers and matplotlib colores + """ + colors = {'biomass': 'green', + 'coal': 'k', + 'gas': 'orange', + 'eeg_gas': 'olive', + 'geothermal': 'purple', + 'lignite': 'brown', + 'oil': 'darkgrey', + 'other_non_renewable': 'pink', + 'reservoir': 'navy', + 'run_of_river': 'aqua', + 'pumped_storage': 'steelblue', + 'solar': 'yellow', + 'uranium': 'lime', + 'waste': 'sienna', + 'wind': 'skyblue', + 'slack': 'pink', + 'load shedding': 'red', + 'nan': 'm', + 'imports': 'salmon', + '': 'm'} + + return colors def make_all_plots(network): - # make a line loading plot - plot_line_loading(network) - - # plot stacked sum of nominal power for each generator type and timestep - plot_stacked_gen(network, resolution="MW") - - # plot to show extendable storages - storage_distribution(network) + """ Test function which run all ploting functions. 
- #plot_residual_load(network) + Parameters + ---------- + network_etrago: :class:`etrago.tools.io.NetworkScenario` + eTraGo network object compiled by :meth:`etrago.appl.etrago` - plot_voltage(network) + Returns + ------- + plots : :meth:`matplotlib.pyplot` + Returns plots + """ + # make a line loading plot + plot_line_loading(network) - #curtailment(network) + # plot stacked sum of nominal power for each generator type and timestep + plot_stacked_gen(network, resolution="MW") - gen_dist(network) + # plot to show extendable storages + storage_distribution(network) - return + # plot_residual_load(network) + plot_voltage(network) + # curtailment(network) + gen_dist(network) -def igeoplot(network, session, tiles=None, geoloc=None, args=None): - """ - Plot function in order to display eGo results on leaflet OSM map. - This function will open the results in your main Webbrowser + return - Parameters - ---------- +# +# def igeoplot(network, session, tiles=None, geoloc=None, args=None): +# """Plot function in order to display eGo results on leaflet OSM map. +# This function will open the results in your main Webbrowser +# +# Parameters +# ---------- +# +# network_etrago:: class: `etrago.tools.io.NetworkScenario` +# eTraGo network object compiled by: meth: `etrago.appl.etrago` +# tiles: str +# Folium background map style `None` as OSM or `Nasa` +# geoloc: : obj: `list` +# List which define center of map as (lon, lat) +# +# Returns +# ------- +# plot: html +# HTML file with .js plot +# """ +# # TODO +# # - implement eDisGo Polygons +# # - fix version problems of data +# # - use grid.ego_dp_hvmv_substation subst_id and otg_id +# # - use cluster or boxes to limit data volumn +# # - add Legend +# # - Map see: http://nbviewer.jupyter.org/gist/BibMartin/f153aa957ddc5fadc64929abdee9ff2e +# # - test cluster +# +# if geoloc is None: +# geoloc = [network.buses.y.mean(), network.buses.x.mean()] +# +# mp = folium.Map(tiles=None, location=geoloc, +# control_scale=True, zoom_start=6) +# +# # add Nasa light background +# if tiles == 'Nasa': +# tiles = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/VIIRS_CityLights_2012/default/GoogleMapsCompatible_Level8/{z}/{y}/{x}.jpg' +# attr = ('© OpenStreetMap contributors, © CartoDB') +# +# folium.raster_layers.TileLayer(tiles=tiles, attr=attr).add_to(mp) +# else: +# folium.raster_layers.TileLayer('OpenStreetMap').add_to(mp) +# # 'Stamen Toner' OpenStreetMap +# +# # Legend name +# bus_group = folium.FeatureGroup(name='Buses') +# # add buses +# +# # get scenario name from args +# scn_name = args['eTraGo']['scn_name'] +# version = args['eTraGo']['gridversion'] +# +# for name, row in network.buses.iterrows(): +# popup = """ < b > Bus: < /b > {} < br > +# carrier: {} < br > +# control: {} < br > +# type: {} < br > +# v_nom: {} < br > +# v_mag_pu_set: {} < br > +# v_mag_pu_min: {} < br > +# v_mag_pu_max: {} < br > +# sub_network: {} < br > +# Scenario: {} < br > +# version: {} < br > +# """.format(row.name, scn_name, row['carrier'], +# row['control'], row['type'], row['v_nom'], row['v_mag_pu_set'], +# row['v_mag_pu_min'], row['v_mag_pu_max'], row['sub_network'], version) # add Popup values use HTML for formating +# folium.Marker([row["y"], row["x"]], popup=popup).add_to(bus_group) +# +# # Prepare lines +# line_group = folium.FeatureGroup(name='Lines') +# +# # get line Coordinates +# x0 = network.lines.bus0.map(network.buses.x) +# x1 = network.lines.bus1.map(network.buses.x) +# +# y0 = network.lines.bus0.map(network.buses.y) +# y1 = network.lines.bus1.map(network.buses.y) +# 
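
# ---------------------------------------------------------------------------
# Aside (not part of the patch): the commented-out igeoplot() above looks up
# the end-bus coordinates of every line and draws them on a folium map.  The
# minimal, hypothetical stand-alone version below shows that idea; folium is
# assumed to be installed and the tiny buses/lines frames are made up.
import pandas as pd
import folium

buses = pd.DataFrame({'x': [9.43, 10.12], 'y': [54.78, 53.55]}, index=['b0', 'b1'])
lines = pd.DataFrame({'bus0': ['b0'], 'bus1': ['b1']}, index=['l0'])

# line end coordinates, looked up from the bus table (as in igeoplot above)
x0, x1 = lines.bus0.map(buses.x), lines.bus1.map(buses.x)
y0, y1 = lines.bus0.map(buses.y), lines.bus1.map(buses.y)

mp = folium.Map(location=[buses.y.mean(), buses.x.mean()], zoom_start=6)
for line in lines.index:
    folium.PolyLine([(y0[line], x0[line]), (y1[line], x1[line])],
                    popup="Line: {}".format(line)).add_to(mp)
mp.save('map_example.html')  # open the written HTML file in a browser
# ---------------------------------------------------------------------------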
+# # get content +# text = network.lines +# +# # color map lines +# colormap = cm.linear.Set1.scale( +# text.s_nom.min(), text.s_nom.max()).to_step(6) +# +# def convert_to_hex(rgba_color): +# """ +# convert rgba colors to hex +# """ +# red = str(hex(int(rgba_color[0]*255)))[2:].capitalize() +# green = str(hex(int(rgba_color[1]*255)))[2:].capitalize() +# blue = str(hex(int(rgba_color[2]*255)))[2:].capitalize() +# +# if blue == '0': +# blue = '00' +# if red == '0': +# red = '00' +# if green == '0': +# green = '00' +# +# return '#' + red + green + blue +# +# # toDo add more parameter +# for line in network.lines.index: +# popup = """ Line: {}
+# version: {}
+# v_nom: {}
+# s_nom: {}
+# capital_cost: {}
+# g: {}
+# g_pu: {}
+# terrain_factor: {}
+# """.format(line, version, text.v_nom[line], +# text.s_nom[line], text.capital_cost[line], +# text.g[line], text.g_pu[line], +# text.terrain_factor[line] +# ) +# # ToDo make it more generic +# +# def colormaper(): +# l_color = [] +# if colormap.index[6] >= text.s_nom[line] > colormap.index[5]: +# l_color = colormap.colors[5] +# elif colormap.index[5] >= text.s_nom[line] > colormap.index[4]: +# l_color = colormap.colors[4] +# elif colormap.index[4] >= text.s_nom[line] > colormap.index[3]: +# l_color = colormap.colors[3] +# elif colormap.index[3] >= text.s_nom[line] > colormap.index[2]: +# l_color = colormap.colors[2] +# elif colormap.index[2] >= text.s_nom[line] > colormap.index[1]: +# l_color = colormap.colors[1] +# elif colormap.index[1] >= text.s_nom[line] >= colormap.index[0]: +# l_color = colormap.colors[0] +# else: +# l_color = (0., 0., 0., 1.) +# return l_color +# +# l_color = colormaper() +# +# folium.PolyLine(([y0[line], x0[line]], [y1[line], x1[line]]), +# popup=popup, color=convert_to_hex(l_color)).\ +# add_to(line_group) +# +# # add grod districs +# grid_group = folium.FeatureGroup(name='Grid district') +# subst_id = list(network.buses.index) +# district = prepareGD(session, subst_id, version) +# # todo does not work with k-mean Cluster +# # folium.GeoJson(district).add_to(grid_group) +# +# # add layers and others +# colormap.caption = 'Colormap of Lines s_nom' +# mp.add_child(colormap) +# +# # Add layer groups +# bus_group.add_to(mp) +# line_group.add_to(mp) +# grid_group.add_to(mp) +# folium.LayerControl().add_to(mp) +# +# plugins.Fullscreen( +# position='topright', +# title='Fullscreen', +# title_cancel='Exit me', +# force_separate_button=True).add_to(mp) +# +# # Save Map +# mp.save('map.html') +# +# # Display htm result from consol +# new = 2 # open in a new tab, if possible +# # open a public URL, in this case, the webbrowser docs +# path = os.getcwd() +# url = path+"/map.html" +# webbrowser.open(url, new=new) + +# def prepareGD(session, subst_id=None, version=None): +# """ +# """ +# +# if version == 'v0.2.11': +# query = session.query(EgoDpMvGriddistrict.subst_id, +# EgoDpMvGriddistrict.geom) +# +# Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in +# query.filter(EgoDpMvGriddistrict.version == version, +# EgoDpMvGriddistrict.subst_id.in_(subst_id)).all()] +# # toDo add values of sub_id etc. 
to popup +# else: +# query = session.query(EgoGridMvGriddistrict.subst_id, +# EgoGridMvGriddistrict.geom) +# Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in +# query.all()] +# +# region = pd.DataFrame(Regions, columns=['subst_id', 'geometry']) +# crs = {'init': 'epsg:3035'} +# region = gpd.GeoDataFrame( +# Regions, columns=['subst_id', 'geometry'], crs=crs) +# +# return region + + +def total_power_costs_plot(etrago_network): + """ + plot power price of eTraGo - network : PyPSA - PyPSA network container - tiles : str - Folium background map style `None` as OSM or `Nasa` - geoloc : list of str - Define center of map as (lon,lat) + Parameters + ---------- + eTraGo :class:`etrago.io.NetworkScenario` - Returns + Returns ------- + plot :obj:`matplotlib.pyplot.show` + https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.show - HTML Plot page - - ToDo - ---- - - implement eDisGo Polygons - - fix version problems of data - - use grid.ego_dp_hvmv_substation subst_id and otg_id - - use cluster or boxes to limit data volumn - - add Legend - - Map see: http://nbviewer.jupyter.org/gist/BibMartin/f153aa957ddc5fadc64929abdee9ff2e - """ - - if geoloc is None: - geoloc = [network.buses.y.mean(),network.buses.x.mean()] - - mp = folium.Map(tiles=None,location=geoloc, control_scale=True, zoom_start=6) - - # add Nasa light background - if tiles == 'Nasa': - tiles = 'https://map1.vis.earthdata.nasa.gov/wmts-webmerc/VIIRS_CityLights_2012/default/GoogleMapsCompatible_Level8/{z}/{y}/{x}.jpg' - attr = ('© OpenStreetMap contributors, © CartoDB') - - folium.raster_layers.TileLayer(tiles = tiles, attr=attr).add_to(mp) - else: - folium.raster_layers.TileLayer('OpenStreetMap').add_to(mp) - # 'Stamen Toner' OpenStreetMap - - # Legend name - bus_group = folium.FeatureGroup(name='Buses') - # add buses - - # get scenario name from args - scn_name = args['eTraGo']['scn_name'] - version = args['eTraGo']['gridversion'] - - for name, row in network.buses.iterrows(): - popup = """ Bus: {}
- carrier: {}
- control: {}
- type:{}
- v_nom:{}
- v_mag_pu_set:{}
- v_mag_pu_min:{}
- v_mag_pu_max:{}
- sub_network:{}
- Scenario: {}
- version: {}
- """.format(row.name, scn_name,row['carrier'], - row['control'],row['type'],row['v_nom'],row['v_mag_pu_set'], - row['v_mag_pu_min'],row['v_mag_pu_max'],row['sub_network'] - ,version) # add Popup values use HTML for formating - folium.Marker([row["y"], row["x"]], popup=popup ).add_to(bus_group) - - - # Prepare lines - line_group = folium.FeatureGroup(name='Lines') - - # get line Coordinates - x0 = network.lines.bus0.map(network.buses.x) - x1 = network.lines.bus1.map(network.buses.x) - - y0 = network.lines.bus0.map(network.buses.y) - y1 = network.lines.bus1.map(network.buses.y) - - # get content - text = network.lines - - # color map lines - colormap = cm.linear.Set1.scale(text.s_nom.min(), text.s_nom.max()).to_step(6) - - def convert_to_hex(rgba_color): - """ - convert rgba colors to hex - """ - red = str(hex(int(rgba_color[0]*255)))[2:].capitalize() - green = str(hex(int(rgba_color[1]*255)))[2:].capitalize() - blue = str(hex(int(rgba_color[2]*255)))[2:].capitalize() - - if blue=='0': - blue = '00' - if red=='0': - red = '00' - if green=='0': - green='00' - - return '#'+ red + green + blue - - #toDo add more parameter - for line in network.lines.index: - popup = """ Line: {}
- version: {}
- v_nom: {}
- s_nom: {}
- capital_cost: {}
- g: {}
- g_pu: {}
- terrain_factor: {}
- """.format(line, version, text.v_nom[line], - text.s_nom[line], text.capital_cost[line], - text.g[line],text.g_pu[line], - text.terrain_factor[line] - ) - # ToDo make it more generic - def colormaper(): - l_color =[] - if colormap.index[6] >= text.s_nom[line] > colormap.index[5]: - l_color = colormap.colors[5] - elif colormap.index[5] >= text.s_nom[line] > colormap.index[4]: - l_color = colormap.colors[4] - elif colormap.index[4] >= text.s_nom[line] > colormap.index[3]: - l_color = colormap.colors[3] - elif colormap.index[3] >= text.s_nom[line] > colormap.index[2]: - l_color = colormap.colors[2] - elif colormap.index[2] >= text.s_nom[line] > colormap.index[1]: - l_color = colormap.colors[1] - elif colormap.index[1] >= text.s_nom[line] >= colormap.index[0]: - l_color = colormap.colors[0] - else: - l_color = (0.,0.,0.,1.) - return l_color - - l_color =colormaper() - - folium.PolyLine(([y0[line], x0[line]], [y1[line], x1[line]]), - popup=popup, color=convert_to_hex(l_color)).\ - add_to(line_group) - - # add grod districs - grid_group = folium.FeatureGroup(name='Grid district') - subst_id = list(network.buses.index) - district = prepareGD(session, subst_id , version) - # todo does not work with k-mean Cluster - #folium.GeoJson(district).add_to(grid_group) - - # add layers and others - colormap.caption = 'Colormap of Lines s_nom' - mp.add_child(colormap) - - # Add layer groups - bus_group.add_to(mp) - line_group.add_to(mp) - grid_group.add_to(mp) - folium.LayerControl().add_to(mp) - - plugins.Fullscreen( - position='topright', - title='Fullscreen', - title_cancel='Exit me', - force_separate_button=True).add_to(mp) - - - # Save Map - mp.save('map.html') - - # Display htm result from consol - new = 2 # open in a new tab, if possible - # open a public URL, in this case, the webbrowser docs - path = os.getcwd() - url = path+"/map.html" - webbrowser.open(url,new=new) - - -def prepareGD(session, subst_id= None, version=None ): - - if version == 'v0.2.11': - query = session.query(EgoDpMvGriddistrict.subst_id, EgoDpMvGriddistrict.geom) - - Regions = [(subst_id,shape.to_shape(geom)) for subst_id, geom in - query.filter(EgoDpMvGriddistrict.version == version , - EgoDpMvGriddistrict.subst_id.in_(subst_id)).all()] - # toDo add values of sub_id etc. 
to popup - else: - query = session.query(EgoGridMvGriddistrict.subst_id, EgoGridMvGriddistrict.geom) - Regions = [(subst_id,shape.to_shape(geom)) for subst_id, geom in - query.all()] - - - region = pd.DataFrame(Regions, columns=['subst_id','geometry']) - crs = {'init': 'epsg:3035'} - region = gpd.GeoDataFrame(Regions, columns=['subst_id','geometry'],crs=crs) - - return region - - -def total_power_costs_plot(eTraGo): - """ - plot power price of eTraGo - - Parameters - ---------- - eTraGo :class:`etrago.io.NetworkScenario` - - Returns - ------- - plot :obj:`matplotlib.pyplot.show` - M€/KW ->GW/MW - - # Chare of investment costs get volume - #ego.etrago['investment_costs'].sum()/(1000*1000*1000) + """ + input eGo + Bar plot all etrago costs + """ + # fig = plt.figure(figsize=(18,10), dpi=1600) + # plt.pie(ego.etrago['p'],autopct='%.1f') + # plt.title('Procentage of power production') - ego.etrago['p'].plot(kind="pie", - subplots=True, - figsize=(10,10), - autopct='%.1f') + # max(ego.etrago['investment_costs'])/(1000*1000*1000) # T€/kW->M€/KW ->GW/MW + # Chare of investment costs get volume + # ego.etrago['investment_costs'].sum()/(1000*1000*1000) - plt.show() + ego.etrago['p'].plot(kind="pie", + subplots=True, + figsize=(10, 10), + autopct='%.1f') + plt.show() def plotting_invest(result): @@ -380,23 +402,23 @@ def plotting_invest(result): result.plot(kind='bar', ax=ax) - return def plot_storage_use(storages): - """ - Intput ego.storages - """ - - ax = storages[['charge','discharge']].plot(kind='bar', - title ="Storage usage", - stacked=True, - #table=True, - figsize=(15, 10), - legend=True, - fontsize=12) - ax.set_xlabel("Kind of Storage", fontsize=12) - ax.set_ylabel("Charge and Discharge in MWh", fontsize=12) - plt.show() - return + """ + Intput ego.storages + """ + + ax = storages[['charge', 'discharge']].plot(kind='bar', + title="Storage usage", + stacked=True, + # table=True, + figsize=( + 15, 10), + legend=True, + fontsize=12) + ax.set_xlabel("Kind of Storage", fontsize=12) + ax.set_ylabel("Charge and Discharge in MWh", fontsize=12) + plt.show() + return diff --git a/ego/tools/results.py b/ego/tools/results.py index cc55f2c5..57d26a70 100644 --- a/ego/tools/results.py +++ b/ego/tools/results.py @@ -1,19 +1,26 @@ # -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description +"""This module include the results functions for analyze and creating results +based on eTraGo or eDisGo for eGo. 
""" -Module of eGo results with functions for writing, creating and results of eGo - -ToDo ----- - - add eDisGo - - write results to database - - integrate plot and other functions ad methods to the class eGo - - -""" -__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität"\ - "Flensburg, Centre for Sustainable Energy Systems" -__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "wolfbunke" +# TODO - write results to database import io import os @@ -23,460 +30,94 @@ if not 'READTHEDOCS' in os.environ: import pandas as pd import numpy as np - from etrago.tools.plot import (plot_line_loading, plot_stacked_gen, - curtailment, gen_dist, - storage_distribution, - plot_voltage,plot_residual_load) - -class egoBasic(): - """eGo basics class. - - Parameters - ---------- - - eTraGo : Network - Network container of eTraGo based on PyPSA - eDisGo : Network - Network container of eDisGo based on PyPSA - args : dict - Dict of the scenario_setting.json - - """ - - def __init__(self, eTraGo, *args, **kwargs): - - self.eTraGo = eTraGo - self.eDisGo = None - self.scn_name = kwargs.get('scn_name', 'Status Quo') - - def __repr__(self): - - r = ('eGoResults is created.') - if not self.eTraGo: - r += "\nThe results does not incluede eTraGo results" - if not self.eDisGo: - r += "\nThe results does not incluede eDisGo results" - return r - - -class eTraGoResults(egoBasic): - """eTraGo Results - - This module contains all results of eTraGo for eGo. - - - Examples - -------- - - The module can be used by ``etg = eTraGoResults()`` - - See also - -------- - The `eTraGo`_ documentation. - - References - ---------- - .. _eTraGo: - `eTraGo `_, \ - eTraGo Documentation. - """ - - def __init__(self,eTraGo, *args, **kwargs): - super().__init__(eTraGo, *args, **kwargs) - self.scn_name = kwargs.get('scn_name', 'Status Quo') - self.etrago = pd.DataFrame() - self.etrago.gernator = None - self.etrago.storage_charges = total_storage_charges(eTraGo) - self.etrago.storage_costs = etrago_storages(eTraGo) - self.etrago.operating_costs = etrago_operating_costs(eTraGo) - - #methods imported from eTraGo - eTraGo.plot_line_loading = plot_line_loading - - eTraGo.plot_stacked_gen = plot_stacked_gen - - eTraGo.curtailment = curtailment - - eTraGo.gen_dist = gen_dist - - eTraGo.storage_distribution = storage_distribution - - eTraGo.plot_voltage = plot_voltage - - eTraGo.plot_residual_load = plot_residual_load - - #self.etrago.gernator = create_etrago_results(eTraGo, scn_name) - - - - - - -class eDisGoResults(egoBasic): - """ eDisGo Results - - This module contains all results of eDisGo for eGo. - - ToDo - ---- - - add eDisGo - - add iteration for multiple ding0 grids - - """ - def __init__(self,eDisGo): - super().__init__(eDisGo) - self.edisgo = pd.DataFrame() - - pass - - -class eGo(eTraGoResults): - """Main eGo module which including all results and main functionalities. 
- - - Parameters - ---------- - eTraGo : Network - - eDisGo : Network - - - - ToDo - ---- - - add eDisGo - """ - def __init__(self,eTraGo, scn_name): - super().__init__(eTraGo, scn_name) - #super().__init__(eDisGo) - self.total = pd.DataFrame() - # add total results here - - # write_results_to_db(): - - pass + from ego.tools.economics import get_generator_investment +__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität"\ + "Flensburg, Centre for Sustainable Energy Systems" +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolfbunke" -def total_storage_charges(network): +def create_etrago_results(network, scn_name): # rename function """ - Sum up the pysical storage values of the total scenario based on - eTraGo results. + Create eTraGo results Parameters ---------- - network : eTraGo Network based on pypsa.network - PyPSA Network object modified by eTraGo - - plot (bool): - Use plot function - + network : :class:`~.etrago.tools.io.NetworkScenario` + eTraGo ``NetworkScenario`` based on PyPSA Network. See also + `pypsa.network `_ - Returns - ------- - - results : pandas.DataFrame - Return ... - - Notes - ----- - charge : - Quantity of charged Energy in MWh over scenario time steps - - discharge : - Quantity of discharged Energy in MWh over scenario time steps - - count : - Number of storage units - - p_nom_o_sum: - Sum of optimal installed power capacity - """ - - charge = network.storage_units_t.p[network.storage_units_t.\ - p[network.storage_units[network.storage_units.\ - p_nom_opt>0].index].values>0.].\ - groupby(network.storage_units.carrier, axis=1).sum().sum() - - discharge = network.storage_units_t.p[network.storage_units_t.\ - p[network.storage_units[network.storage_units.\ - p_nom_opt>0].index].values<0.].\ - groupby(network.storage_units.carrier, axis=1).sum().sum() - - - count = network.storage_units.bus[network.storage_units.p_nom_opt>0].\ - groupby(network.storage_units.carrier, axis=0).count() + scn_name : str + Name of used scenario - p_nom_sum = network.storage_units.p_nom.groupby(network.storage_units.\ - carrier, axis=0).sum() - - p_nom_o_sum = network.storage_units.p_nom_opt.groupby(network.storage_units.\ - carrier, axis=0).sum() - p_nom_o = p_nom_sum - p_nom_o_sum # Zubau - - - results = pd.concat([charge.rename('charge'), discharge.rename('discharge'), - p_nom_sum, count.rename('total_units'),p_nom_o\ - .rename('extension'),],axis=1, join='outer') - - - return results - -def etrago_storages(network): - """Function for storage and grid expantion costs of eTraGo. - - Parameters - ---------- - - network : eTraGo Network - eTraGo Network Class based on PyPSA Returns ------- - storages : pandas.DataFrame - DataFrame with cumulated results of storages + generator : :pandas:`pandas.DataFrame` + Result of generator as DataFrame in ``ego.etrago.generator`` """ - # Charge / discharge (MWh) and installed capacity MW - storages = total_storage_charges(network=network) - - return storages - - -def etrago_operating_costs(network): - """ Function to get all operating costs of eTraGo. - - Parameters - ---------- - network : Network of eTraGo - Network of eTraGo - - Returns - ------- - power_price : :class:`~.pd.DataFrame` - - Examples - -------- - - - losses - - grid losses : amount and costs - - use calc_line_losses(network) from etrago pf_post_lopf - - ToDo - ---- - - change naming and function structure - - seperate operation costs in other functions ? 
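
# ---------------------------------------------------------------------------
# Aside (not part of the patch): both the removed etrago_operating_costs()
# below and the new create_etrago_results() derive the "power price" as the
# dispatch-weighted marginal cost per carrier, excluding the slack generator.
# A minimal, hypothetical numeric example of that aggregation:
import pandas as pd

generators = pd.DataFrame(
    {'carrier': ['wind', 'gas', 'gas'],
     'control': ['PV', 'PQ', 'Slack'],
     'marginal_cost': [0.0, 45.0, 45.0]},   # EUR/MWh
    index=['g1', 'g2', 'g3'])
p = pd.DataFrame({'g1': [10.0, 12.0], 'g2': [5.0, 4.0], 'g3': [1.0, 0.0]})  # MWh per snapshot

non_slack = generators[generators.control != 'Slack'].index
costs = p[non_slack] * generators.marginal_cost[non_slack]           # EUR per snapshot
power_price = costs.groupby(generators.carrier, axis=1).sum().sum()  # EUR per carrier
print(power_price)  # gas 405.0, wind 0.0
# ---------------------------------------------------------------------------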
- - """ etg = network - #etg = eTraGo - # groupby v_nom - power_price = etg.generators_t.p[etg.generators[etg.generators.\ - control!='Slack'].index]* etg.generators.\ - marginal_cost[etg.generators[etg.generators.\ - control!='Slack'].index] # without Slack - - power_price = power_price.groupby(etg.generators.carrier, axis=1).sum().sum() - power_price + etrago = pd.DataFrame() - etg.buses_t.marginal_price - etg.buses_t['p'].sum().sum() + etrago['p_nom'] = etg.generators.groupby('carrier')['p_nom'].sum() # in MW + etrago['p_nom_opt'] = etg.generators.groupby('carrier')[ + 'p_nom_opt'].sum() # in MW + # power price + etrago['marginal_cost'] = etg.generators.groupby('carrier' + )['marginal_cost'].mean() # in in [EUR] - # active power x nodel price / - etg.lines_t['p0'].sum().sum() - etg.lines_t['p1'].sum().sum() - # Reactive power - etg.lines_t['q0'].sum().sum() - etg.lines_t['q1'].sum().sum() + # get power price by production MWh _t.p * marginal_cost + power_price = etg.generators_t.p[etg.generators[etg.generators. + control != 'Slack'].index] * etg.generators.\ + marginal_cost[etg.generators[etg.generators. + control != 'Slack'].index] # without Slack - # currency/MVA ? wie berechnen + power_price = power_price.groupby( + etg.generators.carrier, axis=1).sum().sum() + etrago['power_price'] = power_price - etg.lines_t['mu_lower'].sum().sum() + # use country code + p_by_carrier = pd.concat([etg.generators_t.p + [etg.generators[etg.generators.control != 'Slack'].index], + etg.generators_t.p[etg.generators[etg. + generators.control == 'Slack'].index].iloc[:, 0]. + apply(lambda x: x if x > 0 else 0)], axis=1).\ + groupby(etg.generators.carrier, axis=1).sum() # in MWh - etg.lines['s_nom'].sum() + etrago['p'] = p_by_carrier.sum() + # add invetment + result_invest = get_generator_investment(network, scn_name) - etg.lines_t['mu_upper'].sum().sum() + etrago = etrago.assign(investment_costs=result_invest['carrier_costs']) - return power_price + return etrago -def etrago_grid_investment(network): - """ Function to get grid expantion costs form etrago +def results_per_voltage(network): + """Get eTraGo results per voltage level Parameters ---------- - - network : Network - eTraGo + network : :class:`etrago.tools.io.NetworkScenario` + eTraGo ``NetworkScenario`` based on PyPSA Network. See also + `pypsa.network `_ Returns ------- - ToDo - ---- - - add new release of etrago 0.7 - """ - - pass - - -def edisgo_grid_investment(network): - """Function to get all costs of grid investment of eDisGo. - - Notes - ----- - - ToDo add iteration and container of all costs of edisgo network - """ - pass - -def get_generator_investment(network, scn_name): - """ Get investment costs per carrier/gernator. - - work around later db table -> check capital_cost as cost input?!? 
- - ToDo - ---- - - change values in csv - - add values to database - - """ - etg = network - - path = os.getcwd() - filename='investment_costs.csv' - invest = pd.DataFrame.from_csv(path +'/data/'+filename) - - - if scn_name in ['SH Status Quo', 'Status Quo']: - invest_scn = 'Status Quo' - - if scn_name in ['SH NEP 2035', 'NEP 2035']: - invest_scn = 'NEP 2035' - - if scn_name in ['SH eGo 100', 'eGo 100']: - invest_scn = 'eGo 100' - - gen_invest = pd.concat([invest[invest_scn], - etg.generators.groupby('carrier')['p_nom'].sum()], - axis=1, join='inner') - - gen_invest = pd.concat([invest[invest_scn],etg.generators.groupby('carrier')\ - ['p_nom'].sum()], axis=1, join='inner') - gen_invest['carrier_costs'] = gen_invest[invest_scn] * gen_invest['p_nom'] *1000 # in MW - - - return gen_invest - - -def investment_costs(network): - """ - Return pandas DataFrame with investment costs of - - etrago: - Storages - Line extentation - - edisgo: - Line extentation - Storage costs? - - ToDo - ---- - - add edisgo - """ - etg = network - invest = pd.DataFrame() - - # storages - # get total storage investment costs - # unit of costs? - installed_storages = etg.storage_units[etg.storage_units.p_nom_opt!=0] - costs = sum(installed_storages.capital_cost * installed_storages.p_nom_opt) - invest= invest.append({'storage_costs': costs}, ignore_index=True) - - - # get storage costs per voltage level - loc = etg.storage_units[etg.storage_units.p_nom_opt!=0]['bus'] - v_level = etg.buses.loc[loc, :]['v_nom'] - installed_storages= installed_storages.assign(v_nom=0) - - for i,k in v_level.iteritems(): - installed_storages.loc[installed_storages[installed_storages.bus==i].index, 'v_nom'] = k - - storage_level = installed_storages.groupby('v_nom')['capital_cost'].sum() - - - # Line extentation costs - # (eTraGo.lines.s_nom_opt - eTraGo.lines.s_nom) * eTraGo.lines.capital_cost - line_expen = (etg.lines.groupby('v_nom')['s_nom_opt'].sum() - - etg.lines.groupby('v_nom')['s_nom'].sum()) - - if line_expen.sum() <= 0: - print('Warning: !line extentation, set random costs for plotting!') - - lines_level = pd.DataFrame([[110.,722*np.exp(8)],[220.,822*np.exp(8)],\ - [380.,999*np.exp(9)]], columns=['v_nom','capital_cost']).\ - groupby('v_nom')['capital_cost'].sum() - - - invest= invest.assign(line_costs=lines_level.sum()) - - - - #invest.transpose() - - # transfomers expantion costs - return invest - -def create_etrago_results(network,scn_name): - """ - Create eTraGo results - - Returns - ------- - etrago : :obj:pd.DataFrame +def ego_results_to_oedb(total): + """ Function to upload results into oedb database """ + # TODO - etg = network - etrago = pd.DataFrame() - - etrago['p_nom'] = etg.generators.groupby('carrier')\ - ['p_nom'].sum() # in MW - etrago['p_nom_opt'] = etg.generators.groupby('carrier')[ - 'p_nom_opt'].sum() # in MW - # power price - etrago['marginal_cost'] = etg.generators.groupby('carrier' - )['marginal_cost'].mean() # in in [EUR] - - # get power price by production MWh _t.p * marginal_cost - power_price = etg.generators_t.p[etg.generators[etg.generators.\ - control!='Slack'].index]* etg.generators.\ - marginal_cost[etg.generators[etg.generators.\ - control!='Slack'].index] # without Slack - - power_price = power_price.groupby(etg.generators.carrier, axis=1).sum().sum() - etrago['power_price'] = power_price - - # use country code - p_by_carrier = pd.concat([etg.generators_t.p - [etg.generators[etg.generators.control!='Slack'].index], # - etg.generators_t.p[etg.generators[etg. 
- generators.control=='Slack'].index].iloc[:,0]. - apply(lambda x: x if x > 0 else 0)], axis=1).\ - groupby(etg.generators.carrier, axis=1).sum() # in MWh - - etrago['p'] = p_by_carrier.sum() - # add invetment - result_invest = get_generator_investment(network, scn_name) - - etrago = etrago.assign(investment_costs=result_invest['carrier_costs']) - - return etrago + pass if __name__ == '__main__': diff --git a/ego/tools/specs.py b/ego/tools/specs.py index 680e19cf..dfa5afbc 100644 --- a/ego/tools/specs.py +++ b/ego/tools/specs.py @@ -1,543 +1,690 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description +""" +This files contains all eGo interface functions +""" + +__copyright__ = ("Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems") +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolf_bunke,maltesc" + # Import -## General Packages +# General Packages import os import pandas as pd import time if not 'READTHEDOCS' in os.environ: - from sqlalchemy import distinct - from egoio.db_tables import model_draft # This gives me the specific ORM classes. - from edisgo.grid.network import ETraGoSpecs - -import logging # ToDo: Logger should be set up more specific -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -# Functions - -def get_etragospecs_from_db(session, - bus_id, - result_id): - """ - Reads eTraGo Results from Database and returns an Object of the Interface class ETraGoSpecs - - Parameters - ---------- - session : :class:`~.` #Todo: Add class etc.... - Oemof session object (Database Interface) - bus_id : int - ID of the corresponding HV bus - result_id : int - ID of the corresponding database result - - - Returns - ------- - etragospecs : :class:~.` - eDisGo ETraGoSpecs Object - - """ - print("\nSpecs from DB") - specs_meta_data = {} - performance = {} - - specs_meta_data.update({'TG Bus ID':bus_id}) - specs_meta_data.update({'Result ID':result_id}) - - - # Mapping - ormclass_result_meta = model_draft.__getattribute__('EgoGridPfHvResultMeta') - ormclass_result_bus = model_draft.__getattribute__('EgoGridPfHvResultBus') # Instead of using the automapper, this is the explicit alternative (from egoei.db_tables). 
- #ormclass_result_bus = model_draft.EgoGridPfHvResultBus # This is equivalent - #ormclass_result_bus_t = model_draft.__getattribute__('EgoGridPfHvResultBusT') - ormclass_result_gen = model_draft.__getattribute__('EgoGridPfHvResultGenerator') - ormclass_result_gen_t = model_draft.__getattribute__('EgoGridPfHvResultGeneratorT') - #ormclass_result_gen_single = model_draft.__getattribute__('EgoSupplyPfGeneratorSingle') - #ormclass_result_load = model_draft.__getattribute__('EgoGridPfHvResultLoad') - #ormclass_result_load_t = model_draft.__getattribute__('EgoGridPfHvResultLoadT') - ormclass_result_stor = model_draft.__getattribute__('EgoGridPfHvResultStorage') - ormclass_result_stor_t = model_draft.__getattribute__('EgoGridPfHvResultStorageT') - ormclass_source = model_draft.__getattribute__('EgoGridPfHvSource') - ormclass_aggr_w = model_draft.__getattribute__('ego_supply_aggr_weather_mview') - - # Meta Queries - ## Check - - if session.query(ormclass_result_bus).filter( - ormclass_result_bus.bus_id == bus_id, - ormclass_result_bus.result_id == result_id - ).count() == 0: - logger.warning('Bus not found') - - ## Snapshot Range - - snap_idx = session.query( - ormclass_result_meta.snapshots - ).filter( - ormclass_result_meta.result_id == result_id - ).scalar( - ) - - scn_name = session.query( - ormclass_result_meta.scn_name - ).filter( - ormclass_result_meta.result_id == result_id - ).scalar( - ) - if scn_name == 'SH Status Quo': - scn_name = 'Status Quo' - - specs_meta_data.update({'scn_name':scn_name}) - - # Generators - - try: - t0 = time.perf_counter() - weather_dpdnt = ['wind','solar'] - ## Conventionals - t1 = time.perf_counter() - performance.update({'Generator Data Processing':t1-t0}) - - query = session.query( - ormclass_result_gen.generator_id, # This ID is an aggregate ID (single generators aggregated) - ormclass_result_gen.p_nom, - ormclass_source.name - ).join( - ormclass_source, - ormclass_source.source_id == ormclass_result_gen.source - ).filter( - ormclass_result_gen.bus == bus_id, - ormclass_result_gen.result_id == result_id, - ormclass_source.name.notin_(weather_dpdnt)) - - conv_df = pd.DataFrame(query.all(), - columns=[column['name'] for - column in - query.column_descriptions]) - - conv_cap = conv_df[['p_nom','name']].groupby('name').sum().T - - query = session.query( - ormclass_result_gen_t.generator_id, - ormclass_result_gen_t.p - ).filter( - ormclass_result_gen_t.generator_id.in_(conv_df['generator_id']), - ormclass_result_gen_t.result_id == result_id - ) - - conv_t_df = pd.DataFrame(query.all(), - columns=[column['name'] for column in query.column_descriptions]) - - conv_t_df = pd.merge(conv_df, - conv_t_df, - on='generator_id')[[ - 'name', - 'p']] - - conv_dsptch_norm = pd.DataFrame(0.0, - index=snap_idx, - columns=list(set(conv_df['name']))) - - for index, row in conv_t_df.iterrows(): - source = row['name'] - gen_series_norm = pd.Series( - data=(row['p'] / conv_cap[source]['p_nom'] ), # Every generator normalized by installed capacity. 
- index=snap_idx) - conv_dsptch_norm[source] = conv_dsptch_norm[source] + gen_series_norm - - ## Renewables - t2 = time.perf_counter() - performance.update({'Conventional Dispatch':t2-t1}) - ### Capacities - - query = session.query( - ormclass_result_gen.generator_id, - ormclass_result_gen.p_nom, - ormclass_result_gen.p_nom_opt, - ormclass_source.name, - ormclass_aggr_w.c.w_id - ).join( - ormclass_source, - ormclass_source.source_id == ormclass_result_gen.source - ).join( - ormclass_aggr_w, - ormclass_aggr_w.c.aggr_id == ormclass_result_gen.generator_id - - ).filter( - ormclass_result_gen.bus == bus_id, - ormclass_result_gen.result_id == result_id, - ormclass_source.name.in_(weather_dpdnt), - ormclass_aggr_w.c.scn_name == scn_name) - - ren_df = pd.DataFrame(query.all(), - columns=[column['name'] for - column in - query.column_descriptions]) - - aggr_gens = ren_df.groupby([ - 'name', - 'w_id' - ]).agg({'p_nom': 'sum'}).reset_index() - - aggr_gens.rename(columns={'p_nom': 'p_nom_aggr'}, inplace=True) - - aggr_gens['ren_id'] = aggr_gens.index - - ### Dispatch and Curteilment - - query = session.query( - ormclass_result_gen_t.generator_id, # This is an aggregated generator ID (see ego_dp_powerflow_assignment_generator for info) - ormclass_result_gen_t.p, - ormclass_result_gen_t.p_max_pu # The maximum output for each snapshot per unit of p_nom for the OPF (e.g. for variable renewable generators this can change due to weather conditions; for conventional generators it represents a maximum dispatch) - ).filter( - ormclass_result_gen_t.generator_id.in_(ren_df['generator_id']), - ormclass_result_gen_t.result_id == result_id - ) - - ren_t_df = pd.DataFrame(query.all(), - columns=[column['name'] for - column in - query.column_descriptions]) - ren_t_df = pd.merge(ren_t_df, ren_df, on='generator_id')[[ - 'generator_id', - 'w_id', - 'name', - 'p', - 'p_max_pu']] - - dispatch = pd.DataFrame(0.0, - index=snap_idx, - columns=aggr_gens['ren_id']) - curtailment = pd.DataFrame(0.0, - index=snap_idx, - columns=aggr_gens['ren_id']) - - for index, row in ren_t_df.iterrows(): - gen_id = row['generator_id'] - name = row['name'] - w_id = row['w_id'] - ren_id = int(aggr_gens[ - (aggr_gens['name'] == name) & - (aggr_gens['w_id'] == w_id)]['ren_id']) - - p_nom_aggr = float(aggr_gens[aggr_gens['ren_id'] == ren_id]['p_nom_aggr']) - p_nom = float(ren_df[ren_df['generator_id'] == gen_id]['p_nom']) - - - p_series = pd.Series(data=row['p'], index=snap_idx) - p_norm_tot_series = p_series / p_nom_aggr - - p_max_pu_series = pd.Series(data=row['p_max_pu'], index=snap_idx) - p_max_norm_tot_series = p_max_pu_series * p_nom / p_nom_aggr + # from sqlalchemy import distinct + # This gives me the specific ORM classes. 
+ from egoio.db_tables import model_draft +# from edisgo.grid.network import ETraGoSpecs - p_curt_norm_tot_series = p_max_norm_tot_series - p_norm_tot_series +import logging +logger = logging.getLogger('ego') - dispatch[ren_id] = dispatch[ren_id] + p_norm_tot_series - curtailment[ren_id] = curtailment[ren_id] + p_curt_norm_tot_series - - except: - logger.exception("Generators could not be queried for \ - Specs with Metadata: \n %s" %specs_meta_data) - - # Load - # Load are not part of the Specs anymore - - # Storage - t3 = time.perf_counter() - performance.update({'Renewable Dispatch and Curt.':t3-t2}) - try: - ## Capactiy - query = session.query( - ormclass_result_stor.storage_id, - ormclass_result_stor.p_nom_opt, - ormclass_result_stor.p_nom, - ormclass_result_stor.max_hours, - ormclass_source.name - ).join( - ormclass_source, - ormclass_source.source_id == ormclass_result_stor.source - ).filter( - ormclass_result_stor.bus == bus_id, - ormclass_result_stor.result_id == result_id, - ormclass_source.name == 'extendable_storage') - - stor_df = pd.DataFrame(query.all(), - columns=[column['name'] for - column in - query.column_descriptions]) - - - stor_df['capacity_MWh'] = stor_df['p_nom_opt'] * stor_df['max_hours'] - - count_bat = 0 - for index, row in stor_df.iterrows(): - if row['max_hours'] >= 20.0: - stor_df.at[index, 'name'] = 'ext_long_term' - else: - stor_df.at[index, 'name'] = 'battery' # ToDo: find a more generic solution - count_bat += 1 - - ### Project Specific Battery Capacity - battery_capacity = 0.0 # MWh - for index, row in stor_df.iterrows(): - if row['name'] == 'battery': - battery_capacity = battery_capacity + row['capacity_MWh'] - - ## Dispatch - query = session.query( - ormclass_result_stor_t.storage_id, - ormclass_result_stor_t.p, - ormclass_result_stor_t.state_of_charge - ).filter( - ormclass_result_stor_t.storage_id.in_( - stor_df['storage_id']), - ormclass_result_stor_t.result_id == result_id - ) - stor_t_df = pd.DataFrame(query.all(), - columns=[column['name'] for - column in - query.column_descriptions]) - - stor_t_df = pd.merge(stor_t_df, stor_df, on='storage_id')[[ - 'storage_id', - 'name', - 'p', - 'state_of_charge']] - - ### Project Specific Battery Active Power - battery_active_power = pd.Series(0.0, index = snap_idx) - for index, row in stor_t_df.iterrows(): - name = row['name'] - if name == 'battery': - stor_series = pd.Series( - data=row['p'], # in MW - index=snap_idx) - stor_series_kW = [x * 1000 for x in stor_series] # in kW - battery_active_power = battery_active_power + stor_series_kW - - except: - logger.exception("Storage could not be queried for \ - Specs with Metadata: \n %s" %specs_meta_data) - - - # Return Specs - t4 = time.perf_counter() - performance.update({'Storage Data Processing and Dispatch':t4-t3}) - - - specs = ETraGoSpecs(battery_capacity=battery_capacity, - battery_active_power=battery_active_power, - - conv_dispatch=conv_dsptch_norm, - - renewables=aggr_gens, - ren_dispatch=dispatch, - ren_curtailment=curtailment) - - # logger.info(specs_meta_data) - t5 = time.perf_counter() - performance.update({'Overall time':t5-t0}) - -# print("\n Conventional Dispatch (Normalized): \n", -# conv_dsptch_norm, -# "\n\n Renewable Generators: \n", -# aggr_gens, -# "\n\n Renewable Dispatch: \n", -# dispatch, -# "\n\n Renewable Curtailment: \n", -# curtailment, "\n\n") - - for keys,values in performance.items(): - print(keys, ": ", values) +# Functions - return specs +# def get_etragospecs_from_db(session, +# bus_id, +# result_id): +# """ +# Reads eTraGo 
Results from Database and returns an Object of the Interface class ETraGoSpecs +# +# Parameters +# ---------- +# session : :class:`~.` #Todo: Add class etc.... +# Oemof session object (Database Interface) +# bus_id : int +# ID of the corresponding HV bus +# result_id : int +# ID of the corresponding database result +# +# +# Returns +# ------- +# etragospecs : :class:~.` +# eDisGo ETraGoSpecs Object +# +# """ +# print("\nSpecs from DB") +# specs_meta_data = {} +# performance = {} +# +# specs_meta_data.update({'TG Bus ID': bus_id}) +# specs_meta_data.update({'Result ID': result_id}) +# +# # Mapping +# ormclass_result_meta = model_draft.__getattribute__('EgoGridPfHvResultMeta') +# # Instead of using the automapper, this is the explicit alternative (from egoei.db_tables). +# ormclass_result_bus = model_draft.__getattribute__('EgoGridPfHvResultBus') +# # ormclass_result_bus = model_draft.EgoGridPfHvResultBus # This is equivalent +# #ormclass_result_bus_t = model_draft.__getattribute__('EgoGridPfHvResultBusT') +# ormclass_result_gen = model_draft.__getattribute__( +# 'EgoGridPfHvResultGenerator') +# ormclass_result_gen_t = model_draft.__getattribute__( +# 'EgoGridPfHvResultGeneratorT') +# #ormclass_result_gen_single = model_draft.__getattribute__('EgoSupplyPfGeneratorSingle') +# #ormclass_result_load = model_draft.__getattribute__('EgoGridPfHvResultLoad') +# #ormclass_result_load_t = model_draft.__getattribute__('EgoGridPfHvResultLoadT') +# ormclass_result_stor = model_draft.__getattribute__( +# 'EgoGridPfHvResultStorage') +# ormclass_result_stor_t = model_draft.__getattribute__( +# 'EgoGridPfHvResultStorageT') +# ormclass_source = model_draft.__getattribute__('EgoGridPfHvSource') +# ormclass_aggr_w = model_draft.__getattribute__( +# 'ego_supply_aggr_weather_mview') +# +# # Meta Queries +# # Check +# +# if session.query(ormclass_result_bus).filter( +# ormclass_result_bus.bus_id == bus_id, +# ormclass_result_bus.result_id == result_id +# ).count() == 0: +# logger.warning('Bus not found') +# +# # Snapshot Range +# +# snap_idx = session.query( +# ormclass_result_meta.snapshots +# ).filter( +# ormclass_result_meta.result_id == result_id +# ).scalar( +# ) +# +# scn_name = session.query( +# ormclass_result_meta.scn_name +# ).filter( +# ormclass_result_meta.result_id == result_id +# ).scalar( +# ) +# if scn_name == 'SH Status Quo': +# scn_name = 'Status Quo' +# +# specs_meta_data.update({'scn_name': scn_name}) +# +# # Generators +# +# try: +# t0 = time.perf_counter() +# weather_dpdnt = ['wind', 'solar'] +# # Conventionals +# t1 = time.perf_counter() +# performance.update({'Generator Data Processing': t1-t0}) +# +# query = session.query( +# # This ID is an aggregate ID (single generators aggregated) +# ormclass_result_gen.generator_id, +# ormclass_result_gen.p_nom, +# ormclass_source.name +# ).join( +# ormclass_source, +# ormclass_source.source_id == ormclass_result_gen.source +# ).filter( +# ormclass_result_gen.bus == bus_id, +# ormclass_result_gen.result_id == result_id, +# ormclass_source.name.notin_(weather_dpdnt)) +# +# conv_df = pd.DataFrame(query.all(), +# columns=[column['name'] for +# column in +# query.column_descriptions]) +# +# conv_cap = conv_df[['p_nom', 'name']].groupby('name').sum().T +# +# query = session.query( +# ormclass_result_gen_t.generator_id, +# ormclass_result_gen_t.p +# ).filter( +# ormclass_result_gen_t.generator_id.in_(conv_df['generator_id']), +# ormclass_result_gen_t.result_id == result_id +# ) +# +# conv_t_df = pd.DataFrame(query.all(), +# columns=[column['name'] for 
column in query.column_descriptions]) +# +# conv_t_df = pd.merge(conv_df, +# conv_t_df, +# on='generator_id')[[ +# 'name', +# 'p']] +# +# conv_dsptch_norm = pd.DataFrame(0.0, +# index=snap_idx, +# columns=list(set(conv_df['name']))) +# +# for index, row in conv_t_df.iterrows(): +# source = row['name'] +# gen_series_norm = pd.Series( +# # Every generator normalized by installed capacity. +# data=(row['p'] / conv_cap[source]['p_nom']), +# index=snap_idx) +# conv_dsptch_norm[source] = conv_dsptch_norm[source] + \ +# gen_series_norm +# +# # Renewables +# t2 = time.perf_counter() +# performance.update({'Conventional Dispatch': t2-t1}) +# # Capacities +# +# query = session.query( +# ormclass_result_gen.generator_id, +# ormclass_result_gen.p_nom, +# ormclass_result_gen.p_nom_opt, +# ormclass_source.name, +# ormclass_aggr_w.c.w_id +# ).join( +# ormclass_source, +# ormclass_source.source_id == ormclass_result_gen.source +# ).join( +# ormclass_aggr_w, +# ormclass_aggr_w.c.aggr_id == ormclass_result_gen.generator_id +# +# ).filter( +# ormclass_result_gen.bus == bus_id, +# ormclass_result_gen.result_id == result_id, +# ormclass_source.name.in_(weather_dpdnt), +# ormclass_aggr_w.c.scn_name == scn_name) +# +# ren_df = pd.DataFrame(query.all(), +# columns=[column['name'] for +# column in +# query.column_descriptions]) +# +# aggr_gens = ren_df.groupby([ +# 'name', +# 'w_id' +# ]).agg({'p_nom': 'sum'}).reset_index() +# +# aggr_gens.rename(columns={'p_nom': 'p_nom_aggr'}, inplace=True) +# +# aggr_gens['ren_id'] = aggr_gens.index +# +# ### Dispatch and Curteilment +# +# query = session.query( +# # This is an aggregated generator ID (see ego_dp_powerflow_assignment_generator for info) +# ormclass_result_gen_t.generator_id, +# ormclass_result_gen_t.p, +# # The maximum output for each snapshot per unit of p_nom for the OPF (e.g. 
for variable renewable generators this can change due to weather conditions; for conventional generators it represents a maximum dispatch) +# ormclass_result_gen_t.p_max_pu +# ).filter( +# ormclass_result_gen_t.generator_id.in_(ren_df['generator_id']), +# ormclass_result_gen_t.result_id == result_id +# ) +# +# ren_t_df = pd.DataFrame(query.all(), +# columns=[column['name'] for +# column in +# query.column_descriptions]) +# ren_t_df = pd.merge(ren_t_df, ren_df, on='generator_id')[[ +# 'generator_id', +# 'w_id', +# 'name', +# 'p', +# 'p_max_pu']] +# +# dispatch = pd.DataFrame(0.0, +# index=snap_idx, +# columns=aggr_gens['ren_id']) +# curtailment = pd.DataFrame(0.0, +# index=snap_idx, +# columns=aggr_gens['ren_id']) +# +# for index, row in ren_t_df.iterrows(): +# gen_id = row['generator_id'] +# name = row['name'] +# w_id = row['w_id'] +# ren_id = int(aggr_gens[ +# (aggr_gens['name'] == name) & +# (aggr_gens['w_id'] == w_id)]['ren_id']) +# +# p_nom_aggr = float( +# aggr_gens[aggr_gens['ren_id'] == ren_id]['p_nom_aggr']) +# p_nom = float(ren_df[ren_df['generator_id'] == gen_id]['p_nom']) +# +# p_series = pd.Series(data=row['p'], index=snap_idx) +# p_norm_tot_series = p_series / p_nom_aggr +# +# p_max_pu_series = pd.Series(data=row['p_max_pu'], index=snap_idx) +# p_max_norm_tot_series = p_max_pu_series * p_nom / p_nom_aggr +# +# p_curt_norm_tot_series = p_max_norm_tot_series - p_norm_tot_series +# +# dispatch[ren_id] = dispatch[ren_id] + p_norm_tot_series +# curtailment[ren_id] = curtailment[ren_id] + p_curt_norm_tot_series +# +# except: +# logger.exception("Generators could not be queried for \ +# Specs with Metadata: \n %s" % specs_meta_data) +# +# # Load +# # Load are not part of the Specs anymore +# +# # Storage +# t3 = time.perf_counter() +# performance.update({'Renewable Dispatch and Curt.': t3-t2}) +# try: +# # Capactiy +# query = session.query( +# ormclass_result_stor.storage_id, +# ormclass_result_stor.p_nom_opt, +# ormclass_result_stor.p_nom, +# ormclass_result_stor.max_hours, +# ormclass_source.name +# ).join( +# ormclass_source, +# ormclass_source.source_id == ormclass_result_stor.source +# ).filter( +# ormclass_result_stor.bus == bus_id, +# ormclass_result_stor.result_id == result_id, +# ormclass_source.name == 'extendable_storage') +# +# stor_df = pd.DataFrame(query.all(), +# columns=[column['name'] for +# column in +# query.column_descriptions]) +# +# stor_df['capacity_MWh'] = stor_df['p_nom_opt'] * stor_df['max_hours'] +# +# count_bat = 0 +# for index, row in stor_df.iterrows(): +# if row['max_hours'] >= 20.0: +# stor_df.at[index, 'name'] = 'ext_long_term' +# else: +# # ToDo: find a more generic solution +# stor_df.at[index, 'name'] = 'battery' +# count_bat += 1 +# +# # Project Specific Battery Capacity +# battery_capacity = 0.0 # MWh +# for index, row in stor_df.iterrows(): +# if row['name'] == 'battery': +# battery_capacity = battery_capacity + row['capacity_MWh'] +# +# # Dispatch +# query = session.query( +# ormclass_result_stor_t.storage_id, +# ormclass_result_stor_t.p, +# ormclass_result_stor_t.state_of_charge +# ).filter( +# ormclass_result_stor_t.storage_id.in_( +# stor_df['storage_id']), +# ormclass_result_stor_t.result_id == result_id +# ) +# stor_t_df = pd.DataFrame(query.all(), +# columns=[column['name'] for +# column in +# query.column_descriptions]) +# +# stor_t_df = pd.merge(stor_t_df, stor_df, on='storage_id')[[ +# 'storage_id', +# 'name', +# 'p', +# 'state_of_charge']] +# +# # Project Specific Battery Active Power +# battery_active_power = pd.Series(0.0, 
index=snap_idx) +# for index, row in stor_t_df.iterrows(): +# name = row['name'] +# if name == 'battery': +# stor_series = pd.Series( +# data=row['p'], # in MW +# index=snap_idx) +# stor_series_kW = [x * 1000 for x in stor_series] # in kW +# battery_active_power = battery_active_power + stor_series_kW +# +# except: +# logger.exception("Storage could not be queried for \ +# Specs with Metadata: \n %s" % specs_meta_data) +# +# # Return Specs +# t4 = time.perf_counter() +# performance.update({'Storage Data Processing and Dispatch': t4-t3}) +# +# specs = ETraGoSpecs(battery_capacity=battery_capacity, +# battery_active_power=battery_active_power, +# +# conv_dispatch=conv_dsptch_norm, +# +# renewables=aggr_gens, +# ren_dispatch=dispatch, +# ren_curtailment=curtailment) +# +# # logger.info(specs_meta_data) +# t5 = time.perf_counter() +# performance.update({'Overall time': t5-t0}) +# +# print("\n Conventional Dispatch (Normalized): \n", +# conv_dsptch_norm, +## "\n\n Renewable Generators: \n", +# aggr_gens, +## "\n\n Renewable Dispatch: \n", +# dispatch, +## "\n\n Renewable Curtailment: \n", +# curtailment, "\n\n") +# +# for keys, values in performance.items(): +# print(keys, ": ", values) +# +# return specs def get_etragospecs_direct(session, - bus_id, - eTraGo, - args): + bus_id, + etrago_network, + scn_name): """ - Reads eTraGo Results from Database and returns an Object of the Interface class ETraGoSpecs + Reads eTraGo Results from Database and returns and returns + the interface values as a dictionary of corresponding dataframes Parameters ---------- - session : :class:`~.` #Todo: Add class etc.... - Oemof session object (Database Interface) + session : sqlalchemy.orm.session.Session + Handles conversations with the database. bus_id : int ID of the corresponding HV bus - eTraGo : :class:`~.` #Todo: Add class etc.... 
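
# ---------------------------------------------------------------------------
# Aside (not part of the patch): further down, get_etragospecs_direct()
# normalises dispatch and available potential of each aggregated renewable
# generator by the aggregated capacity p_nom_aggr and takes curtailment as
# their difference.  A small, hypothetical numeric example:
import pandas as pd

snapshots = pd.RangeIndex(3)
p_nom, p_nom_aggr = 50.0, 200.0                          # MW: single unit vs. aggregate

p = pd.Series([10.0, 30.0, 45.0], index=snapshots)       # dispatched power in MW
p_max_pu = pd.Series([0.3, 0.8, 0.9], index=snapshots)   # availability per unit of p_nom

dispatch = (p / p_nom_aggr).round(3)                     # normalised dispatch
potential = (p_max_pu * p_nom / p_nom_aggr).round(3)     # normalised potential
curtailment = potential.sub(dispatch)                    # 0.025, 0.05, 0.0
# ---------------------------------------------------------------------------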
+ etrago_network: :class:`etrago.tools.io.NetworkScenario` + eTraGo network object compiled by :meth:`etrago.appl.etrago` + scn_name : str + Name of used scenario 'Status Quo', 'NEP 2035' or 'eGo 100' Returns ------- - etragospecs : :class:~.` - eDisGo ETraGoSpecs Object + :obj:`dict` of :pandas:`pandas.DataFrame` + Dataframes used as eDisGo inputs """ - print("\nSpecs Direct") specs_meta_data = {} performance = {} - specs_meta_data.update({'TG Bus ID':bus_id}) - - ormclass_result_meta = model_draft.__getattribute__('EgoGridPfHvResultMeta') - ormclass_aggr_w = model_draft.__getattribute__('ego_supply_aggr_weather_mview') - ormclass_source = model_draft.__getattribute__('EgoGridPfHvSource') - - snap_idx = eTraGo.snapshots - - if args['global']['recover'] == True: # If the results are beeing recovered, the scn_name cannot be used from Scenario Settings File - result_id = args['global']['result_id'] - scn_name = session.query( - ormclass_result_meta.scn_name - ).filter( - ormclass_result_meta.result_id == result_id - ).scalar( - ) - else: - scn_name = args['eTraGo']['scn_name'] - specs_meta_data.update({'scn_name':scn_name}) - - if scn_name == 'SH Status Quo': - scn_name = 'Status Quo' + specs_meta_data.update({'TG Bus ID': bus_id}) + +# ormclass_result_meta = model_draft.__getattribute__('EgoGridPfHvResultMeta') + ormclass_gen_single = model_draft.__getattribute__( + 'EgoSupplyPfGeneratorSingle') +# ormclass_aggr_w = model_draft.t_ego_supply_aggr_weather_mview + +# __getattribute__( +# 'ego_supply_aggr_weather_mview') + logger.warning('Weather table taken from model_draft') +# ormclass_source = model_draft.__getattribute__('EgoGridPfHvSource') +# logger.warning('Source table taken from model_draft') + + snap_idx = etrago_network.snapshots + +# # If the results are beeing recovered, the scn_name cannot be used from Scenario Settings File +# if args['global']['recover'] == True: +# result_id = args['global']['result_id'] +# scn_name = session.query( +# ormclass_result_meta.scn_name +# ).filter( +# ormclass_result_meta.result_id == result_id +# ).scalar( +# ) +# else: +# scn_name = args['eTraGo']['scn_name'] +# specs_meta_data.update({'scn_name': scn_name}) +# +# if scn_name == 'SH Status Quo': +# scn_name = 'Status Quo' # Generators t0 = time.perf_counter() - weather_dpdnt = ['wind','solar'] + weather_dpdnt = ['wind', 'solar', 'wind_onshore', 'wind_offshore'] - ## DF procesing - all_gens_df = eTraGo.generators[eTraGo.generators['bus'] == str(bus_id)] + # DF procesing + all_gens_df = etrago_network.generators[etrago_network.generators['bus'] == str( + bus_id)] all_gens_df.reset_index(inplace=True) - all_gens_df = all_gens_df.rename(columns={'index':'generator_id'}) + all_gens_df = all_gens_df.rename(columns={'index': 'generator_id'}) all_gens_df = all_gens_df[['generator_id', 'p_nom', 'p_nom_opt', 'carrier']] - names = [] - for index, row in all_gens_df.iterrows(): - carrier = row['carrier'] - name = session.query( - ormclass_source.name - ).filter( - ormclass_source.source_id == carrier - ).scalar( - ) + all_gens_df = all_gens_df.rename(columns={"carrier": "name"}) - names.append(name) + all_gens_df = all_gens_df[all_gens_df['name'] != 'wind_offshore'] + logger.warning('Wind offshore is disregarded in the interface') - all_gens_df['name'] = names - all_gens_df = all_gens_df.drop(['carrier'], axis=1) + for index, row in all_gens_df.iterrows(): + name = row['name'] + if name == 'wind_onshore': + all_gens_df.at[index, 'name'] = 'wind' + logger.warning('wind onshore is renamed to wind') + +# 
print(all_gens_df) +# names = [] +# for index, row in all_gens_df.iterrows(): +# carrier = row['carrier'] +# name = session.query( +# ormclass_source.name +# ).filter( +# ormclass_source.source_id == carrier +# ).scalar( +# ) +# +# names.append(name) +# all_gens_df['name'] = names - ## Conventionals +# all_gens_df = all_gens_df.drop(['carrier'], axis=1) + + # Conventionals t1 = time.perf_counter() - performance.update({'Generator Data Processing':t1-t0}) + performance.update({'Generator Data Processing': t1-t0}) conv_df = all_gens_df[~all_gens_df.name.isin(weather_dpdnt)] - conv_cap = conv_df[['p_nom','name']].groupby('name').sum().T + conv_cap = conv_df[['p_nom', 'name']].groupby('name').sum().T conv_dsptch_norm = pd.DataFrame(0.0, - index=snap_idx, - columns=list(set(conv_df['name']))) + index=snap_idx, + columns=list(set(conv_df['name']))) + conv_dsptch_abs = pd.DataFrame(0.0, + index=snap_idx, + columns=list(set(conv_df['name']))) for index, row in conv_df.iterrows(): generator_id = row['generator_id'] source = row['name'] - p = eTraGo.generators_t.p[str(generator_id)] + p = etrago_network.generators_t.p[str(generator_id)] p_norm = p / conv_cap[source]['p_nom'] conv_dsptch_norm[source] = conv_dsptch_norm[source] + p_norm + conv_dsptch_abs[source] = conv_dsptch_abs[source] + p - ## Renewables + # Renewables t2 = time.perf_counter() - performance.update({'Conventional Dispatch':t2-t1}) - ### Capacities + performance.update({'Conventional Dispatch': t2-t1}) + # Capacities ren_df = all_gens_df[all_gens_df.name.isin(weather_dpdnt)] - w_ids = [] + +# w_ids = [] for index, row in ren_df.iterrows(): aggr_id = row['generator_id'] w_id = session.query( - ormclass_aggr_w.c.w_id - ).filter( - ormclass_aggr_w.c.aggr_id == aggr_id, - ormclass_aggr_w.c.scn_name == scn_name - ).scalar( - ) + ormclass_gen_single.w_id + ).filter( + ormclass_gen_single.aggr_id == aggr_id, + ormclass_gen_single.scn_name == scn_name + ).limit(1).scalar( + ) - w_ids.append(w_id) + ren_df.at[index, 'w_id'] = w_id +# w_ids.append(w_id) - ren_df = ren_df.assign(w_id=pd.Series(w_ids, index=ren_df.index)) - ren_df.dropna(inplace=True) ##This should be unnecessary (and I think it isnt) +# ren_df = ren_df.assign(w_id=pd.Series(w_ids, index=ren_df.index)) +# # This should be unnecessary (and I think it isnt) + ren_df.dropna(inplace=True) +# print(ren_df) aggr_gens = ren_df.groupby([ - 'name', - 'w_id' - ]).agg({'p_nom': 'sum'}).reset_index() + 'name', + 'w_id' + ]).agg({'p_nom': 'sum'}).reset_index() aggr_gens.rename(columns={'p_nom': 'p_nom_aggr'}, inplace=True) aggr_gens['ren_id'] = aggr_gens.index +# print(aggr_gens) + ### Dispatch and Curteilment + potential = pd.DataFrame(0.0, + index=snap_idx, + columns=aggr_gens['ren_id']) dispatch = pd.DataFrame(0.0, index=snap_idx, columns=aggr_gens['ren_id']) curtailment = pd.DataFrame(0.0, - index=snap_idx, - columns=aggr_gens['ren_id']) + index=snap_idx, + columns=aggr_gens['ren_id']) + +# potential_abs = pd.DataFrame(0.0, +# index=snap_idx, +# columns=aggr_gens['ren_id']) +# dispatch_abs = pd.DataFrame(0.0, +# index=snap_idx, +# columns=aggr_gens['ren_id']) +# curtailment_abs = pd.DataFrame(0.0, +# index=snap_idx, +# columns=aggr_gens['ren_id']) for index, row in ren_df.iterrows(): gen_id = row['generator_id'] name = row['name'] w_id = row['w_id'] ren_id = int(aggr_gens[ - (aggr_gens['name'] == name) & - (aggr_gens['w_id'] == w_id)]['ren_id']) + (aggr_gens['name'] == name) & + (aggr_gens['w_id'] == w_id)]['ren_id']) - p_nom_aggr = float(aggr_gens[aggr_gens['ren_id'] == 
ren_id]['p_nom_aggr']) - p_nom = float(ren_df[ren_df['generator_id'] == gen_id]['p_nom']) + p_nom_aggr = float( + aggr_gens[aggr_gens['ren_id'] == ren_id]['p_nom_aggr']) +# p_nom = float(ren_df[ren_df['generator_id'] == gen_id]['p_nom']) + p_nom = row['p_nom'] - p_series = eTraGo.generators_t.p[str(gen_id)] + p_series = etrago_network.generators_t.p[str(gen_id)] p_norm_tot_series = p_series / p_nom_aggr - p_max_pu_series = eTraGo.generators_t.p_max_pu[str(gen_id)] + p_max_pu_series = etrago_network.generators_t.p_max_pu[str(gen_id)] +# p_max_series = p_max_pu_series * p_nom p_max_norm_tot_series = p_max_pu_series * p_nom / p_nom_aggr - p_curt_norm_tot_series = p_max_norm_tot_series - p_norm_tot_series +# p_curt_tot_series = p_max_series - p_series +# p_curt_norm_tot_series = p_max_norm_tot_series - p_norm_tot_series + potential[ren_id] = potential[ren_id] + p_max_norm_tot_series dispatch[ren_id] = dispatch[ren_id] + p_norm_tot_series - curtailment[ren_id] = curtailment[ren_id] + p_curt_norm_tot_series +# curtailment[ren_id] = curtailment[ren_id] + p_curt_norm_tot_series + + potential = potential.round(3) + dispatch = dispatch.round(3) + + logger.warning('Rounding normalized values') + curtailment = potential.sub(dispatch) + + +# potential_abs[ren_id] = potential_abs[ren_id] + p_max_series +# dispatch_abs[ren_id] = dispatch_abs[ren_id] + p_series +# curtailment_abs[ren_id] = curtailment_abs[ren_id] + p_curt_tot_series + + +# potential = dispatch + curtailment + + new_columns = [ + (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], + aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) + for col in potential.columns] + potential.columns = pd.MultiIndex.from_tuples(new_columns) + + new_columns = [ + (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], + aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) + for col in dispatch.columns] + dispatch.columns = pd.MultiIndex.from_tuples(new_columns) + + new_columns = [ + (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], + aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) + for col in curtailment.columns] + curtailment.columns = pd.MultiIndex.from_tuples(new_columns) + +# new_columns = [ +# (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], +# aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) +# for col in potential_abs.columns] +# potential_abs.columns = pd.MultiIndex.from_tuples(new_columns) +# +# new_columns = [ +# (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], +# aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) +# for col in dispatch_abs.columns] +# dispatch_abs.columns = pd.MultiIndex.from_tuples(new_columns) +# +# new_columns = [ +# (aggr_gens[aggr_gens.ren_id == col].name.iloc[0], +# aggr_gens[aggr_gens.ren_id == col].w_id.iloc[0]) +# for col in curtailment_abs.columns] +# curtailment_abs.columns = pd.MultiIndex.from_tuples(new_columns) +# +# potential_abs = potential_abs * 1000 # Absolute amounts in kW +# dispatch_abs = dispatch_abs * 1000 +# curtailment_abs = curtailment_abs * 1000 # Storage t3 = time.perf_counter() - performance.update({'Renewable Dispatch and Curt.':t3-t2}) - ## Capactiy - stor_df = eTraGo.storage_units[eTraGo.storage_units['bus'] == str(bus_id)] + performance.update({'Renewable Dispatch and Curt.': t3-t2}) + # Capactiy + stor_df = etrago_network.storage_units[etrago_network.storage_units['bus'] == str( + bus_id)] stor_df.reset_index(inplace=True) - stor_df = stor_df.rename(columns={'index':'storage_id'}) + stor_df = stor_df.rename(columns={'index': 'storage_id'}) stor_df = stor_df[[ - 'storage_id', - 'p_nom_opt', - 'p_nom', - 
'max_hours', - 'carrier']] - - names = [] - for index, row in stor_df.iterrows(): - carrier = row['carrier'] - name = session.query( - ormclass_source.name - ).filter( - ormclass_source.source_id == carrier - ).scalar( - ) - - names.append(name) + 'storage_id', + 'p_nom_opt', + 'p_nom', + 'max_hours', + 'carrier']] + +# print(stor_df) + +# names = [] +# for index, row in stor_df.iterrows(): +# carrier = row['carrier'] +# name = session.query( +# ormclass_source.name +# ).filter( +# ormclass_source.source_id == carrier +# ).scalar( +# ) +# +# names.append(name) +# +# stor_df = stor_df.assign(name=pd.Series(names, index=stor_df.index)) +# stor_df = stor_df.drop(['carrier'], axis=1) - stor_df = stor_df.assign(name=pd.Series(names, index=stor_df.index)) - stor_df = stor_df.drop(['carrier'], axis=1) + stor_df = stor_df.rename(columns={"carrier": "name"}) stor_df['capacity_MWh'] = stor_df['p_nom_opt'] * stor_df['max_hours'] @@ -546,68 +693,56 @@ def get_etragospecs_direct(session, if row['max_hours'] >= 20.0: stor_df.at[index, 'name'] = 'ext_long_term' else: - stor_df.at[index, 'name'] = 'battery' # ToDo: find a more generic solution + # ToDo: find a more generic solution + stor_df.at[index, 'name'] = 'battery' count_bat += 1 -### Project Specific Battery Capacity - battery_capacity = 0.0 # MWh +# Project Specific Battery Capacity + battery_capacity = 0.0 # MWh for index, row in stor_df.iterrows(): if row['name'] == 'battery': battery_capacity = battery_capacity + row['capacity_MWh'] - ### Project Specific Battery Active Power - battery_active_power = pd.Series(0.0, index = snap_idx) + # Project Specific Battery Active Power + battery_active_power = pd.Series(0.0, index=snap_idx) for index, row in stor_df.iterrows(): name = row['name'] stor_id = row['storage_id'] if name == 'battery': - stor_series = eTraGo.storage_units_t.p[str(stor_id)] + stor_series = etrago_network.storage_units_t.p[str(stor_id)] stor_series_kW = stor_series * 1000 battery_active_power = battery_active_power + stor_series_kW t4 = time.perf_counter() - performance.update({'Storage Data Processing and Dispatch':t4-t3}) - - specs = ETraGoSpecs(battery_capacity=battery_capacity, - battery_active_power=battery_active_power, - - conv_dispatch=conv_dsptch_norm, - - renewables=aggr_gens, - ren_dispatch=dispatch, - ren_curtailment=curtailment) + performance.update({'Storage Data Processing and Dispatch': t4-t3}) + + specs = { + # 'battery_capacity': battery_capacity, + # 'battery_active_power': battery_active_power, + 'conv_dispatch': conv_dsptch_norm, + # 'conv_dispatch_abs': conv_dsptch_abs, + # 'renewables': aggr_gens, + 'dispatch': dispatch, + # 'dispatch_abs': dispatch_abs, + 'potential': potential, + # 'potential_abs': potential_abs, + 'curtailment': curtailment # , + # 'curtailment_abs': curtailment_abs + } + +# specs = ETraGoSpecs(battery_capacity=battery_capacity, +# battery_active_power=battery_active_power, +# +# conv_dispatch=conv_dsptch_norm, +# +# renewables=aggr_gens, +# ren_dispatch=dispatch, +# ren_curtailment=curtailment) t5 = time.perf_counter() - performance.update({'Overall time':t5-t0}) + performance.update({'Overall time': t5-t0}) - #print(performance) - -# print("\n Conventional Dispatch (Normalized): \n", -# conv_dsptch_norm, -# "\n\n Renewable Generators: \n", -# aggr_gens, -# "\n\n Renewable Dispatch: \n", -# dispatch, -# "\n\n Renewable Curtailment: \n", -# curtailment, "\n\n") -# # for keys,values in performance.items(): # print(keys, ": ", values) return specs - - -def get_mvgrid_from_bus_id(session, - 
bus_id): - # Mapping - ormclass_hvmv_subst = model_draft.__getattribute__('EgoGridHvmvSubstation') - subst_id = session.query( - ormclass_hvmv_subst.subst_id - ).filter( - ormclass_hvmv_subst.otg_id == bus_id - ).scalar( - ) - #ToDo Check if subst_id is really the mv grid ID - # Anyway, this should be adapted by Dingo - return subst_id - diff --git a/ego/tools/storages.py b/ego/tools/storages.py new file mode 100644 index 00000000..b76ee34c --- /dev/null +++ b/ego/tools/storages.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# File description +"""This module contains functions to summarize and studies on storages. +""" + +import io +import os +import logging +logger = logging.getLogger('ego') + +if not 'READTHEDOCS' in os.environ: + import pandas as pd + import numpy as np + +__copyright__ = ("Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems") +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolf_bunke,maltesc" + + +def total_storage_charges(network): + """Sum up the pysical storage values of the total scenario based on + eTraGo results. + + Parameters + ---------- + network : :class:`etrago.tools.io.NetworkScenario` + eTraGo ``NetworkScenario`` based on PyPSA Network. See also + `pypsa.network `_ + + Returns + ------- + results : :pandas:`pandas.DataFrame` + Summarize and returns a ``DataFrame`` of the storages optimaziation. + + Notes + ----- + + The ``results`` dataframe inclueds following parameters: + + charge : numeric + Quantity of charged Energy in MWh over scenario time steps + discharge : numeric + Quantity of discharged Energy in MWh over scenario time steps + count : int + Number of storage units + p_nom_o_sum: numeric + Sum of optimal installed power capacity + """ + + charge = network.storage_units_t.\ + p[network.storage_units_t.p[network. + storage_units[network.storage_units. + p_nom_opt > 0].index]. + values > 0.].groupby(network.storage_units. + carrier, axis=1).sum().sum() + + discharge = network.storage_units_t.p[network.storage_units_t. + p[network. + storage_units[network.storage_units. + p_nom_opt > 0]. + index].values < 0.].\ + groupby(network.storage_units.carrier, axis=1).sum().sum() + + count = network.storage_units.bus[network.storage_units.p_nom_opt > 0].\ + groupby(network.storage_units.carrier, axis=0).count() + + p_nom_sum = network.storage_units.p_nom.groupby(network.storage_units. + carrier, axis=0).sum() + + p_nom_o_sum = network.storage_units.p_nom_opt.groupby(network.storage_units. 
+ carrier, axis=0).sum() + p_nom_o = p_nom_sum - p_nom_o_sum # Zubau + + results = pd.concat([charge.rename('charge'), discharge.rename('discharge'), + p_nom_sum, count.rename('total_units'), p_nom_o + .rename('extension'), ], axis=1, join='outer') + + return results + + +def etrago_storages(network): + """Using function ``total_storage_charges`` for storage and grid expantion + costs of eTraGo. + + Parameters + ---------- + network : :class:`etrago.tools.io.NetworkScenario` + eTraGo ``NetworkScenario`` based on PyPSA Network. See also + `pypsa.network `_ + + Returns + ------- + storages : :pandas:`pandas.DataFrame` + DataFrame with cumulated results of storages + + """ + # Charge / discharge (MWh) and installed capacity MW + storages = total_storage_charges(network=network) + + return storages + + +def etrago_storages_investment(network, json_file): + """Calculate storage investment costs of eTraGo + + Parameters + ---------- + network : :class:`etrago.tools.io.NetworkScenario` + eTraGo ``NetworkScenario`` based on PyPSA Network. See also + `pypsa.network `_ + + + Returns + ------- + storage_costs : numeric + Storage costs of selected snapshots in [EUR] + + """ + # check settings for extendable + if 'storages' not in json_file['eTraGo']['extendable']: + print("The optimizition was not using parameter 'extendable': storages") + print("No storages expantion costs from etrago") + + if 'storages' in json_file['eTraGo']['extendable']: + + # get v_nom + _bus = pd.DataFrame(network.buses['v_nom']) + _bus.reset_index(level=0, inplace=True) + + _storage = network.storage_units[network.storage_units.p_nom_opt != 0] + + # provide storage installation costs per voltage level + installed_storages = \ + pd.merge(_storage, _bus, left_on='bus', right_on='index') + + installed_storages['investment_costs'] = (installed_storages. + capital_cost * + installed_storages.p_nom_opt) + + # add voltage_level + installed_storages['voltage_level'] = 'unknown' + + ix_ehv = installed_storages[installed_storages['v_nom'] >= 380].index + installed_storages.set_value(ix_ehv, 'voltage_level', 'ehv') + + ix_hv = installed_storages[(installed_storages['v_nom'] <= 220) & + (installed_storages['v_nom'] >= 110)].index + installed_storages.set_value(ix_hv, 'voltage_level', 'hv') + + storages_investment = installed_storages[ + ['voltage_level', 'investment_costs']].groupby('voltage_level').\ + sum().reset_index() + storages_investment = storages_investment.\ + rename(columns={'investment_costs': 'capital_cost'}) + + return storages_investment diff --git a/ego/tools/utilities.py b/ego/tools/utilities.py index 6f8e6454..bef24f1d 100644 --- a/ego/tools/utilities.py +++ b/ego/tools/utilities.py @@ -1,57 +1,138 @@ -""" -Utility functions of eGo -""" - +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Europa-Universität Flensburg, +# Flensburg University of Applied Sciences, +# Centre for Sustainable Energy Systems +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. 
If not, see . +# File description +"""This module contains utility functions for the eGo application. +""" import os import pandas as pd import json -# import scenario settings **args +import logging -def get_scenario_setting(json_file='scenario_setting.json'): - """ Get and open json file with scenaio settings of eGo +__copyright__ = ("Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems") +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "wolf_bunke" + + +def define_logging(log_name='ego.log'): + """Helpers to log your modeling process with eGo and defines all settings. Parameters ---------- + log_name : str + Name of log file. Default: ``ego.log``. + + Returns + ------- + logger : :class:`logging.basicConfig`. + Set up ``logger`` object of package ``logging`` + """ + + # ToDo: Logger should be set up more specific + # add pypsa and other logger INFO to ego.log + + # Logging + logging.basicConfig(format='%(asctime)s %(message)s', + level=logging.INFO) + + logger = logging.getLogger(__name__) + logger = logging.getLogger('ego') + + logger = logging.FileHandler(log_name, mode='w') + + formatter = logging.Formatter('%(asctime)s - %(name)s - \ + %(levelname)s - %(message)s') + logger.setFormatter(formatter) + + # logger.addHandler(xy) - json_file (str): - default: 'scenario_setting.json' + return logger + + +logger = define_logging(log_name='ego.log') + +# import scenario settings **args + + +def get_scenario_setting(jsonpath='scenario_setting.json'): + """Get and open json file with scenaio settings of eGo. + The settings incluede global, eTraGo and eDisGo specific + settings of arguments and parameters for a reproducible + calculation. + + Parameters + ---------- + json_file : str + Default: ``scenario_setting.json`` Name of scenario setting json file + + Returns + ------- + json_file : dict + Dictionary of json file """ path = os.getcwd() # add try ego/ - print(path) - with open(path +'/'+json_file) as f: - scn_set = json.load(f) + print("Your path is:\n", path) + + with open(path + '/'+jsonpath) as f: + json_file = json.load(f) - if scn_set['global'].get('eTraGo') == True: - print('Use eTraGo settings') + if json_file['global'].get('eTraGo') == True: + + print('Using and importing eTraGo settings') + + # special case of SH and model_draft + # ToDo: check and maybe remove this part sh_scen = ["SH Status Quo", "SH NEP 2035", "SH eGo 100"] - if scn_set['eTraGo'].get('scn_name') in sh_scen and scn_set['eTraGo'].\ - get('gridversion') == "v0.3.0": - scn_set['eTraGo']['gridversion'] = None + if json_file['eTraGo'].get('scn_name') in sh_scen and json_file['eTraGo'].\ + get('gridversion') == "v0.4.2": + json_file['eTraGo']['gridversion'] = None - if scn_set['global'].get('eDisGo') == True: + # add global parameter to eTraGo scn_set + json_file['eTraGo'].update({'db': json_file['global'].get('db')}) + json_file['eTraGo'].update( + {'gridversion': json_file['global'].get('gridversion')}) + + if json_file['global'].get('eDisGo') == True: print('Use eDisGo settings') - return scn_set + return json_file + -def get_time_steps(args): +def get_time_steps(json_file): """ Get time step of calculation by scenario settings. Parameters ---------- - args (dict): - dict of 'scenario_setting.json' + json_file : :obj:`dict` + Dictionary of the ``scenario_setting.json`` file Returns ------- - time_step (int): + time_step : int Number of timesteps of the calculation. 
""" - end = args['eTraGo'].get('end_snapshot') - start = args['eTraGo'].get('start_snapshot') + end = json_file['eTraGo'].get('end_snapshot') + start = json_file['eTraGo'].get('start_snapshot') time_step = end - start return time_step diff --git a/ego_dependencies.txt b/ego_dependencies.txt index 050263f1..6f1d5191 100644 --- a/ego_dependencies.txt +++ b/ego_dependencies.txt @@ -1,8 +1,8 @@ # Packages of eGo including dependencies of eDisGo, etraGo and ding0 # cloned Packages of eGo: --e git+git@github.com:python-visualization/folium.git@5739244acb9868d001032df288500a047b232857#egg=folium --e git+https://github.com/openego/PyPSA.git@641f1e569966cfdd4cbd1c6810ade69d90a54286#egg=pypsa +-e git+git@github.com:python-visualization/folium.git@master#egg=folium +-e git+https://github.com/openego/PyPSA.git@dev#egg=pypsa # Used Packages and dependencies appdirs==1.4.3 @@ -20,7 +20,7 @@ demandlib==0.1.1 descartes==1.1.0 ding0==0.1.4 eDisGo==0.0.1 --e git+https://github.com/openego/eGo@43e840b79c00aba2d0eab9b6e632fe438ebb30a2#egg=eGo +-e git+https://github.com/openego/eGo@dev#egg=eGo egoio==0.3.0 ephem==3.7.6.0 eTraGo==0.5.1 diff --git a/requirements.txt b/requirements.txt index 0c733bb6..f416bb31 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,3 +8,4 @@ numpy == 1.12.1 numpydoc == 0.7.0 sphinxcontrib-httpdomain aiohttp_jinja2 +sphinx-jsondomain diff --git a/setup.py b/setup.py index afe0df17..8bc8f3f0 100644 --- a/setup.py +++ b/setup.py @@ -1,43 +1,57 @@ -__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, Next Energy, " +import os +from setuptools import find_packages, setup + +__copyright__ = ("Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems") __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "wolf_bunke" +__author__ = "wolf_bunke, maltesc" -from setuptools import find_packages, setup -import os +def read(fname): + return open(os.path.join(os.path.dirname(__file__), fname)).read() + setup(name='eGo', + version='0.2.0', author='wolfbunke, maltesc', author_email='', - description='A python package for distribution and transmission grid analysis and optimization based eDisGo and eTraGo', - version='0.1.0', - url='https://github.com/openego/eGo', + description=("A python package for distribution and transmission" + "grid analysis and optimization based eDisGo and eTraGo"), + long_description=read('README.rst'), + long_description_content_type="text/x-rst", + url='https://github.com/openego/eGo', license="GNU Affero General Public License Version 3 (AGPL-3.0)", packages=find_packages(), + package_dir={'ego': 'ego'}, include_package_data=True, - install_requires=['egoio == 0.3.0', - 'eDisGo == 0.0.2', - 'pandas ==0.20.3', - 'sqlalchemy >= 1.0.15, <= 1.2.0', - 'geoalchemy2 >= 0.3.0, <=0.4.0', - 'pyproj == 1.9.5.1', + install_requires=['egoio==0.4.1', + 'eDisGo==0.0.4', + 'eTraGo==0.6.1', + 'pandas==0.20.3', + 'pypsa==0.11.0fork', + 'sqlalchemy<=1.1.4,>=1.0.15', + 'geoalchemy2>= 0.3.0, <=0.4.0', + 'pyproj==1.9.5.1', 'geopandas==0.3.0', + 'matplotlib>= 1.5.3, <=1.5.3', 'Rtree==0.8.3', 'plotly==2.2.3', - 'eTraGo==0.5.1', - 'matplotlib >= 1.5.3, <=1.5.3'], - dependency_links=['git+https://git@github.com/openego/PyPSA.git@dev#egg=PyPSA', - 'git+https://git@github.com:python-visualization/folium.git@5739244acb9868d001032df288500a047b232857' - ], - extras_require={ - 'docs': [ - 'sphinx >= 1.4', - 'sphinx_rtd_theme', - 
'sphinxcontrib-httpdomain']},
-      package_data={
-        'ego': [
-            os.path.join('*.json'),
-            os.path.join('tools','*.json'),
-            os.path.join('data','*.csv') ]
-          }
-      )
+                          'Pyomo==5.5.0',
+                          'oedialect'
+                          ],
+      dependency_links=[('git+https://git@github.com/openego/PyPSA.git'
+                         '@dev#egg=pypsa-0.11.0fork')
+                        ],
+      extras_require={
+          'doc': [
+              'sphinx >= 1.4',
+              'sphinx_rtd_theme',
+              'sphinxcontrib-httpdomain']},
+      package_data={
+          'ego': [os.path.join('tools', '*.csv'),
+                  os.path.join('tools', '*.json'),
+                  os.path.join('', '*.json')],
+          'ego.data': ['*.csv']
+      },
+      )
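
The reworked ``get_etragospecs_direct`` in ``ego/tools/specs.py`` above drops the per-generator source-table lookups and instead renames the ``carrier`` column, discards offshore wind and folds ``wind_onshore`` into plain ``wind``. A minimal sketch of that selection step on an invented generators table (column names follow the hunk, all values are hypothetical):

.. code-block:: python

    import pandas as pd

    # Invented stand-in for etrago_network.generators.
    generators = pd.DataFrame(
        {'bus': ['1', '1', '1', '2'],
         'p_nom': [50.0, 120.0, 80.0, 30.0],
         'p_nom_opt': [50.0, 120.0, 80.0, 30.0],
         'carrier': ['gas', 'wind_onshore', 'wind_offshore', 'solar']},
        index=['101', '102', '103', '104'])

    bus_id = 1

    # Select the generators at the bus, expose the index as generator_id
    # and treat the carrier as the generator name.
    all_gens_df = generators[generators['bus'] == str(bus_id)].reset_index()
    all_gens_df = all_gens_df.rename(
        columns={'index': 'generator_id', 'carrier': 'name'})
    all_gens_df = all_gens_df[['generator_id', 'p_nom', 'p_nom_opt', 'name']]

    # Offshore wind is dropped, onshore wind is mapped to 'wind'.
    all_gens_df = all_gens_df[all_gens_df['name'] != 'wind_offshore']
    all_gens_df.loc[all_gens_df['name'] == 'wind_onshore', 'name'] = 'wind'

    print(all_gens_df)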
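
Further down, the interface builds normalized ``potential`` and ``dispatch`` frames per aggregated weather-cell generator, derives ``curtailment`` as their rounded difference and relabels the columns with a (carrier, weather id) MultiIndex. A small sketch of that post-processing with made-up numbers; the single ``cols`` object stands in for the three identical ``new_columns`` lists in the hunk:

.. code-block:: python

    import pandas as pd

    # Hypothetical aggregated renewables at one bus, as in aggr_gens above.
    aggr_gens = pd.DataFrame({'name': ['wind', 'solar'],
                              'w_id': [11, 42],
                              'p_nom_aggr': [60.0, 20.0]})
    aggr_gens['ren_id'] = aggr_gens.index

    snap_idx = pd.date_range('2011-01-01', periods=3, freq='H')

    # Normalized feed-in potential and dispatch per aggregated generator
    # (invented values standing in for p_max_pu * p_nom / p_nom_aggr etc.).
    potential = pd.DataFrame([[0.8, 0.1], [0.6, 0.3], [0.9, 0.0]],
                             index=snap_idx, columns=aggr_gens['ren_id'])
    dispatch = pd.DataFrame([[0.7, 0.1], [0.6, 0.2], [0.5, 0.0]],
                            index=snap_idx, columns=aggr_gens['ren_id'])

    # Round, take curtailment as the gap between potential and dispatch,
    # and relabel the columns with (carrier, weather id) tuples.
    potential = potential.round(3)
    dispatch = dispatch.round(3)
    curtailment = potential.sub(dispatch)

    cols = pd.MultiIndex.from_tuples(
        [(aggr_gens.at[c, 'name'], aggr_gens.at[c, 'w_id'])
         for c in potential.columns])
    for df in (potential, dispatch, curtailment):
        df.columns = cols

    print(curtailment)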
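
The new ``total_storage_charges`` in ``ego/tools/storages.py`` sums the positive and negative parts of the optimised storage dispatch per carrier. The same per-carrier bookkeeping can be sketched more compactly with ``clip()`` instead of the boolean masking used in the hunk; the frames below are invented and the ``p_nom_opt > 0`` filter is kept:

.. code-block:: python

    import pandas as pd

    # Invented counterparts of network.storage_units and storage_units_t.p.
    storage_units = pd.DataFrame(
        {'carrier': ['battery', 'battery', 'hydrogen'],
         'p_nom': [0.0, 0.0, 5.0],
         'p_nom_opt': [5.0, 3.0, 10.0]},
        index=['s1', 's2', 's3'])

    p = pd.DataFrame({'s1': [2.0, -1.0, 0.5],
                      's2': [1.0, -2.0, 0.0],
                      's3': [-4.0, 3.0, 1.0]})

    # Keep only units with optimised capacity, then group by carrier.
    active = storage_units[storage_units['p_nom_opt'] > 0]
    carrier = active['carrier']
    p_active = p[active.index]

    charge = p_active.clip(lower=0).sum().groupby(carrier).sum()
    discharge = p_active.clip(upper=0).sum().groupby(carrier).sum()
    count = active.groupby('carrier').size()
    p_nom_opt_sum = active.groupby('carrier')['p_nom_opt'].sum()

    results = pd.concat([charge.rename('charge'),
                         discharge.rename('discharge'),
                         count.rename('total_units'),
                         p_nom_opt_sum.rename('p_nom_opt_sum')],
                        axis=1, join='outer')
    print(results)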
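
``etrago_storages_investment`` above prices the optimised storage capacity per voltage level. A sketch of the same merge-and-group step on invented buses and storage units, written with ``.loc`` where the hunk still relies on the deprecated ``DataFrame.set_value``:

.. code-block:: python

    import pandas as pd

    # Hypothetical buses and optimised storage units (numbers invented).
    buses = pd.DataFrame({'v_nom': [380.0, 110.0]}, index=['b1', 'b2'])
    storage_units = pd.DataFrame(
        {'bus': ['b1', 'b2'],
         'p_nom_opt': [100.0, 20.0],
         'capital_cost': [1000.0, 1500.0]},   # EUR per MW
        index=['s1', 's2'])

    # Attach v_nom via a merge and price the optimised capacity.
    _bus = buses[['v_nom']].reset_index()
    installed = pd.merge(storage_units, _bus, left_on='bus', right_on='index')
    installed['investment_costs'] = (installed['capital_cost'] *
                                     installed['p_nom_opt'])

    # Label the voltage level and sum the costs per level.
    installed['voltage_level'] = 'unknown'
    installed.loc[installed['v_nom'] >= 380, 'voltage_level'] = 'ehv'
    installed.loc[(installed['v_nom'] <= 220) & (installed['v_nom'] >= 110),
                  'voltage_level'] = 'hv'

    storages_investment = (installed[['voltage_level', 'investment_costs']]
                           .groupby('voltage_level').sum().reset_index()
                           .rename(columns={'investment_costs': 'capital_cost'}))
    print(storages_investment)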