diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..e285d975 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,21 @@ +Fixes # + +## Before merging into `dev`-branch, please make sure that the following points are checked: + +- [ ] All pre-commit tests passed locally with: `pre-commit run -a` +- [ ] File `CHANGELOG.md` was updated +- [ ] The docs were updated + - [ ] if `dataset.md`s were updated, the dataset docs were regenerated using + `python docs/generate_dataset_mds.py` + +If packages were modified: +- [ ] File `poetry.lock` was updated with: `poetry lock` +- [ ] A new env was successfully set up + +WARNING: When modifying use snakemake <=7.32.0, cf. #186 + +If data flow was adjusted: +- [ ] Data pipeline run finished successfully with: `snakemake -jX` +- [ ] Esys appdata was created successfully with: `snakemake -jX make_esys_appdata` + + (with `X` = desired number of cores, e.g. 1) diff --git a/.gitignore b/.gitignore index b29049c0..f3d4d508 100644 --- a/.gitignore +++ b/.gitignore @@ -71,6 +71,9 @@ instance/ # Sphinx documentation docs/_build/ +# Mkdocs documentation +site/ + # PyBuilder target/ @@ -132,3 +135,29 @@ dmypy.json .DS_Store .snakemake .idea + +# Data files +digipipe/store/raw/*/data/* +!digipipe/store/raw/*/data/.gitkeep +!digipipe/store/raw/.TEMPLATE/data/* + +digipipe/store/preprocessed/*/data/* +!digipipe/store/preprocessed/*/data/.gitkeep +!digipipe/store/preprocessed/.TEMPLATE/data/* + +digipipe/store/datasets/*/data/* +!digipipe/store/datasets/*/data/.gitkeep +!digipipe/store/datasets/.TEMPLATE/data/* + +digipipe/store/appdata/esys/* +digipipe/store/appdata/*/data/* +!digipipe/store/appdata/*/data/.gitkeep +!digipipe/store/appdata/datapackage/data/captions/.gitkeep +!digipipe/store/appdata/datapackage/data/geodata/.gitkeep +!digipipe/store/appdata/datapackage/data/scalars/.gitkeep +!digipipe/store/appdata/datapackage/data/settings/.gitkeep +!digipipe/store/appdata/datapackage/data/sequences/.gitkeep +!digipipe/store/appdata/*/.gitkeep + +digipipe/store/temp/* +!digipipe/store/temp/.gitkeep \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..2c88cf98 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,107 @@ +# TODO: Find out if something needs to be changed in here in comparison to digiplan + +exclude: 'docs|node_modules|vendors|migrations|.git|.tox' +default_stages: [commit] +fail_fast: true + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: check-json + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-added-large-files + + - repo: https://github.com/charliermarsh/ruff-pre-commit + # Ruff version. 
+ rev: 'v0.0.244' + hooks: + - id: ruff + args: ["--fix"] + + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + name: isort (python) + args: ["--line-length", "80", "black"] + + - repo: local + hooks: + - id: black + name: black + entry: black + language: python + types: [python] + - id: flake8 + name: flake8 + args: + - --max-line-length=80 + - --ignore=DAR101,DAR201,F821,DAR401,W503,A001,A003,E800,E722,B001,B008,B023,C408,C417,W605,B007,S101 + entry: flake8 + language: python + types: [python] + - id: pylint + name: pylint + entry: env DATABASE_URL=null env PROJ_LIB=null USE_DOCKER=null pylint + language: system + types: [python] + args: + [ + "-rn", # Only display messages + "-sn", # Don't display the score + "--disable=E0602", # Disable the E0602 error + "--disable=C0114", # Disable the C0114 error TODO: To be fixed + "--disable=C0116", # Disable the C0116 error TODO: To be fixed + "--disable=R1729", # Disable the R1729 error TODO: To be fixed + "--disable=C0103", # Disable the C0103 error TODO: To be fixed + "--disable=R0801", # Disable the R0801 error TODO: To be fixed + "--disable=W1514", # Disable the W1514 error TODO: To be fixed + "--disable=R1734", # Disable the R1734 error TODO: To be fixed + "--disable=R1735", # Disable the R1735 error TODO: To be fixed + "--disable=W0612", # Disable the W0612 error TODO: To be fixed + "--disable=W1401", # Disable the W1401 error TODO: To be fixed + "--disable=W0511", # Disable the W0511 error TODO: To be fixed + "--disable=R0913", # Disable the R0913 error TODO: To be fixed + "--disable=R1705", # Disable the R1705 error TODO: To be fixed + "--disable=E0401", # Disable the E0401 error TODO: To be fixed + "--disable=E0611", # Disable the E0611 error TODO: To be fixed + "--disable=W0621", # Disable the W0621 error TODO: To be fixed + "--disable=W0640", # Disable the W0640 error TODO: To be fixed + "--disable=C0115", # Disable the C0115 error TODO: To be fixed + "--disable=R1725", # Disable the R1725 error TODO: To be fixed + "--disable=R0903", # Disable the R0903 error TODO: To be fixed + "--disable=C0302", # Disable the C0302 error TODO: To be fixed + "--disable=W0707", # Disable the W0707 error TODO: To be fixed + "--disable=W0108", # Disable the W0108 error TODO: To be fixed + "--disable=W1203", # Disable the W1203 error TODO: To be fixed + "--disable=W0622", # Disable the W0622 error TODO: To be fixed + "--disable=R1720", # Disable the R1720 error TODO: To be fixed + "--disable=R0914", # Disable the R0914 error TODO: To be fixed + "--disable=R1724", # Disable the R1724 error TODO: To be fixed + "--disable=W0012", # Disable the W0012 error TODO: To be fixed + "--disable=W0611", # Disable the W0611 error TODO: To be fixed + "--disable=C0207", # Disable the C0207 error TODO: To be fixed + "--max-line-length=80", # Set the maximum line length to 80 + ] + - id: mypy + name: mypy + entry: mypy + language: python + types: [python] + args: + [ + "--ignore-missing-imports", # Ignore missing imports + "--warn-unused-ignores", # Warn about unused ignore comments + "--disable-error-code=name-defined", # To suppress Name "snakemake" + # is not defined [name-defined] + "--disable-error-code=var-annotated", + "--disable-error-code=var-annotated", + "--disable-error-code=dict-item", + "--disable-error-code=arg-type", + "--disable-error-code=assignment", + "--disable-error-code=attr-defined", + "--disable-error-code=index", + "--disable-error-code=misc", + ] diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 
index 00000000..ba84eb89 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,23 @@ +# Read the Docs configuration file for MkDocs projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.11" + jobs: + pre_install: + - python docs/generate_dataset_mds.py + + +mkdocs: + configuration: mkdocs.yml + +# Optionally declare the Python requirements required to build your docs +python: + install: + - requirements: docs/requirements.txt \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 52377f3e..bc02224c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,12 +4,155 @@ All notable changes to this project will be documented in this file. The format is inspired from [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and the versioning aim to respect [Semantic Versioning](http://semver.org/spec/v2.0.0.html). -## [Unreleased] +## [1.0.0] - 2023-11-07 ### Added + - Setup initial structure and files +- Add first bunch of datasets for testing the data flow +- Modularize datasets +- Add first draft of RTD docs +- Add dataset: BNetzA MaStR +- Add Nominatim geocoder +- Add dataset: population +- Clean rule +- Datasets attribute captions +- Create list of region-specific datasets in the docs +- pyproject.toml and poetry.lock file have been added with the conversion to poetry +- Add pre-commit in order to check for errors and linting bugs before commits +- Add types-pyyaml package +- Add dataset: employees and companies +- Add dataset: demandregio electricity demand +- Add dataset: BMWK long-term scenarios +- Add rules to download raw data (zipped) from cloud, extract and copy to 'store/raw' +- Add module 'data_io', containing relevant helper functions (downloading, extracting, copying, cleaning) +- Create metadata +- Add script to generate dataset md files for documentation +- Add dataset: demand_heat_region +- Add dataset: heatpump_cop +- Add dataset: stala_st_energy +- Add dataset: eurostat_lau +- Add dataset: regstat_energy +- Add dataset: dwd_temperature +- Add dataset: ageb_energy_balance +- Add dataset: seenergies_peta5 +- Add dataset: renewables.ninja_feedin +- Add dataset: renewable_feedin +- Add dataset: bnetza_mastr_correction_region and correct wrong units +- Integrate building of energy system for appdata in pipeline via dir *esys* +- Update store with dir structure for *esys* data +- Add creation of empty time series for the *esys* +- Add dataset: rpg_abw_regional_plan +- Add dataset: potentialarea_wind_region +- Add writing of default values to *esys* raw scalar data +- Add datasets: rli_pv_wfr and rli_pv_wfr_region +- Add module appdata to workflow +- Add dataset: geodata_infolayers +- Add dataset: potentialarea_pv_ground_region +- Add dataset: app datapackage +- Add dataset: potentialarea_pv_roof_region +- Add dataset: technology_data +- Add dataset: settings +- Calc panel settings from datasets +- Add dataset: osm (Germany) +- Add dataset: osm_buildings and add stats on ground areas +- Add mapping of costs and efficiencies from store/raw to store/datasets +- Add dataset: emissions +- Add captions to app datapackage (here: MaStR, heat, potentialarea_wind) +- Add mapping of time series data in datasets to empty time series according to + the mapping provided in map_ts.yml +- Add build configuration for readthedocs +- Add creation of stats of development over time for bnetza_mastr_wind_region, + 
bnetza_mastr_pv_ground_region, bnetza_mastr_pv_roof_region +- Add dataset: dbfz_biomass_heat_capacities +- Add the calculation of relative demand of biomass conversion technologies via + their relative capacities +- Add deletion of all data in store/datasets/esys_raw/data +- Add notes on OSM download and run resources +- Add nominal power per wind turbine for 2045 +- Add technology data for batteries +- Add technology data for thermal storages +- Add definition of relevant regions (NUTS3) in global configuration file +- Add documentation of energy system of ABW ### Changed +- Move dataset docs from config to md files +- Retain mastr_id in MaStR datasets +- Fix loading of empty yml files +- Fix loading of global config when in workflow dir +- Integrate esys Snakefile in workflow Snakefile and update clean rule +- Fix shapely deprecation warning +- Fix ogr2ogr conversion with recent GDAL version (v3.6.2) +- Fix conda installation by removing python-gdal from environment.yml +- The package management in digipipe has been changed to poetry. +- The installation of a virtual environment is done only from the environment.yml file and via conda. +- Apply linters on repo, among others: black, isort, check-json and end-of-file-fixer +- Update population with prognoses from demandregio dataset +- Fix C419 flake8 error +- Switch to mkdocs for documentation (Sphinx deleted) +- Normalize renewable feedin timeseries +- Fix instruction to obtain raw files +- Translate all dataset.md files to German +- Exchange *Test_scenario* with *2045_scenario* in *digipipe/esys/scenarios* +- Split each demand per sector in *esys* +- File .gitignore again includes ignoring of esys appdata +- pv_roof area stats: distinguish between all and non-historic buildings +- storage units: add region-wide values for spec. capacity and power for those + connected to PV roof units +- Add data on installed el. power to bmwk_long_term_scenarios +- Disaggregate PV state targets to region in potentialarea_pv_ground_region +- Adapt osm_filtered to use osm dataset and extract building data +- Disaggregate PV state targets to region in potentialarea_pv_roof_region +- Changes were applied to the energy system. Among others, RoR, small batteries + and biogas were added. A distinction was made between centralized and + decentralized CHPs +- Scenario 2045_scenario needs default_scalars.csv instead of scalars.csv +- By default, set costs and efficiencies of esys are written to + default_scalars.csv instead of default_costs_efficiencies.csv +- Default variable_costs are passed with input_parameters for storages +- Pass time series instead of scalar with efficiency for central heat pump +- Fix wind+pv_ground default values in panel settings +- Set all default control values in panel settings +- Kick biogas shortage +- Rename dataset captions to app_captions +- Move app settings to datasets and include in app datapackage +- Adapt 2045_scenario.yml so that time series with values are used instead of + empty ts +- Suppress warning of losing data in source and comment columns while + unstacking if they are empty +- Change max. installable PV roof capacity in panel settings
+- Fix panel settings for large batteries +- Add additional captions to MaStR captions +- Use LTS version of OSM +- The unstacking of time series in esys was fixed so that a warning is given if + there is at least one value in columns 'source' or 'comment' +- Minor fix: applied reformatting with black +- Only use operating units from mastr for municipality stats and temporal + development +- Heat pump ASHP/GSHP split fixed +- Replace the relative demand of biomass with the relative demand of each + biomass conversion technology +- Fix clean rule +- Update raw datapackage URL +- Restrict snakemake version to v7.32.0 +- Add central heat pump targets to slider +- Restrict heat pump sliders to not move under 50% +- Fix pv ground slider values to prevent the app from altering the SQ value from panel + settings +- Fix PV roof slider values +- Add HP share slider from-max values to prevent 100 % HP share +- Updated technology_data dataset.md and metadata +- Update metadata in store_raw +- Adapt existing dataset rules to use the global region definition (NUTS3) +- Update list of region-specific datasets + ### Removed +- setup.py and requirements.txt files are omitted with the conversion to poetry +- sphinx from poetry environment +- Remove dataset: osm_forest +- Obsolete targets from rule all +- Merge dataset costs_efficiencies into technology_data +- Remove values for redundant subsliders from app datapackage diff --git a/PIPELINE.md b/PIPELINE.md deleted file mode 100644 index 45d269a0..00000000 --- a/PIPELINE.md +++ /dev/null @@ -1 +0,0 @@ -# Pipeline docs diff --git a/README.md b/README.md index 3183b008..37bb9966 100644 --- a/README.md +++ b/README.md @@ -1,51 +1,17 @@

+ [header images: Digiplan logo, RLI logo]
-# Digipipe - -Pipeline for data and energy system in the Digiplan project. - -## Installation - -**Note: Linux only, Windows is currently not supported.** - -First, clone via SSH using - - git clone git@github.com:rl-institut-private/digipipe.git /local/path/to/digipipe/ - -### Install using pip: - -Make sure you have Python >= 3.6 installed, let's create a virtual env: - - virtualenv --python=python3.8 venv - source venv/bin/activate +---------- -Some additional system packages are required, install them by - - apt install gdal-bin python-gdal libspatialindex-dev imagemagick - -Notes: -- Make sure you have GDAL>=3.0 as older versions will not work -- `imagemagick` is optional and only required for report creation, cf. [PIPELINE.md](PIPELINE.md) - -Install package with - - pip install -e /local/path/to/djagora_data/ - -### Install using conda - -Make sure you have conda installed, e.g. miniconda. Then create the env: - - conda create -n digipipe /local/path/to/digipipe/environment.yml - conda activate digipipe - -## Structure, pipeline and conventions - -See [PIPELINE.md](PIPELINE.md) +# Digipipe -## Runtime and resources +[![Black code style](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black) -**TO BE UPDATED** +Pipeline for data and energy system in the Digiplan project. -**Warning:** A full pipeline run takes 10 hours on a Xeon E5-2690 using 14 -cores and requires about 600 GB of disk space. +- App: https://github.com/rl-institut/digiplan/ +- Documentation: https://digipipe.readthedocs.io +- Install instructions: [installation.md](docs/sections/installation.md) diff --git a/digipipe/__init__.py b/digipipe/__init__.py new file mode 100644 index 00000000..5becc17c --- /dev/null +++ b/digipipe/__init__.py @@ -0,0 +1 @@ +__version__ = "1.0.0" diff --git a/digipipe/config/__init__.py b/digipipe/config/__init__.py new file mode 100644 index 00000000..b18bd828 --- /dev/null +++ b/digipipe/config/__init__.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path + +from digipipe.scripts.config import read_config + + +def locate_global_config() -> Path: + """Returns path to global config file. + + Raises error when executing the workflow not within `digipipe/` or + `digipipe/workflow/`. + """ + config_dir = Path("config", "global.yml") + cwd = Path(os.getcwd()) + if cwd.name == "digipipe": + return (cwd / config_dir).resolve() + elif cwd.name == "workflow": + return (cwd.parent / config_dir).resolve() + else: + raise FileNotFoundError( + "Global config file not found, make sure you execute the workflow " + "in digipipe/ or digipipe/workflow/ ." + ) + + +GLOBAL_CONFIG = {"global": read_config(locate_global_config())} diff --git a/digipipe/config/global.yml b/digipipe/config/global.yml new file mode 100644 index 00000000..74753ef8 --- /dev/null +++ b/digipipe/config/global.yml @@ -0,0 +1,20 @@ +#################################################################### +# This file holds global configuration parameters for the pipeline # +#################################################################### + +geodata: + # target CRS (warning: do not change, cf. 
DATASETS.md) + crs: "EPSG:3035" + # some input layers have invalid geometries, this is fixed by buffering by a very small distance (in m), + # it is enabled by setting fix_geom_by_buffering to True for the respective layer + fix_geom_buffer: 0.1 + geocoder: + user_agent: "geocoder" + interval_sec: 1 + # Define NUTS3 codes for region of interest + nuts: + - "DEE01" + - "DEE05" + - "DEE0E" +input_data: + download_url: "https://wolke.rl-institut.de/s/aN2ccptGtFsFiDs/download" diff --git a/digipipe/esys/ESYS.md b/digipipe/esys/ESYS.md new file mode 100644 index 00000000..4094fb51 --- /dev/null +++ b/digipipe/esys/ESYS.md @@ -0,0 +1,59 @@ +# Energy system in digiplan + +The energy system in digipipe, the pipeline for digiplan, is created using +[oemof-B3](https://github.com/rl-institut/oemof-B3). + +## Build the energy system + +To test if everything works, you can run the test scenario with + +``` +snakemake -j1 make_esys_appdata +``` + +For this you have to copy the corresponding raw data into the raw directory +`store/esys_raw/`. +In the future, empty raw data (scalars and time series) will be created +automatically. Then, assumptions on constant parameters such as plant costs, +lifetime and efficiencies are mapped and set as values of the corresponding +variables in the scalars. + +Empty scalars and time series can be created from the energy model setup with + +``` +snakemake -j1 create_empty_scalars +snakemake -j1 create_empty_ts +``` + +These commands create empty csv files with scalars and time series in the +following directories: + +- `store/datasets/esys_raw/data/scalars/` +- `store/datasets/esys_raw/data/time_series/` + +To set up an empty energy system, the following command automatically writes +default scalar values (such as zero or NaN) to the empty scalars: + +``` +snakemake -j1 write_default_scalars +``` +With this, the file `empty_scalars.csv` is automatically updated and saved to: +`store/datasets/esys_raw/data/scalars/default_scalars.csv` + +## Test the energy system + +To test the solvability of the energy system, run + +``` +snakemake -j1 postprocessed_esys_appdata +``` + +which should result in an output like + +``` +[...] +INFO - Optimization successful... +INFO - Solved the model. Elapsed time: 0:00:00.291995 +INFO - Model solved. Collecting results. +INFO - Results saved to store/appdata/esys/2045_scenario/optimized.
+``` diff --git a/digipipe/esys/Snakefile b/digipipe/esys/Snakefile new file mode 100644 index 00000000..14b81e71 --- /dev/null +++ b/digipipe/esys/Snakefile @@ -0,0 +1,36 @@ +import os + +scenario_groups = { + "all-scenarios": [ + os.path.splitext(scenario)[0] for scenario in os.listdir("esys/scenarios") + ] +} + +# Include rules for intermediate steps +include: "snakemake_rules/build_datapackage.smk" +include: "snakemake_rules/prepare_resource.smk" +include: "snakemake_rules/optimization.smk" +include: "snakemake_rules/postprocessing.smk" +include: "snakemake_rules/create_empty_scalars.smk" +include: "snakemake_rules/create_empty_ts.smk" +include: "snakemake_rules/write_default_scalars.smk" +include: "snakemake_rules/write_costs_efficiencies.smk" +include: "snakemake_rules/write_ts.smk" + +rule make_esys_appdata: + """ + Create esys datapackage for the app from esys raw data + """ + input: + plots=expand( + "store/appdata/esys/{scenario}/preprocessed", + scenario=scenario_groups["all-scenarios"] + ) + +# TODO: To be deleted with issue 52 resolve +rule postprocessed_esys_appdata: + input: + plots=expand( + "store/appdata/esys/{scenario}/postprocessed", + scenario=scenario_groups["all-scenarios"] + ) diff --git a/Snakefile b/digipipe/esys/esys/__init__.py similarity index 100% rename from Snakefile rename to digipipe/esys/esys/__init__.py diff --git a/digipipe/store/0_raw/.gitkeep b/digipipe/esys/esys/config/__init__.py similarity index 100% rename from digipipe/store/0_raw/.gitkeep rename to digipipe/esys/esys/config/__init__.py diff --git a/digipipe/esys/esys/config/colors.csv b/digipipe/esys/esys/config/colors.csv new file mode 100644 index 00000000..a30c536b --- /dev/null +++ b/digipipe/esys/esys/config/colors.csv @@ -0,0 +1,85 @@ +,Color +PV ground,#e2c319 +PV rooftop,#ebd34e +Heat cen. solar thermal collector,#84741d +Heat dec. solar thermal collector,#a59124 +Wind on,#19c5e5 +Hydro reservoir,#4a19e5 +Hydro ROR,#6d19e5 +H2 GT,#75CCF0 +CH4 GT,#71EFA4 + +Large-scale battery,#b41de6 +Large-scale battery out,#b41de6 +Large-scale battery in,#b41de6 +Small-scale battery,#c453eb +Small-scale battery out,#c453eb +Small-scale battery in,#c453eb + +H2 cavern,#19e5c0 +BEV,#e55c19 + +El. shortage,#DADADA +Curtailment,#DADADA + +El. demand hh,#3e89f4 +El. demand cts,#3e89f4 +El. demand ind,#3e89f4 +El. export,#737a8c +El. import,#737a8c +BEV charging,#e55c19 + +CH4 central CCGT,#c0e519 +CH4 decentral CCGT,#daee78 +H2 central CCGT,#13acac +H2 decentral CCGT,#7dc8c7 +Wood central CHP,#92583f +Wood decentral CHP,#a48070 +Biogas central CHP,#b7cbb2 +Biogas decentral CHP,#cfdccb +CH4 central CHP,#128E10 +CH4 decentral CHP,#74B465 +H2 central CHP,#0F8A8A +H2 decentral CHP,#73B0AF + +Heat cen. demand hh,#ff0800 +Heat cen. demand cts,#ff0800 +Heat cen. demand ind,#ff0800 +Heat cen. storage in,#e2c319 +Heat cen. storage,#e2c319 +Heat cen. storage out,#e2c319 +Heat dec. demand hh,#f2740a +Heat dec. demand cts,#f2740a +Heat dec. demand ind,#f2740a +Heat dec. storage out,#e2c319 +Heat dec. storage in,#e2c319 +Heat dec. storage,#e2c319 + +CH4 boiler,#e5194d +Heat pump (DH),#9912A1 +Heat pump (domestic),#f288e3 +res. 
PtH,#e58a19 + +Electrolyzer,#15B2BC +CH4,#bcf927 + +Other,#7f7f7f + +CH4 import,#cfcfcf +H2 import,#3B5054 +H2 demand,#00fed0 + +Wind off,#73baec +Biomass,#2ca02c +Biogas plant,#7ec073 +Biomass shortage,#DBDBDB +Biogas,#55844d +Biogas upgrading plant,#8cac85 +Biogas shortage,#DBDBDB +Wood,#754632 +Wood shortage,#DBDBDB +Hard coal,black +Lignite,saddlebrown +Lignite shortage,#DBDBDB +Oil,#9467bd +Nuclear ST,pink diff --git a/digipipe/esys/esys/config/colors.yml b/digipipe/esys/esys/config/colors.yml new file mode 100644 index 00000000..f8aafa09 --- /dev/null +++ b/digipipe/esys/esys/config/colors.yml @@ -0,0 +1,72 @@ +solar-pv_ground: "#e2c319" +solar-pv_rooftop: "#ebd34e" +solar-thermalcollector_central: "#84741d" +solar-thermalcollector_decentral: "#a59124" +wind-onshore: "#19c5e5" +hydro-reservoir: "#4a19e5" +hydro-ror: "#6d19e5" +h2-gt: "#75CCF0" +ch4-gt: "#71EFA4" + +electricity-large_scale_battery: "#b41de6" +electricity-small_scale_battery: "#c453eb" + +h2-cavern: "#19e5c0" +bev_charging: "#e55c19" + +electricity-demand_hh: "#3e89f4" +electricity-demand_gdh: "#3e89f4" +electricity-demand_ind: "#3e89f4" +electricity-export: "#737a8c" +electricity-import: "#737a8c" +electricity-bev_charging: "#e55c19" + +h2-bpchp_central: "#0F8A8A" +h2-bpchp_decentral: "#73B0AF" +h2-extchp_central: "#13ACAC" +h2-extchp_decentral: "#7DC8C7" +ch4-bpchp_central: "#128E10" +ch4-bpchp_decentral: "#74B465" +ch4-extchp_central: "#c0e519" +ch4-extchp_decentral: "#DAEE78" + +heat_central-demand_hh: "#ff0800" +heat_central-demand_cts: "#ff0800" +heat_central-demand_ind: "#ff0800" +heat_central-storage: "#e2c319" +heat_decentral-demand_hh: "#f2740a" +heat_decentral-demand_cts: "#f2740a" +heat_decentral-demand_ind: "#f2740a" +heat_decentral-storage: "#e2c319" + +ch4-boiler: "#e5194d" +electricity-heatpump_central: "#9912A1" +electricity-heatpump_decentral: "#f288e3" +electricity-pth: "#e58a19" + +electricity-electrolyzer: "#15B2BC" +ch4: "#bcf927" + +other: "#7f7f7f" + +ch4-import: "#cfcfcf" +h2-import: "#3B5054" +h2-demand: "#00fed0" + +wind-offshore: "#73baec" +biomass: "#2ca02c" +biomass-biogas_plant: "#7ec073" +biomass-shortage: "#DBDBDB" +biogas: "#55844d" +biogas-biogas_upgrading_plant: "#8cac85" +biogas-bpchp_central: "#b7cbb2" +biogas-bpchp_decentral: "#cfdccb" +wood: "#754632" +wood-shortage: "#DBDBDB" +wood-extchp_central: "#92583F" +wood-extchp_decentral: "#a48070" +hard coal: "black" +lignite: "saddlebrown" +lignite-shortage: "#DBDBDB" +oil: "#9467bd" +nuclear: "pink" diff --git a/digipipe/esys/esys/config/esys_conf.py b/digipipe/esys/esys/config/esys_conf.py new file mode 100644 index 00000000..bf9aed45 --- /dev/null +++ b/digipipe/esys/esys/config/esys_conf.py @@ -0,0 +1,97 @@ +import logging +import pathlib +import sys + +from dynaconf import Dynaconf + +from digipipe.scripts.config import read_config + +CONFIG_PATH = pathlib.Path(__file__).parent +ROOT_DIR = CONFIG_PATH.parent.parent + +settings = Dynaconf( + envvar_prefix="DYNACONF", + settings_files=[ + CONFIG_PATH / "settings.yaml", + CONFIG_PATH / ".secrets.yaml", + ], +) + +write_default_scalars = Dynaconf( + envvar_prefix="DYNACONF", + settings_files=[ + CONFIG_PATH / "write_default_scalars.yml", + CONFIG_PATH / ".secrets.yaml", + ], +) + +map_ts = Dynaconf( + envvar_prefix="DYNACONF", + settings_files=[ + CONFIG_PATH / "map_ts.yml", + CONFIG_PATH / ".secrets.yaml", + ], +) + + +class LevelFilter(logging.Filter): + def __init__(self, level): + self.level = level + super(LevelFilter, self).__init__() + + def filter(self, record): + return 
record.levelno != self.level + + +DEBUG = settings.get("DEBUG", False) +LOGGING_LEVEL = settings.get( + "LOGGING_LEVEL", logging.DEBUG if DEBUG else logging.INFO +) + +root_logger = logging.getLogger() +root_logger.handlers.clear() # Remove the default handler +root_logger.setLevel(LOGGING_LEVEL) + +stream_formatter = logging.Formatter("%(levelname)s - %(message)s") +stream_handler = logging.StreamHandler() +stream_handler.setFormatter(stream_formatter) +stream_handler.addFilter(LevelFilter(logging.ERROR)) +root_logger.addHandler(stream_handler) + +DEFAULT_LOGFILE = "snake.log" + + +def add_snake_logger(rulename): + """ + Adds logging to file + + Logfile is read from input parameters, ending with ".log" + This is done in order to add loggers for subprocesses + (like data_preprocessing.py), where logfile is unknown. + """ + logger = logging.getLogger(rulename) + logfile = next( + (item for item in sys.argv if item.endswith(".log")), DEFAULT_LOGFILE + ) + handler = logging.FileHandler(logfile) + file_formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) + handler.setFormatter(file_formatter) + logger.addHandler(handler) + return logger + + +def load_yaml(file_path): + return read_config(file_path) + + +LABELS = load_yaml(CONFIG_PATH / "labels" / f"{settings.labels}.yml") +raw_colors = load_yaml(CONFIG_PATH / "colors.yml") +COLORS = {} +for label, color in raw_colors.items(): + if label not in LABELS: + continue + COLORS[LABELS[label]] = color + COLORS[f"{LABELS[label]} in"] = color + COLORS[f"{LABELS[label]} out"] = color diff --git a/digipipe/esys/esys/config/labels/de.yml b/digipipe/esys/esys/config/labels/de.yml new file mode 100644 index 00000000..bad124a8 --- /dev/null +++ b/digipipe/esys/esys/config/labels/de.yml @@ -0,0 +1,61 @@ +# oil: Öl Raises error as "oil" is in "boiler" -> duplicate mappings +biomass: Biomasse +biomass-biogas_plant: Biogasanlage +biomass-shortage: Biomasse Shortage +biogas: Biogas +biogas-biogas_upgrading_plant: Biogas-Aufbereitungsanlage +biogas-bpchp_central: Biogas zentrale KWK +biogas-bpchp_decentral: Biogas dezentrale KWK +ch4-boiler: CH4 Boiler +ch4-bpchp_central: CH4 zentrale KWK +ch4-bpchp_decentral: CH4 dezentrale KWK +ch4-extchp_central: CH4 zentrale GUD +ch4-extchp_decentral: CH4 dezentrale GUD +ch4-gt: CH4 GT +ch4-import: CH4 Import +electricity-bev_charging: BEV +electricity-demand_hh: Strombedarf HH +electricity-demand_cts: Strombedarf GHD +electricity-demand_ind: Strombedarf I +electricity-electrolyzer: Elektrolyse +electricity-export: Stromexport +electricity-heatpump_central: Wärmepumpe (groß) +electricity-heatpump_decentral: Wärmepumpe (Haushalt) +electricity-import: Stromimport +electricity-large_scale_battery: Großbatterie +electricity-small_scale_battery: Kleinbatterie +electricity-pth: res. PtH +h2-bpchp_central: H2 zentrale KWK +h2-bpchp_decentral: H2 dezentrale KWK +h2-cavern: H2 Kaverne +h2-demand: H2 Bedarf +h2-excess: H2 Überschuss +h2-extchp_central: H2 zentrale GUD +h2-extchp_decentral: H2 dezentrale GUD +h2-import: H2 Import +h2-shortage: H2 Import +heat_central-demand_hh: zen. Wärmebedarf HH +heat_central-demand_cts: zen. Wärmebedarf GHD +heat_central-demand_ind: zen. Wärmebedarf I +heat_central-storage: zen. Wärmespeicher +heat_decentral-demand_hh: dez. Wärmebedarf HH +heat_decentral-demand_cts: dez. Wärmebedarf GHD +heat_decentral-demand_ind: dez. Wärmebedarf I +heat_decentral-storage: dez. 
Wärmespeicher +hydro-reservoir: Reservoir +hydro-ror: Laufwasserkraft +lignite: Braunkohle +lignite-shortage: Braunkohle Shortage +nuclear-st: Kernkraftwerk +hard coal: Steinkohle +wood: Holz +wood-extchp_central: Holz zentrale KWK +wood-extchp_decentral: Holz dezentrale KWK +wood-shortage: Holz Shortage +other: Andere +solar-pv_ground: PV Freifläche +solar-pv_rooftop: PV Aufdach +solar-thermalcollector_central: zen. Wärme Solarthermiekollektor +solar-thermalcollector_decentral: dez. Wärme Solarthermiekollektor +wind-offshore: Wind Offshore +wind-onshore: Wind Onshore diff --git a/digipipe/esys/esys/config/labels/en.yml b/digipipe/esys/esys/config/labels/en.yml new file mode 100644 index 00000000..ed92e94b --- /dev/null +++ b/digipipe/esys/esys/config/labels/en.yml @@ -0,0 +1,55 @@ +# oil: Oil Raises error as "oil" is in "boiler" -> duplicate mappings +biomass: Biomass +biomass-biogas_plant: Biogas plant +biomass-shortage: Biomass shortage +biogas: Biogas +biogas-biogas_upgrading_plant: Biogas upgrading plant +biogas-bpchp_central: Biogas central CHP +biogas-bpchp_decentral: Biogas decentral CHP +ch4-boiler: CH4 boiler +ch4-bpchp_central: CH4 central CHP +ch4-bpchp_decentral: CH4 decentral CHP +ch4-extchp_central: CH4 central CCGT +ch4-extchp_decentral: CH4 decentral CCGT +ch4-gt: CH4 GT +ch4-import: CH4 import +electricity-bev_charging: BEV charging +electricity-demand_hh: El. demand HH +electricity-demand_cts: El. demand CTS +electricity-demand_ind: El. demand IND +electricity-electrolyzer: Electrolyzer +electricity-export: El. export +electricity-heatpump_central: Heat pump (DH) +electricity-heatpump_decentral: Heat pump (domestic) +electricity-import: El. import +electricity-large_scale_battery: Large-scale battery +electricity-small_scale_battery: Small-scale battery +electricity-pth: res. PtH +h2-bpchp_central: H2 central CHP +h2-bpchp_decentral: H2 decentral CHP +h2-extchp_central: H2 central CCGT +h2-extchp_decentral: H2 decentral CCGT +h2-cavern: H2 cavern +h2-demand: H2 demand +h2-excess: H2 excess +h2-gt: H2 GT +h2-import: H2 import +h2-shortage: H2 import +heat_central-demand_hh: Heat cen. demand HH +heat_central-demand_cts: Heat cen. demand CTS +heat_central-demand_ind: Heat cen. demand IND +heat_central-storage: Heat cen. storage +heat_decentral-demand_hh: Heat dec. demand HH +heat_decentral-demand_cts: Heat dec. demand CTS +heat_decentral-demand_ind: Heat dec. demand IND +heat_decentral-storage: Heat dec. storage +hydro-ror: Hydro ROR +solar-pv_ground: PV ground +solar-pv_rooftop: PV rooftop +solar-thermalcollector_central: Heat cen. solar thermal collector +solar-thermalcollector_decentral: Heat dec. 
solar thermal collector +wind-onshore: Wind on +wood-shortage: Wood shortage +wood-extchp_central: Wood central CHP +wood-extchp_decentral: Wood decentral CHP +lignite-shortage: Lignite shortage diff --git a/digipipe/esys/esys/config/map_ts.yml b/digipipe/esys/esys/config/map_ts.yml new file mode 100644 index 00000000..dffee725 --- /dev/null +++ b/digipipe/esys/esys/config/map_ts.yml @@ -0,0 +1,76 @@ +load: { + "electricity-demand_hh-profile": { + "dataset": "demand_electricity_region", + "file": "demand_hh_power_timeseries.csv", + }, + "electricity-demand_cts-profile": { + "dataset": "demand_electricity_region", + "file": "demand_cts_power_timeseries.csv", + }, + "electricity-demand_ind-profile": { + "dataset": "demand_electricity_region", + "file": "demand_ind_power_timeseries.csv", + }, + "heat_central-demand_hh-profile": { + "dataset": "demand_heat_region", + "file": "demand_hh_heat_timeseries.csv", + }, + "heat_central-demand_cts-profile": { + "dataset": "demand_heat_region", + "file": "demand_cts_heat_timeseries.csv", + }, + "heat_central-demand_ind-profile": { + "dataset": "demand_heat_region", + "file": "demand_ind_heat_timeseries.csv", + }, + "heat_decentral-demand_hh-profile": { + "dataset": "demand_heat_region", + "file": "demand_hh_heat_timeseries.csv", + }, + "heat_decentral-demand_cts-profile": { + "dataset": "demand_heat_region", + "file": "demand_cts_heat_timeseries.csv", + }, + "heat_decentral-demand_ind-profile": { + "dataset": "demand_heat_region", + "file": "demand_ind_heat_timeseries.csv", + }, +} + +feedin: { + "hydro-ror-profile": { + "dataset": "renewable_feedin", + "file": "ror_feedin_timeseries.csv", + }, + "wind-onshore-profile": { + "dataset": "renewable_feedin", + "file": "wind_feedin_timeseries.csv", + }, + "solar-pv_ground-profile": { + "dataset": "renewable_feedin", + "file": "pv_feedin_timeseries.csv", + }, + "solar-pv_rooftop-profile": { + "dataset": "renewable_feedin", + "file": "pv_feedin_timeseries.csv", + }, + "electricity-bev_charging-profile": { + "dataset": "renewable_feedin", + "file": "pv_feedin_timeseries.csv", + }, + "solar-thermalcollector_central-profile": { + "dataset": "renewable_feedin", + "file": "st_feedin_timeseries.csv", + }, + "solar-thermalcollector_decentral-profile": { + "dataset": "renewable_feedin", + "file": "st_feedin_timeseries.csv", + }, +} + +efficiencies: { + "efficiency-profile": { + "dataset": "heatpump_cop", + "file": "heatpump_cop_timeseries.csv", + }, +} diff --git a/digipipe/esys/esys/config/settings.yaml b/digipipe/esys/esys/config/settings.yaml new file mode 100644 index 00000000..c7763b71 --- /dev/null +++ b/digipipe/esys/esys/config/settings.yaml @@ -0,0 +1,82 @@ +general: + prepare_resources_locally: True + plot_filetype: .png + ts_index_name: id_ts + scal_index_name: id_scal + separator: ";" + +labels: de + +build_datapackage: + el_gas_relation: electricity_gas_relation # appears in optimize as well + emission: emission + additional_scalars_file: additional_scalars.csv + overwrite_name: false + +optimize: + filename_metadata: datapackage.json + solver: cbc + solve_kwargs: + tee: True + keepfiles: True + write_lp_file: False + cmdline_options: + AllowableGap: 0.01 + debug: true + receive_duals: false + el_gas_relation: electricity_gas_relation # appears in build_datapackage as well + el_key: electricity # prefix of keywords for gas electricity relation + gas_key: gas # prefix of keywords for gas electricity relation + +create_empty_scalars: + non_regional: [ + "capacity_cost", + "carrier_cost", + "efficiency", + 
"expandable", + "marginal_cost", + "electric_efficiency", + "thermal_efficiency", + "condensing_efficiency", + "loss_rate", + "storage_capacity_cost", + ] + wacc: { + "var_name": "wacc", + "carrier": "ALL", + "region": "ALL", + "tech": "ALL", + "type": "ALL", + "var_unit": "", + } + emissions_not_modeled: { + "var_name": "emissions_not_modeled", + "carrier": "emission", + "region": "TOTAL", + "tech": "constraint", + "type": "limit", + } + emissions_1990: { + "var_name": "emissions_1990", + "carrier": "emission", + "region": "TOTAL", + "tech": "constraint", + "type": "limit", + } + emission_reduction_factor: { + "var_name": "emission_reduction_factor", + "carrier": "emission", + "region": "TOTAL", + "tech": "constraint", + "type": "limit", + } + drop_default_scalars: False + +create_empty_ts: + datetime_format: "%Y-%m-%d %H:%M:%S" + filter_ts: "empty_ts" + ts_values: "zeros" # Set to 'zeros' or 'empty' + overwrite: false + +write_costs_efficiencies: + delete_default: True diff --git a/digipipe/esys/esys/config/write_default_scalars.yml b/digipipe/esys/esys/config/write_default_scalars.yml new file mode 100644 index 00000000..e439e6eb --- /dev/null +++ b/digipipe/esys/esys/config/write_default_scalars.yml @@ -0,0 +1,155 @@ +write_default_scalars: + capacity_backpressure: { + "var_name": "capacity", + "type": backpressure, + "tech": None, + "which": "zeros", + "var_unit": "MW", + } + capacity_conversion: { + "var_name": "capacity", + "type": conversion, + "tech": None, + "which": "zeros", + "var_unit": "MW", + } + capacity_extraction: { + "var_name": "capacity", + "type": extraction, + "tech": None, + "which": "zeros", + "var_unit": "MW", + } + capacity_storage: { + "var_name": "capacity", + "type": storage, + "tech": None, + "which": "zeros", + "var_unit": "MW", + } + capacity_volatile: { + "var_name": "capacity", + "type": volatile, + "tech": None, + "which": "zeros", + "var_unit": "MW", + } + capacity_shortage: { + "var_name": "capacity", + "type": shortage, + "tech": None, + "which": "empty", + "var_unit": "MW", + } + capacity_excess: { + "var_name": "capacity", + "type": excess, + "tech": None, + "which": "empty", + "var_unit": "MW", + } + amount: { + "var_name": "amount", + "type": None, + "tech": None, + "which": "empty", + "var_unit": "MWh/a", + } + capacity_potential: { + "var_name": "capacity_potential", + "type": None, + "tech": None, + "which": "empty", + "var_unit": "MW", + } + carrier_cost: { + "var_name": "carrier_cost", + "type": None, + "tech": None, + "which": "zeros", + "var_unit": "Eur/MWh", + } + expandable: { + "var_name": "expandable", + "type": None, + "tech": None, + "which": "false", + "var_unit": "None", + } + input_parameters_storage: { + "var_name": "input_parameters", + "type": storage, + "tech": None, + "which": "variable_costs", + "var_unit": "None", + } + input_parameters: { + "var_name": "input_parameters", + "type": None, + "tech": None, + "which": "empty_dict", + "var_unit": "None", + } + marginal_cost_shortage: { + "var_name": "marginal_cost", + "type": shortage, + "tech": None, + "which": "high_costs", + "var_unit": "EUR/MWh", + } + marginal_cost_volatile: { + "var_name": "marginal_cost", + "type": volatile, + "tech": None, + "which": "zeros", + "var_unit": "EUR/MWh", + } + storage_capacity: { + "var_name": "storage_capacity", + "type": None, + "tech": None, + "which": "zeros", + "var_unit": "MWh", + } + storage_capacity_potential: { + "var_name": "storage_capacity_potential", + "type": None, + "tech": None, + "which": "empty", + "var_unit": "MWh", + } + 
emissions_not_modeled: { + "var_name": "emissions_not_modeled", + "type": None, + "tech": None, + "which": "emissions_not_modeled", + "var_unit": "kg_CO2_eq/MWh", + } + emissions_1990: { + "var_name": "emissions_1990", + "type": None, + "tech": None, + "which": "zeros", + "var_unit": "kg_CO2_eq/MWh", + } + emission_reduction_factor: { + "var_name": "emission_reduction_factor", + "type": None, + "tech": None, + "which": "emission_reduction_factor", + "var_unit": "None", + } +costs_efficiencies: [ + "capacity_cost_overnight", + "condensing_efficiency", + "efficiency", + "electric_efficiency", + "fixom_cost", + "lifetime", + "loss_rate", + "marginal_cost", + "storage_capacity_cost_overnight", + "storage_fixom_cost", + "thermal_efficiency", + "wacc" + ] diff --git a/digipipe/esys/esys/model/__init__.py b/digipipe/esys/esys/model/__init__.py new file mode 100644 index 00000000..723da1c1 --- /dev/null +++ b/digipipe/esys/esys/model/__init__.py @@ -0,0 +1,22 @@ +import os + +from oemoflex.tools.helpers import load_yaml + +here = os.path.dirname(os.path.abspath(__file__)) + +component_attrs_update = load_yaml( + os.path.join(here, "component_attrs_update.yml") +) + +bus_attrs_update = load_yaml(os.path.join(here, "bus_attrs_update.yml")) + +foreign_keys_update = load_yaml(os.path.join(here, "foreign_keys_update.yml")) + +MODEL_STRUCTURE_DIR = os.path.join(here, "model_structure") + +model_structures = { + os.path.splitext(f_name)[0]: load_yaml( + os.path.join(MODEL_STRUCTURE_DIR, f_name) + ) + for f_name in os.listdir(MODEL_STRUCTURE_DIR) +} diff --git a/digipipe/esys/esys/model/bus_attrs_update.yml b/digipipe/esys/esys/model/bus_attrs_update.yml new file mode 100644 index 00000000..02e06166 --- /dev/null +++ b/digipipe/esys/esys/model/bus_attrs_update.yml @@ -0,0 +1,31 @@ +heat_central: + balanced: + True + +heat_decentral: + balanced: + True + +ch4: + balanced: + True + +wood: + balanced: + True + +lignite: + balanced: + True + +biomass: + balanced: + True + +biogas: + balanced: + True + +#h2: +# balanced: +# True diff --git a/digipipe/esys/esys/model/component_attrs_update.yml b/digipipe/esys/esys/model/component_attrs_update.yml new file mode 100644 index 00000000..40161449 --- /dev/null +++ b/digipipe/esys/esys/model/component_attrs_update.yml @@ -0,0 +1,527 @@ +# Bus "electricity": + +solar-pv_ground: + carrier: solar + tech: pv_ground + type: volatile + foreign_keys: + bus: electricity + profile: solar-pv_ground-profile + defaults: + marginal_cost: 0 + output_parameters: "{}" + +solar-pv_rooftop: + carrier: solar + tech: pv_rooftop + type: volatile + foreign_keys: + bus: electricity + profile: solar-pv_rooftop-profile + defaults: + marginal_cost: 0 + output_parameters: "{}" + +electricity-import: + carrier: electricity + tech: import + type: shortage + foreign_keys: + bus: electricity + defaults: + output_parameters: "{}" + +electricity-export: + carrier: electricity + tech: export + type: excess + foreign_keys: + bus: electricity + defaults: + input_parameters: "{}" + +ch4-gt: + carrier: ch4 + tech: gt + type: conversion + foreign_keys: + from_bus: ch4 + to_bus: electricity + defaults: + output_parameters: "{}" + +ch4-extchp_central: + carrier: ch4 + tech: extchp_central + type: extraction + foreign_keys: + fuel_bus: ch4 + electricity_bus: electricity + heat_bus: heat_central + +ch4-extchp_decentral: + carrier: ch4 + tech: extchp_decentral + type: extraction + foreign_keys: + fuel_bus: ch4 + electricity_bus: electricity + heat_bus: heat_decentral + +wood-extchp_decentral: + carrier: wood + 
tech: extchp_decentral + type: extraction + foreign_keys: + fuel_bus: wood + electricity_bus: electricity + heat_bus: heat_decentral + +wood-extchp_central: + carrier: wood + tech: extchp_central + type: extraction + foreign_keys: + fuel_bus: wood + electricity_bus: electricity + heat_bus: heat_central + +ch4-bpchp_decentral: + carrier: ch4 + tech: bpchp_decentral + type: backpressure + foreign_keys: + fuel_bus: ch4 + electricity_bus: electricity + heat_bus: heat_decentral + +ch4-bpchp_central: + carrier: ch4 + tech: bpchp_central + type: backpressure + foreign_keys: + fuel_bus: ch4 + electricity_bus: electricity + heat_bus: heat_central + +biogas-bpchp_decentral: + carrier: biogas + tech: bpchp_decentral + type: backpressure + foreign_keys: + fuel_bus: biogas + electricity_bus: electricity + heat_bus: heat_decentral + +biogas-bpchp_central: + carrier: biogas + tech: bpchp_central + type: backpressure + foreign_keys: + fuel_bus: biogas + electricity_bus: electricity + heat_bus: heat_central + +electricity-bev_charging: + carrier: electricity + tech: bev_charging + type: load + foreign_keys: + bus: electricity + profile: electricity-bev_charging-profile + +electricity-large_scale_battery: + carrier: electricity + tech: large_scale_battery + type: storage + foreign_keys: + bus: electricity + defaults: + input_parameters: "{}" + output_parameters: "{}" + +electricity-small_scale_battery: + carrier: electricity + tech: small_scale_battery + type: storage + foreign_keys: + bus: electricity + defaults: + input_parameters: "{}" + output_parameters: "{}" + +electricity-demand_hh: + carrier: electricity + tech: demand_hh + type: load + foreign_keys: + bus: electricity + profile: electricity-demand_hh-profile + +electricity-demand_cts: + carrier: electricity + tech: demand_cts + type: load + foreign_keys: + bus: electricity + profile: electricity-demand_cts-profile + +electricity-demand_ind: + carrier: electricity + tech: demand_ind + type: load + foreign_keys: + bus: electricity + profile: electricity-demand_ind-profile + +electricity-heatpump_central: + carrier: electricity + tech: heatpump_central + type: conversion + foreign_keys: + from_bus: electricity + to_bus: heat_central + efficiency: efficiency-profile + defaults: + output_parameters: "{}" + +electricity-heatpump_decentral: + carrier: electricity + tech: heatpump_decentral + type: conversion + foreign_keys: + from_bus: electricity + to_bus: heat_decentral + efficiency: efficiency-profile + defaults: + output_parameters: "{}" + +electricity-pth_central: + carrier: electricity + tech: pth_central + type: conversion + foreign_keys: + from_bus: electricity + to_bus: heat_central + defaults: + output_parameters: "{}" + +electricity-pth_decentral: + carrier: electricity + tech: pth_decentral + type: conversion + foreign_keys: + from_bus: electricity + to_bus: heat_decentral + defaults: + output_parameters: "{}" + + + +# Bus "central heat": + +ch4-boiler_central: + carrier: ch4 + tech: boiler_central + type: conversion + foreign_keys: + from_bus: ch4 + to_bus: heat_central + defaults: + output_parameters: "{}" + +heat_central-demand_hh: + carrier: heat_central + tech: demand_hh + type: load + foreign_keys: + bus: heat_central + profile: heat_central-demand_hh-profile + +heat_central-demand_cts: + carrier: heat_central + tech: demand_cts + type: load + foreign_keys: + bus: heat_central + profile: heat_central-demand_cts-profile + +heat_central-demand_ind: + carrier: heat_central + tech: demand_ind + type: load + foreign_keys: + bus: heat_central 
+ profile: heat_central-demand_ind-profile + +heat_central-storage: + carrier: heat_central + tech: storage + type: storage + foreign_keys: + bus: heat_central + defaults: + input_parameters: "{}" + output_parameters: "{}" + + + +# Bus "decentral heat": + +ch4-boiler_decentral: + carrier: ch4 + tech: boiler_decentral + type: conversion + foreign_keys: + from_bus: ch4 + to_bus: heat_decentral + defaults: + output_parameters: "{}" + +wood-oven: + carrier: wood + tech: oven + type: conversion + foreign_keys: + from_bus: wood + to_bus: heat_decentral + defaults: + output_parameters: "{}" + +lignite-oven: + carrier: lignite + tech: oven + type: conversion + foreign_keys: + from_bus: lignite + to_bus: heat_decentral + defaults: + output_parameters: "{}" + +solar-thermalcollector_central: + carrier: solar + tech: thermalcollector_central + type: volatile + foreign_keys: + bus: heat_central + profile: solar-thermalcollector_central-profile + defaults: + marginal_cost: 0 + output_parameters: "{}" + +solar-thermalcollector_decentral: + carrier: solar + tech: thermalcollector_decentral + type: volatile + foreign_keys: + bus: heat_decentral + profile: solar-thermalcollector_decentral-profile + defaults: + marginal_cost: 0 + output_parameters: "{}" + +heat_decentral-demand_hh: + carrier: heat_decentral + tech: demand_hh + type: load + foreign_keys: + bus: heat_decentral + profile: heat_decentral-demand_hh-profile + +heat_decentral-demand_cts: + carrier: heat_decentral + tech: demand_cts + type: load + foreign_keys: + bus: heat_decentral + profile: heat_decentral-demand_cts-profile + +heat_decentral-demand_ind: + carrier: heat_decentral + tech: demand_ind + type: load + foreign_keys: + bus: heat_decentral + profile: heat_decentral-demand_ind-profile + +heat_decentral-storage: + carrier: heat_decentral + tech: storage + type: storage + foreign_keys: + bus: heat_decentral + defaults: + input_parameters: "{}" + output_parameters: "{}" + + + +# Bus "ch4": + +ch4-import: + carrier: ch4 + tech: import + type: shortage + foreign_keys: + bus: ch4 + defaults: + output_parameters: "{}" + + + +# Bus "wood": + +wood-shortage: + carrier: wood + tech: shortage + type: shortage + foreign_keys: + bus: wood + defaults: + output_parameters: "{}" + + + +# Bus "lignite": + +lignite-shortage: + carrier: lignite + tech: shortage + type: shortage + foreign_keys: + bus: lignite + defaults: + output_parameters: "{}" + + + +# Bus "biomass": + +biomass-shortage: + carrier: biomass + tech: shortage + type: shortage + foreign_keys: + bus: biomass + defaults: + output_parameters: "{}" + +biomass-biogas_plant: + carrier: biomass + tech: biogas_plant + type: conversion + foreign_keys: + from_bus: biomass + to_bus: biogas + defaults: + output_parameters: "{}" + + + +# Bus "biogas": + +biogas-biogas_upgrading_plant: + carrier: biogas + tech: biogas_upgrading_plant + type: conversion + foreign_keys: + from_bus: biogas + to_bus: ch4 + defaults: + output_parameters: "{}" + + + +# Bus "h2": +# +#h2-import: +# carrier: h2 +# tech: import +# type: shortage +# foreign_keys: +# bus: h2 +# defaults: +# output_parameters: "{}" +# +#h2-shortage: +# carrier: h2 +# tech: shortage +# type: shortage +# foreign_keys: +# bus: h2 +# defaults: +# output_parameters: "{}" +# +#electricity-electrolyzer: +# carrier: electricity +# tech: electrolyzer +# type: conversion +# foreign_keys: +# from_bus: electricity +# to_bus: h2 +# defaults: +# output_parameters: "{}" +# +#h2-bpchp_central: +# carrier: h2 +# tech: bpchp_central +# type: backpressure +# 
foreign_keys: +# fuel_bus: h2 +# electricity_bus: electricity +# heat_bus: heat_central +# +#h2-bpchp_decentral: +# carrier: h2 +# tech: bpchp_decentral +# type: backpressure +# foreign_keys: +# fuel_bus: h2 +# electricity_bus: electricity +# heat_bus: heat_decentral +# +#h2-extchp_central: +# carrier: h2 +# tech: extchp_central +# type: extraction +# foreign_keys: +# fuel_bus: h2 +# electricity_bus: electricity +# heat_bus: heat_central +# +#h2-extchp_decentral: +# carrier: h2 +# tech: extchp_decentral +# type: extraction +# foreign_keys: +# fuel_bus: h2 +# electricity_bus: electricity +# heat_bus: heat_decentral +# +#h2-demand: +# carrier: h2 +# tech: demand +# type: load +# foreign_keys: +# bus: h2 +# profile: h2-demand-profile +# +#h2-cavern: +# carrier: h2 +# tech: cavern +# type: storage +# foreign_keys: +# bus: h2 +# defaults: +# input_parameters: "{}" +# output_parameters: "{}" +# +#h2-excess: +# carrier: h2 +# tech: excess +# type: excess +# foreign_keys: +# bus: h2 +# defaults: +# marginal_cost: 0 +# input_parameters: "{}" diff --git a/digipipe/esys/esys/model/foreign_keys_update.yml b/digipipe/esys/esys/model/foreign_keys_update.yml new file mode 100644 index 00000000..75b1961e --- /dev/null +++ b/digipipe/esys/esys/model/foreign_keys_update.yml @@ -0,0 +1,85 @@ +'bus': [ + 'biomass-shortage', + 'ch4-import', + 'electricity-bev_charging', + 'electricity-large_scale_battery', + 'electricity-small_scale_battery', + 'electricity-demand_hh', + 'electricity-demand_cts', + 'electricity-demand_ind', + 'electricity-export', + 'electricity-import', +# 'h2-cavern', +# 'h2-demand', +# 'h2-excess', +# 'h2-import', +# 'h2-shortage', + 'heat_central-demand_hh', + 'heat_central-demand_cts', + 'heat_central-demand_ind', + 'heat_central-storage', + 'heat_decentral-demand_hh', + 'heat_decentral-demand_cts', + 'heat_decentral-demand_ind', + 'heat_decentral-storage', + 'wood-shortage', + 'lignite-shortage', + 'solar-pv_ground', + 'solar-pv_rooftop', + 'solar-thermalcollector_central', + 'solar-thermalcollector_decentral', +] + +'profile': [ + 'electricity-bev_charging', + 'electricity-demand_hh', + 'electricity-demand_cts', + 'electricity-demand_ind', +# 'h2-demand', + 'heat_central-demand_hh', + 'heat_central-demand_cts', + 'heat_central-demand_ind', + 'heat_decentral-demand_hh', + 'heat_decentral-demand_cts', + 'heat_decentral-demand_ind', + 'solar-pv_ground', + 'solar-pv_rooftop', + 'solar-thermalcollector_central', + 'solar-thermalcollector_decentral', +] + +'from_to_bus': [ + 'ch4-boiler_central', + 'ch4-boiler_decentral', + 'ch4-gt', + 'biomass-biogas_plant', + 'biogas-biogas_upgrading_plant', +# 'electricity-electrolyzer', +# 'h2-gt', + 'electricity-heatpump_central', + 'electricity-heatpump_decentral', + 'electricity-pth_decentral', + 'electricity-pth_central', + 'wood-oven', + 'lignite-oven', +] + +'chp': [ + 'ch4-bpchp_central', + 'ch4-bpchp_decentral', + 'ch4-extchp_central', + 'ch4-extchp_decentral', + 'biogas-bpchp_central', + 'biogas-bpchp_decentral', + 'wood-extchp_central', + 'wood-extchp_decentral', +# 'h2-bpchp_central', +# 'h2-bpchp_decentral', +# 'h2-extchp_central', +# 'h2-extchp_decentral', +] + +'efficiency': [ + 'electricity-heatpump_central', + 'electricity-heatpump_decentral', +] diff --git a/digipipe/esys/esys/model/model_structure/model_structure_el_only.yml b/digipipe/esys/esys/model/model_structure/model_structure_el_only.yml new file mode 100644 index 00000000..74442c15 --- /dev/null +++ b/digipipe/esys/esys/model/model_structure/model_structure_el_only.yml @@ -0,0 
+1,40 @@ +regions: + - ABW + +links: + +busses: + - ch4 + - electricity + - biomass + - biogas +# - h2 + +components: + - biomass-shortage + - electricity-import + - electricity-export + - electricity-bev_charging + - electricity-demand_hh + - electricity-demand_cts + - electricity-demand_ind +# - electricity-electrolyzer + - electricity-large_scale_battery + - electricity-small_scale_battery + + - ch4-import + - ch4-gt + + - biomass-biogas_plant + - biogas-biogas_upgrading_plant +# - h2-cavern +# - h2-demand +# - h2-excess +# - h2-gt +# - h2-import +# - h2-shortage + + - solar-pv_ground + - solar-pv_rooftop + - wind-onshore + - hydro-ror diff --git a/digipipe/esys/esys/model/model_structure/model_structure_full.yml b/digipipe/esys/esys/model/model_structure/model_structure_full.yml new file mode 100644 index 00000000..8babe91d --- /dev/null +++ b/digipipe/esys/esys/model/model_structure/model_structure_full.yml @@ -0,0 +1,82 @@ +regions: + - ABW + +links: + +busses: + - ch4 + - electricity + - heat_central + - heat_decentral + - wood + - lignite + - biomass + - biogas +# - h2 + +components: + - electricity-import + - electricity-export + - electricity-bev_charging + - electricity-demand_hh + - electricity-demand_cts + - electricity-demand_ind +# - electricity-electrolyzer + - electricity-heatpump_central + - electricity-heatpump_decentral + - electricity-large_scale_battery + - electricity-small_scale_battery + - electricity-pth_central + - electricity-pth_decentral + + - ch4-import + - ch4-boiler_central + - ch4-boiler_decentral + - ch4-bpchp_central + - ch4-bpchp_decentral + - ch4-gt + - ch4-extchp_central + - ch4-extchp_decentral + + - biomass-biogas_plant + - biomass-shortage + + - biogas-bpchp_central + - biogas-bpchp_decentral + - biogas-biogas_upgrading_plant + +# - h2-cavern +# - h2-demand +# - h2-excess +# - h2-bpchp_central +# - h2-bpchp_decentral +# - h2-gt +# - h2-extchp_central +# - h2-extchp_decentral +# - h2-import +# - h2-shortage + + - wood-oven + - wood-extchp_central + - wood-extchp_decentral + - wood-shortage + + - lignite-shortage + - lignite-oven + + - heat_central-demand_hh + - heat_central-demand_cts + - heat_central-demand_ind + - heat_central-storage + + - heat_decentral-demand_hh + - heat_decentral-demand_cts + - heat_decentral-demand_ind + - heat_decentral-storage + + - solar-pv_ground + - solar-pv_rooftop + - solar-thermalcollector_central + - solar-thermalcollector_decentral + - wind-onshore + - hydro-ror diff --git a/digipipe/esys/esys/schema/__init__.py b/digipipe/esys/esys/schema/__init__.py new file mode 100644 index 00000000..94515d39 --- /dev/null +++ b/digipipe/esys/esys/schema/__init__.py @@ -0,0 +1,30 @@ +import os + +import pandas as pd + +here = os.path.dirname(__file__) + + +class B3Schema: + def __init__(self, index, columns): + self.index = index + self.columns = columns + + @classmethod + def load_from_csv(cls, path): + df = pd.read_csv(path, delimiter=";") + + df.columns.name = "field" + + df.index = ["type", "description"] + + index = df.iloc[:, 0] + + columns = df.iloc[:, 1:] + + return cls(index, columns) + + +SCHEMA_SCAL = B3Schema.load_from_csv(os.path.join(here, "scalars.csv")) + +SCHEMA_TS = B3Schema.load_from_csv(os.path.join(here, "timeseries.csv")) diff --git a/digipipe/esys/esys/schema/scalars.csv b/digipipe/esys/esys/schema/scalars.csv new file mode 100644 index 00000000..0370587e --- /dev/null +++ b/digipipe/esys/esys/schema/scalars.csv @@ -0,0 +1,3 @@ 
+id_scal;scenario_key;name;var_name;carrier;region;tech;type;var_value;var_unit;source;comment +int;str;str;str;str;str;str;str;float or int;str;str;str +Numerical index;Key;Unique name of the oemof component;Variable name;Energy carrier;Region;Description;oemof.tabular type;Value;Unit of the variable;Source of the data;Additional information or notes such as the license or scope of validity diff --git a/digipipe/esys/esys/schema/timeseries.csv b/digipipe/esys/esys/schema/timeseries.csv new file mode 100644 index 00000000..268e4628 --- /dev/null +++ b/digipipe/esys/esys/schema/timeseries.csv @@ -0,0 +1,3 @@ +id_ts;scenario_key;region;var_name;timeindex_start;timeindex_stop;timeindex_resolution;series;var_unit;source;comment +int;str;str;str;str;str;str;str;str;str;str +Numerical index;Key;Region;Variable name;Start of the timeindex;Stop of the timeindex;Time increment;Series;Unit of the variable;Source of the data;Additional information or notes such as the license or scope of validity diff --git a/digipipe/esys/esys/tools/data_processing.py b/digipipe/esys/esys/tools/data_processing.py new file mode 100644 index 00000000..effa2d5f --- /dev/null +++ b/digipipe/esys/esys/tools/data_processing.py @@ -0,0 +1,1465 @@ +# coding: utf-8 +r""" +This module contains helper functions for processing the data in digipipe, such +as loading, filtering, sorting, merging, aggregating and saving. +""" + +import ast +import os +import warnings + +import numpy as np +import oemof.tabular.facades +import pandas as pd + +from digipipe.esys.esys import schema +from digipipe.esys.esys.config import esys_conf + +logger = esys_conf.add_snake_logger("data_processing") + +here = os.path.dirname(__file__) + +template_dir = os.path.join(here, "..", "schema") + +HEADER_B3_SCAL = schema.SCHEMA_SCAL.columns.columns + +HEADER_B3_TS = schema.SCHEMA_TS.columns.columns + + +def sort_values(df, reset_index=True): + _df = df.copy() + + _df = _df.sort_values( + by=["scenario_key", "carrier", "tech", "var_name", "region"] + ) + + if reset_index: + _df = _df.reset_index(drop=True) + + _df.index.name = esys_conf.settings.general.scal_index_name + + return _df + + +def sum_series(series): + """ + Enables ndarray summing into one list + """ + summed_series = sum(series) + if isinstance(summed_series, np.ndarray): + return summed_series.tolist() + else: + return summed_series + + +def get_list_diff(list_a, list_b): + r""" + Returns all items of list_a that are not in list_b. + + Parameters + ---------- + list_a : list + First list + list_b : list + Second list + Returns + ------- + list_a_diff_b : list + List of all items in list_a that are not in list_b. + """ + return list(set(list_a).difference(set(list_b))) + + +def format_header(df, header, index_name): + r""" + Formats columns of a DataFrame according to a specified header and index + name. Fills missing columns with NaN. In case there are columns that are + not in header, an error is raised. 
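+
+    A minimal sketch of typical use ("id_scal" is the scalar index name also
+    used elsewhere in this module; the input frame is purely illustrative)::
+
+        df = pd.DataFrame({"region": ["ABW"], "var_name": ["capacity"]})
+        df_formatted = format_header(df, HEADER_B3_SCAL, "id_scal")
+        # all remaining schema columns are appended and filled with NaN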
+ + Parameters + ---------- + df : pd.DataFrame + DataFrame to format + header : list + List of columns + index_name : str + Name of the index + + Returns + ------- + df_formatted : pd.DataFrame + """ + _df = df.copy() + + extra_colums = get_list_diff(_df.columns, header) + + if index_name in extra_colums: + _df = _df.set_index(index_name, drop=True) + extra_colums = get_list_diff(_df.columns, header) + else: + _df.index.name = index_name + + if extra_colums: + raise ValueError(f"There are extra columns {extra_colums}") + + missing_columns = get_list_diff(header, _df.columns) + + for col in missing_columns: + _df.loc[:, col] = np.nan + + try: + df_formatted = _df[header] + + except KeyError: + raise KeyError("Failed to format data according to specified header.") + + return df_formatted + + +def load_b3_scalars(path, sep=esys_conf.settings.general.separator): + """ + This function loads scalars from a csv file. + + Parameters + ---------- + path : str + path of input file of csv format + sep : str + column separator + + Returns + ------- + df : pd.DataFrame + DataFrame with loaded scalars + """ + # Read data + df = pd.read_csv(path, sep=sep) + + df["var_value"] = pd.to_numeric(df["var_value"], errors="coerce").fillna( + df["var_value"] + ) + + df = format_header( + df, HEADER_B3_SCAL, esys_conf.settings.general.scal_index_name + ) + + return df + + +def load_b3_timeseries(path, sep=esys_conf.settings.general.separator): + """ + This function loads a stacked time series from a csv file. + + Parameters + ---------- + path : str + path of input file of csv format + sep : str + column separator + + Returns + ------- + df : pd.DataFrame + DataFrame with loaded time series + """ + # Read data + df = pd.read_csv(path, sep=sep) + + df = format_header( + df, HEADER_B3_TS, esys_conf.settings.general.ts_index_name + ) + + df.loc[:, "series"] = df.loc[:, "series"].apply( + lambda x: ast.literal_eval(x), 1 + ) + + return df + + +def _multi_load(paths, load_func): + r""" + Wraps a load_func to allow loading several dataframes at once. + + Parameters + ---------- + paths : str or list of str + Path or list of paths to data. + load_func : func + A function that is able to load data from a single path + + Returns + ------- + result : pd.DataFrame + DataFrame containing the concatenated results + """ + if isinstance(paths, list): + pass + elif isinstance(paths, str): + return load_func(paths) + else: + raise ValueError(f"{paths} has to be either list of paths or path.") + + dfs = [] + for path in paths: + df = load_func(path) + dfs.append(df) + + result = pd.concat(dfs) + + return result + + +def multi_load_b3_scalars(paths): + r""" + Loads scalars from several csv files. + + Parameters + ---------- + paths : str or list of str + Path or list of paths to data. + + Returns + ------- + pd.DataFrame + """ + return _multi_load(paths, load_b3_scalars) + + +def multi_load_b3_timeseries(paths): + r""" + Loads stacked timeseries from several csv files. + + Parameters + ---------- + paths : str or list of str + Path or list of paths to data. + + Returns + ------- + pd.DataFrame + """ + return _multi_load(paths, load_b3_timeseries) + + +def save_df(df, path): + """ + This function saves data to a csv file. 
+ + Parameters + ---------- + df : pd.DataFrame + DataFrame to be saved + + path : str + Path to save the csv file + """ + # Save scalars to csv file + df.to_csv(path, index=True, sep=esys_conf.settings.general.separator) + + # Print user info + logger.info(f"The DataFrame has been saved to: {path}.") + + +def load_tabular_results_scal(path, sep=esys_conf.settings.general.separator): + r""" + Loads scalars as given by oemof.tabular/oemoflex. + + Parameters + ---------- + paths : str or list of str + Path or list of paths to data. + + Returns + ------- + pd.DataFrame + """ + return pd.read_csv(path, header=[0], sep=sep) + + +def load_tabular_results_ts(path, sep=esys_conf.settings.general.separator): + r""" + Loads timeseries as given by oemof.tabular/oemoflex. + + Parameters + ---------- + paths : str or list of str + Path or list of paths to data. + + Returns + ------- + pd.DataFrame + """ + return pd.read_csv( + path, header=[0, 1, 2], parse_dates=[0], index_col=[0], sep=sep + ) + + +def filter_df(df, column_name, values, inverse=False): + """ + This function filters a DataFrame. + + Parameters + ---------- + df : pd.DataFrame + DataFrame + column_name : string + The column's name to filter. + values : str/numeric/list + String, number or list of strings or numbers to filter by. + inverse : Boolean + If True, the entries for `column_name` and `values` are dropped + and the rest of the DataFrame be retained. + + Returns + ------- + df_filtered : pd.DataFrame + Filtered data. + """ + _df = df.copy() + + if isinstance(values, list): + where = _df[column_name].isin(values) + + else: + where = _df[column_name] == values + + if inverse: + where = ~where + + df_filtered = _df.loc[where] + + return df_filtered + + +def multi_filter_df(df, **kwargs): + r""" + Applies several filters in a row to a DataFrame. + + Parameters + ---------- + df : pd.DataFrame + Data in oemof_b3 format. + kwargs : Additional keyword arguments + Filters to apply + + Returns + ------- + filtered_df : pd.DataFrame + Filtered data + """ + filtered_df = df.copy() + for key, value in kwargs.items(): + filtered_df = filter_df(filtered_df, key, value) + return filtered_df + + +def multi_filter_df_simultaneously(df, inverse=False, **kwargs): + r""" + Applies several filters simultaneously to a DataFrame. + + Parameters + ---------- + df : pd.DataFrame + Data in oemof_b3 format. + inverse : bool + If True, matching entries are dropped + and the rest of the DataFrame kept. + kwargs : Additional keyword arguments + Filters to apply + + Returns + ------- + filtered_df : pd.DataFrame + Filtered data + """ + _df = df.copy() + + all_wheres = [] + + for key, value in kwargs.items(): + if isinstance(value, list): + where = _df[key].isin(value) + + else: + where = _df[key] == value + + all_wheres.append(where) + + all_wheres = pd.concat(all_wheres, 1).all(1) + + if inverse: + all_wheres = ~all_wheres + + df_filtered = _df.loc[all_wheres] + + return df_filtered + + +def update_filtered_df(df, filters): + r""" + Accepts an oemof-b3 Dataframe, filters it, subsequently update + the result with data filtered with other filters. 
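+
+    A minimal sketch, with filters as they typically come from a scenario
+    file's ``filter_scalars`` section (one dict of column/value filters per
+    iteration)::
+
+        filters = {1: {"scenario_key": ["ALL", "2045_scenario"]}}
+        filtered = update_filtered_df(scalars, filters)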
+ + Parameters + ---------- + df : pd.DataFrame + Scalar data in oemof-b3 format to filter + filters : dict of dict + Several filters to be applied subsequently + + Returns + ------- + filtered : pd.DataFrame + """ + assert isinstance(filters, dict) + for value in filters.values(): + assert isinstance(value, dict) + + # Prepare empty dataframe to be updated with filtered data + filtered_updated = pd.DataFrame(columns=HEADER_B3_SCAL) + filtered_updated.index.name = esys_conf.settings.general.scal_index_name + + for iteration, filter in filters.items(): + logger.info(f"Applying set of filters no {iteration}.") + + # Apply set of filters + filtered = multi_filter_df(df, **filter) + + # Update result with new filtered data + filtered_updated = merge_a_into_b( + filtered, + filtered_updated, + how="outer", + on=["name", "region", "carrier", "tech", "var_name"], + verbose=False, + ) + + # inform about filtering updating + logger.info(f"Updated data with data filtered by {filter}") + + return filtered_updated + + +def isnull_any(df): + return df.isna().any().any() + + +def aggregate_units(units): + r""" + This function checks if units that should be aggregated are unique. + If they are not, it raises an error. If they are, it returns the unique + unit. + + Parameters + ---------- + units: + pd.Series of units + + Returns + ------- + unique_unit : str + Unique unit + """ + unique_units = units.unique() + + if len(unique_units) > 1: + raise ValueError("Units are not consistent!") + else: + return unique_units[0] + + +def aggregate_data(df, groupby, agg_method=None): + r""" + This functions aggregates data in oemof-B3-resources format and sums up + by region, carrier, tech or type. + + Parameters + ---------- + df : pd.DataFrame + DataFrame in oemof-B3-resources format. + groupby : list + The columns to group df by + agg_method : dict + Dictionary to specify aggregation method. + + Returns + ------- + df_aggregated : pd.DataFrame + Aggregated data. + """ + # Groupby and aggregate + return df.groupby(groupby, sort=False, dropna=False).agg(agg_method) + + +def aggregate_scalars(df, columns_to_aggregate, agg_method=None): + r""" + This functions aggregates scalar data in oemof-B3-resources format and sums + up by region, carrier, tech or type. + + Parameters + ---------- + df : pd.DataFrame + DataFrame in oemof-B3-resources format. + columns_to_aggregate : string or list + The columns to sum together ('region', 'carrier', 'tech' or 'type). + agg_method : dict + Dictionary to specify aggregation method. + + Returns + ------- + df_aggregated : pd.DataFrame + Aggregated data. + """ + _df = df.copy() + + _df = format_header( + _df, HEADER_B3_SCAL, esys_conf.settings.general.scal_index_name + ) + + if not isinstance(columns_to_aggregate, list): + columns_to_aggregate = [columns_to_aggregate] + + # Define the columns that are split and thus not aggregated + groupby = ["scenario_key", "carrier", "region", "tech", "type", "var_name"] + + groupby = list(set(groupby).difference(set(columns_to_aggregate))) + + # Define how to aggregate if + if not agg_method: + agg_method = { + "var_value": sum, + "name": lambda x: "None", + "var_unit": aggregate_units, + } + + df_aggregated = aggregate_data(df, groupby, agg_method) + + # Assign "ALL" to the columns that where aggregated. 
+ for col in columns_to_aggregate: + df_aggregated[col] = "All" + + # Reset the index + df_aggregated.reset_index(inplace=True) + + df_aggregated = format_header( + df_aggregated, + HEADER_B3_SCAL, + esys_conf.settings.general.scal_index_name, + ) + + return df_aggregated + + +def aggregate_timeseries(df, columns_to_aggregate, agg_method=None): + r""" + This functions aggregates timeseries data in oemof-B3-resources format and + sums up by region, carrier, tech or type. + + Parameters + ---------- + df : pd.DataFrame + DataFrame in oemof-B3-resources format. + columns_to_aggregate : string or list + The columns to sum together ('region', 'carrier', 'tech' or 'type). + agg_method : dict + Dictionary to specify aggregation method. + + Returns + ------- + df_aggregated : pd.DataFrame + Aggregated data. + """ + _df = df.copy() + + _df = format_header( + _df, HEADER_B3_TS, esys_conf.settings.general.ts_index_name + ) + _df.series = _df.series.apply(lambda x: np.array(x)) + + if not isinstance(columns_to_aggregate, list): + columns_to_aggregate = [columns_to_aggregate] + + # Define the columns that are split and thus not aggregated + groupby = [ + "scenario_key", + "region", + "var_name", + "timeindex_start", + "timeindex_stop", + "timeindex_resolution", + ] + + groupby = list(set(groupby).difference(set(columns_to_aggregate))) + + # Define how to aggregate if + if not agg_method: + agg_method = { + "series": sum_series, + "var_unit": aggregate_units, + } + + df_aggregated = aggregate_data(_df, groupby, agg_method) + + # Assign "ALL" to the columns that where aggregated. + for col in columns_to_aggregate: + df_aggregated[col] = "All" + + # Reset the index + df_aggregated.reset_index(inplace=True) + + df_aggregated = format_header( + df_aggregated, HEADER_B3_TS, esys_conf.settings.general.ts_index_name + ) + + return df_aggregated + + +def prepare_attr_name(sc, overwrite): + r""" + This function handles the values of the attribute 'name'. + + It ensures that the name is + 1. set (according to convention) where name is empty and region is fixed + and + 2. checked for all values that are not None. + + If 'overwrite' is true the names will be overwritten with names set + according to the convention. Otherwise the names passed by the user are + used. + + Parameters + ---------- + sc : pd.DataFrame + DataFrame with scalar data in oemof-B3-resources format. + overwrite : Boolean + True if names are overwritten otherwise False. + + Returns + ------- + scalars_set_name : pd.DataFrame + DataFrame made of concatenated DataFrames with formatted names. + + """ + + def get_name(region, carrier, tech): + r""" + This function gets name according to oemof-b3's-naming convention: + --. + + Parameters + ---------- + region : str + region + carrier : str + carrier + tech : str + technology + + Returns + ------- + String containing name according to convention eg. B-ch4-gt. + + """ + return f"{region}-{carrier}-{tech}" + + def get_name_for_df(df): + r""" + This function returns a series of names generated from the convention. + + Parameters + ---------- + df : pd.DataFrame + DataFrame in oemof-B3-resources format. + + Returns + ------- + pd.Series with names set according to convention in get_name. + + """ + # Check if carrier, region and tech exist as columns + if {"carrier", "region", "tech"}.issubset(df.columns): + return df.apply( + lambda x: get_name(x["region"], x["carrier"], x["tech"]), 1 + ) + else: + raise KeyError( + "Please provide a DataFrame that conforms to " + "oemof-B3-resources-format." 
+ ) + + def check_name(df): + r""" + This function checks whether a name given by the user matches the one + from the oemof-B3 naming convention. It prints a warning if expected + differs from given names and prints a list with expected names. + + Parameters + ---------- + df : pd.DataFrame + DataFrame in oemof-B3-resources format. + + """ + # Get name of Dataframe + name_as_given = df["name"] + + # Get name according to convention + name_generated = get_name_for_df(df) + + # Get diff of names + diff_in_name = compare_scalar_data(name_as_given, name_generated) + + # Save unique values of diff to a list and print as warning + expected_names = list(diff_in_name.unique()) + diff_names = list( + compare_scalar_data(name_generated, name_as_given).unique() + ) + if expected_names: + logger.warning( + "The name you have set for some of your scalar data differs " + "from the convention (--). \n" + "We expected but could not find the following name(s): " + f"{expected_names}." + "\nInstead we got the name(s): " + f"{diff_names}" + ) + if overwrite: + logger.warning( + "The names will be overwritten with names following the" + "convention" + ) + + def set_name(df, overwrite): + r""" + This function + 1. checks the name if the name is not empty and + 2. overwrites the name with the generated name if overwrite is true + or the name is empty + + Parameters + ---------- + df : pd.DataFrame + DataFrame in oemof-B3-resources format. + + Returns + ------- + df : pd.DataFrame + DataFrame in oemof-B3-resources format and formatted name. + + """ + all_empty = df["name"].isnull().values.all() + if not all_empty: + check_name(df) + + elif all_empty or overwrite: + name_generated = get_name_for_df(df) + _df = df.copy() # To avoid SettingWithCopyWarning + _df.loc[:, "name"] = name_generated + df = _df + + return df + + def compare_scalar_data(sc_1, sc_2): + r""" + This functions compares the column of two DataFrames + It returns a DataFrame with scalars that diverge in name convention. + + Parameters + ---------- + sc_1 : pd.Series + Series with given values + sc_2 : pd.Series + Series with expected values + + Returns + ------- + diff_name_sc : pd.Series + Series where expected values not found + """ + diff_name_sc = sc_1.compare(sc_2) + + return diff_name_sc["other"] + + # PART 1: Ensure name is set (according to convention) where name is empty + # and region is fixed + # Save values where name is None and region is not "ALL" in new DataFrame + sc_wo_name = sc[sc["name"].isnull()] + sc_add_name = set_name(sc_wo_name, overwrite) + + # PART 2: Ensure name is checked for all values that are not None and where + # region is fixed + sc_with_name = sc[sc["name"].notnull()] + sc_with_name = set_name(sc_with_name, overwrite) + + # PART 3: Concatenate DataFrame with corrected name and DataFrame with set + # name + scalars_set_name = pd.concat([sc_with_name, sc_add_name]) + + return scalars_set_name + + +def expand_regions(scalars, regions, where="ALL"): + r""" + Expects scalars in oemof_b3 format (defined in + ''oemof_b3/schema/scalars.csv'') and regions. + Returns scalars with new rows included for each region in those places + where region equals + `where`. 
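+
+    A minimal sketch ("ABW" is the region used in the model structures of this
+    repository)::
+
+        sc_with_region = expand_regions(scalars, regions=["ABW"], where="ALL")
+        # rows with region "ALL" are replaced by one copy per given region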
+ + Parameters + ---------- + scalars : pd.DataFrame + Data in oemof_b3 format to expand + regions : list + List of regions + where : str + Key that should be expanded + Returns + ------- + sc_with_region : pd.DataFrame + Data with expanded regions in oemof_b3 format + """ + _scalars = format_header( + scalars, HEADER_B3_SCAL, esys_conf.settings.general.scal_index_name + ) + + sc_with_region = _scalars.loc[scalars["region"] != where, :].copy() + + sc_wo_region = _scalars.loc[scalars["region"] == where, :].copy() + + # REGIONALIZATION + if not sc_wo_region.empty: + # Ensure name is empty if region is 'ALL' + # Print user warning if name is not NaN and region is "ALL" + name_overwritten = sc_wo_region[sc_wo_region["name"].notnull()][ + "name" + ].values + if not sc_wo_region["name"].isnull().values.all(): + print( + "User warning: Please leave 'name' empty if you set 'region' " + "to 'ALL'.\n" + "The name you have specified " + f"{name_overwritten} " + f"will be overwritten." + ) + + # Set region + for region in regions: + regionalized = sc_wo_region.copy() + regionalized["region"] = region + + sc_with_region = pd.concat([sc_with_region, regionalized]) + + sc_with_region = sc_with_region.reset_index(drop=True) + + sc_with_region.index.name = esys_conf.settings.general.scal_index_name + + return sc_with_region + + +def merge_a_into_b(df_a, df_b, on, how="left", indicator=False, verbose=True): + r""" + Writes scalar data from df_a into df_b, according to 'on'. Where df_a + provides no data, the values of df_b are used. If how='outer', data from + df_a that is not in df_b will be kept. + + Parameters + ---------- + df_a : pd.DataFrame + DataFrame in oemof_b3 scalars format + df_b : pd.DataFrame + DataFrame in oemof_b3 scalars format + on : list + List of columns to merge on + how : str + 'left' or 'outer'. Default: 'left' + indicator : bool + If True, an indicator column is included. Default: False + + Returns + ------- + merged : pd.DataFrame + DataFrame in oemof_b3 scalars format. + """ + _df_a = df_a.copy() + _df_b = df_b.copy() + + # save df_b's index name and column order + df_b_index_name = _df_b.index.name + df_b_columns = list(_df_b.columns) + if indicator: + df_b_columns.append("_merge") + + # Give some information on how the merge affects the data + set_index_a = set( + map(tuple, pd.Index(_df_a.loc[:, on].replace(np.nan, "NaN"))) + ) + set_index_b = set( + map(tuple, pd.Index(_df_b.loc[:, on].replace(np.nan, "NaN"))) + ) + + if verbose: + a_not_b = set_index_a.difference(set_index_b) + if a_not_b: + if how == "left": + logger.warning( + f"There are {len(a_not_b)} elements in df_a but not in " + "df_b and are lost (choose how='outer' to keep them): " + f"{a_not_b}" + ) + elif how == "outer": + logger.info( + f"There are {len(a_not_b)} elements in df_a that are" + f" added to df_b: {a_not_b}" + ) + + a_and_b = set_index_a.intersection(set_index_b) + logger.info( + f"There are {len(a_and_b)} elements in df_b that are updated by " + "df_a." 
+ ) + + b_not_a = set_index_b.difference(set_index_a) + logger.info( + f"There are {len(b_not_a)} elements in df_b that are unchanged: " + f"{b_not_a}" + ) + + # Merge a with b, ignoring all data in b + merged = _df_b.drop(columns=_df_b.columns.drop(on)).merge( + _df_a, + on=on, + how=how, + indicator=indicator, + sort=False, + ) + + merged.index.name = df_b_index_name + + # Where df_a contains no data, use df_b + merged = merged.reset_index().set_index( + on + ) # First reset, then set index to keep it as a column + + merged.update(_df_b.set_index(on), overwrite=False) + + # Set original index and recover column order + merged = merged.reset_index().set_index(df_b_index_name) + + merged = merged[df_b_columns] + + return merged + + +def check_consistency_timeindex(df, index): + """ + This function assert that values of a column in a stacked DataFrame are + same for all time steps. + + Parameters + ---------- + df : pandas.DataFrame + DataFrame for which the time index is checked + index : string + Index of values to be checked in the DataFrame + + Returns + ------- + value : string + Single value of the series of duplicates + """ + if index == "timeindex_start": + name = "start date" + elif index == "timeindex_stop": + name = "end date" + elif index == "timeindex_resolution": + name = "frequency" + + if np.all(df[index].array == df[index].array[0]): + value = df[index].array[0] + if value is None: + raise TypeError( + f"Your provided data is missing a {name}." + f"Please make sure you pass the {name} with {index}." + ) + else: + return value + else: + raise ValueError( + f"The {name} of your provided data doesn't match for all entries. " + f"Please make sure to pass the {name} with {index}." + ) + + +def stack_timeseries(df): + """ + This function stacks a Dataframe in a form where one series resides in one + row. + + Parameters + ---------- + df : pandas.DataFrame + DataFrame to be stacked + + Returns + ------- + df_stacked : pandas.DataFrame + Stacked DataFrame + """ + _df = df.copy() + + # Assert that _df has a timeindex + if not isinstance(_df.index, pd.DatetimeIndex): + raise TypeError( + "Your data should have a time series as an index of the format " + "'%Y-%m-%d %H:%M:%S'." + ) + + # Assert that frequency match for all time steps + if pd.infer_freq(_df.index) is None: + raise TypeError( + "No frequency of your provided data could be detected." + "Please provide a DataFrame with a specific frequency (eg. 'H' or " + "'T')." + ) + + _df_freq = pd.infer_freq(_df.index) + if _df.index.freqstr is None: + logger.info( + f"The frequency of your data is not specified in the DataFrame, " + f"but is of the following frequency alias: {_df_freq}. " + f"The frequency of your DataFrame is therefore automatically set " + f"to the frequency with this alias." 
+ ) + _df = _df.asfreq(_df_freq) + + # Stack timeseries + df_stacked_cols = [ + "var_name", + "timeindex_start", + "timeindex_stop", + "timeindex_resolution", + "series", + ] + + df_stacked = pd.DataFrame(columns=df_stacked_cols) + + timeindex_start = _df.index.values[0] + timeindex_stop = _df.index.values[-1] + + for column in df.columns: + var_name = column + timeindex_resolution = _df[column].index.freqstr + series = [list(_df[column].values)] + + column_data = [ + var_name, + timeindex_start, + timeindex_stop, + timeindex_resolution, + series, + ] + + dict_stacked_column = dict(zip(df_stacked_cols, column_data)) + df_stacked_column = pd.DataFrame(data=dict_stacked_column) + df_stacked = pd.concat( + [df_stacked, df_stacked_column], ignore_index=True + ) + + # Save name of the index in the unstacked DataFrame as name of the index of + # "timeindex_start" column of stacked DataFrame, so that it can be + # extracted from it when unstacked again. + df_stacked["timeindex_start"].index.name = _df.index.name + + return df_stacked + + +def unstack_timeseries(df): + """ + This function unstacks a Dataframe so that there is a row for each value. + + Parameters + ---------- + df : pandas.DataFrame + DataFrame to be unstacked + + Returns + ------- + df_unstacked : pandas.DataFrame + Unstacked DataFrame + """ + _df = df.copy() + + # Assert that frequency match for all time steps + frequency = check_consistency_timeindex(_df, "timeindex_resolution") + timeindex_start = check_consistency_timeindex(_df, "timeindex_start") + timeindex_stop = check_consistency_timeindex(_df, "timeindex_stop") + + # Warn user if "source" or "comment" in columns of stacked DataFrame + # These two columns will be lost once unstacked + lost_columns = ["source", "comment"] + for col in lost_columns: + if col in list(df.columns): + if not _df[col].isna().all() or _df[col].values.all() == "None": + logger.warning( + f"Caution any remarks in column '{col}' are lost after " + f"unstacking." + ) + + # Process values of series + values_series = [] + for row in _df.iterrows(): + values_series.append(row[1]["series"]) + + values_array = np.array(values_series).transpose() + + # Unstack timeseries + df_unstacked = pd.DataFrame( + values_array, + columns=list(_df["var_name"]), + index=pd.date_range(timeindex_start, timeindex_stop, freq=frequency), + ) + + # Get and set index name from and to index name of "timeindex_start". + # If it existed in the origin DataFrame, which has been stacked, it will be + # set to that one. + df_unstacked.index.name = _df["timeindex_start"].index.name + + return df_unstacked + + +def unstack_var_name(df): + r""" + Given a DataFrame in oemof_b3 scalars format, this function will unstack + the variables. The returned DataFrame will have one column for each + var_name. + + Parameters + ---------- + df : pd.DataFrame + Stacked scalar data. + Returns + ------- + unstacked : pd.DataFrame + Unstacked scalar data. + """ + _df = df.copy() + + _df = format_header( + _df, HEADER_B3_SCAL, esys_conf.settings.general.scal_index_name + ) + + _df = _df.set_index( + [ + "scenario_key", + "name", + "region", + "carrier", + "tech", + "type", + "var_name", + ] + ) + + unstacked = _df.unstack("var_name") + + new_index = _df.index.droplevel(-1).unique() + unstacked = unstacked.reindex(new_index) + + return unstacked + + +def stack_var_name(df): + r""" + Given a DataFrame, this function will stack the variables and format + the results in b3-format. 
+ + Parameters + ---------- + df : pd.DataFrame + DataFrame with one column per variable + + Returns + ------- + stacked : pd.DataFrame + DataFrame with a column "var_name" and "var_value" + """ + assert isinstance(df, pd.DataFrame) + + _df = df.copy() + + _df.columns.name = "var_name" + + stacked = _df.stack("var_name") + + stacked.name = "var_value" + + stacked = pd.DataFrame(stacked).reset_index() + + stacked = sort_values(stacked) + + stacked = format_header( + stacked, HEADER_B3_SCAL, esys_conf.settings.general.scal_index_name + ) + + return stacked + + +def round_setting_int(df, decimals): + r""" + Rounds the columns of a DataFrame to the specified decimals. For zero + decimals, it changes the dtype to Int64. Tolerates NaNs. + """ + _df = df.copy() + + for col, dec in decimals.items(): + if col not in _df.columns: + logger.warning( + f"No column named '{col}' found when trying to round." + ) + continue + elif dec == 0: + dtype = "Int64" + else: + dtype = float + + _df[col] = ( + pd.to_numeric(_df[col], errors="coerce").round(dec).astype(dtype) + ) + + return _df + + +def prepare_b3_timeseries(df_year, **kwargs): + """ + This function takes time series in column format, stacks them, assigns + values to additional columns and formats the header in order to prepare + data in a b3 time series format + + Parameters + ---------- + df_year : pd.Dataframe + DataFrame with total normalized data in year to be processed + kwargs : Additional keyword arguments + time series data (region, scenario key and unit) + + Returns + ------- + df_stacked : pd.DataFrame + DataFrame that contains stacked time series + + """ + # Stack time series with data of a year + df_year_stacked = stack_timeseries(df_year) + + # Add region, scenario key and unit to stacked time series + for key, value in kwargs.items(): + df_year_stacked[key] = value + + # Make sure that header is in correct format + df_year_stacked = format_header( + df_year_stacked, HEADER_B3_TS, esys_conf.settings.general.ts_index_name + ) + + return df_year_stacked + + +def _get_component_id_in_tuple(oemof_tuple, delimiter="-"): + r""" + Returns the id of the component in an oemof tuple. + If the component is first in the tuple, will return 0, + if it is second, 1. + + Parameters + ---------- + oemof_tuple : tuple + tuple of the form (node, node) or (node, None). + + Returns + ------- + component_id : int + Position of the component in the tuple + """ + # TODO: This is a dummy implementation that can easily fail + logger.warning( + "The implementation of _get_component_id_in_tuple is perliminary and " + "not very robust." + ) + return max( + enumerate(oemof_tuple), key=lambda x: len(x[1].split(delimiter)) + )[0] + + +def _get_component_from_tuple(_tuple, delimiter="-"): + # TODO: This is a dummy implementation that can easily fail + logger.warning( + "The implementation of _get_component_from_tuple is perliminary and " + "not very robust." 
+ ) + return max(_tuple, key=lambda x: len(x.split(delimiter))) + + +def _get_direction(oemof_tuple): + comp_id = _get_component_id_in_tuple(oemof_tuple) + + directions = { + 0: "out", + 1: "in", + } + + other_id = { + 0: 1, + 1: 0, + }[comp_id] + + if oemof_tuple[other_id] == "nan": + return "" + else: + return directions[comp_id] + + +def _get_region_carrier_tech_from_component(component, delimiter="-"): + + if isinstance(component, oemof.tabular.facades.Facade): + region = component.region + carrier = component.carrier + tech = component.tech + + elif isinstance(component, str): + split = component.split(delimiter) + + if len(split) == 3: + region, carrier, tech = split + + if len(split) > 3: + + region, carrier, tech = "-".join(split[:2]), *split[2:] + warnings.warn( + f"Could not get region, carrier and tech by splitting " + f"component name into {split}. Assumed region='{region}', " + f"carrier='{carrier}', tech='{tech}'" + ) + + return region, carrier, tech + + +def oemof_results_ts_to_oemof_b3(df): + r""" + Transforms data in oemof-tabular/oemoflex format to stacked b3 timeseries + format. + + Parameters + ---------- + df : pd.DataFrame + Time series in oemof-tabular/oemoflex format. + + Returns + ------- + df : pd.DataFrame + Time series in oemof-tabular/oemoflex format. + """ + _df = df.copy() + + # The columns of oemof results are multiindex with 3 levels: (from, to, + # type). This is mapped to var_name = _ with "in" if bus + # comes first (from), "out" if bus is second (to). If the multiindex entry + # is of the form (component, None, type), then var_name = type + component = df.columns.droplevel(2).map(_get_component_from_tuple) + + # specify direction in var_name + direction = df.columns.droplevel(2).map(_get_direction) + + var_name = df.columns.get_level_values(2) + + var_name = list(zip(var_name, direction)) + + var_name = list(map(lambda x: "_".join(filter(None, x)), var_name)) + + # Introduce arbitrary unique columns before stacking. + _df.columns = range(len(_df.columns)) + + _df = stack_timeseries(_df) + + # assign values to other columns + _df["region"], _df["carrier"], _df["tech"] = zip( + *component.map(_get_region_carrier_tech_from_component) + ) + + _df["name"] = component + + _df["var_name"] = var_name + + # ensure that the format follows b3 schema + _df = format_header(_df, HEADER_B3_TS, "id_ts") + + return _df + + +class ScalarProcessor: + r""" + This class allows to filter and unstack scalar data in a way that makes + processing simpler. + """ + + def __init__(self, scalars): + self.scalars = scalars + + def get_unstacked_var(self, var_name): + r""" + Filters the scalars for the given var_name and returns the data in + unstacked form. + + Parameters + ---------- + var_name : str + Name of the variable + + Returns + ------- + result : pd.DataFrame + Data in unstacked form. + """ + _df = filter_df(self.scalars, "var_name", var_name) + + if _df.empty: + raise ValueError(f"No entries for {var_name} in df.") + + _df = unstack_var_name(_df) + + result = _df.loc[:, "var_value"] + + return result + + def drop(self, var_name): + + self.scalars = filter_df( + self.scalars, "var_name", var_name, inverse=True + ) + + def append(self, var_name, data): + r""" + Accepts a Series or DataFrame in unstacked form and appends it to the + scalars. + + Parameters + ---------- + var_name : str + Name of the data to append + data : pd.Series or pd.DataFrame + Data to append + + Returns + ------- + None + """ + assert not data.isna().all(), "Cannot append all NaN data." 
+ + _df = data.copy() + + if isinstance(_df, pd.Series): + _df.name = var_name + + _df = pd.DataFrame(_df) + + _df = stack_var_name(_df) + + self.scalars = pd.concat([self.scalars, _df]) diff --git a/digipipe/esys/esys/tools/equate_flows.py b/digipipe/esys/esys/tools/equate_flows.py new file mode 100644 index 00000000..53bc1040 --- /dev/null +++ b/digipipe/esys/esys/tools/equate_flows.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +"""Constraints to relate variables in an existing model. + +This script is copied from oemof.solph +https://github.com/oemof/oemof-solph/blob/features/equate-flows/src/oemof/solph +/constraints/equate_flows.py +and can be deleted once oemof.tabular is updated to the new oemof version and +oemof-B3 installs oemof.solph version (or branch) containing this script. + +SPDX-FileCopyrightText: Uwe Krien +SPDX-FileCopyrightText: Simon Hilpert + +SPDX-License-Identifier: MIT + +""" + +from pyomo import environ as po + + +def equate_flows(model, flows1, flows2, factor1=1, name="equate_flows"): + r""" + Adds a constraint to the given model that sets the sum of two groups of + flows equal or proportional by a factor. + """ + + def _equate_flow_groups_rule(m): + for ts in m.TIMESTEPS: + sum1_t = sum(m.flow[fi, fo, ts] for fi, fo in flows1) + sum2_t = sum(m.flow[fi, fo, ts] for fi, fo in flows2) + expr = sum1_t * factor1 == sum2_t + if expr is not True: + getattr(m, name).add(ts, expr) + + setattr( + model, + name, + po.Constraint(model.TIMESTEPS, noruleinit=True), + ) + setattr( + model, + name + "_build", + po.BuildAction(rule=_equate_flow_groups_rule), + ) + + return model + + +def equate_flows_by_keyword( + model, keyword1, keyword2, factor1=1, name="equate_flows" +): + r""" + This wrapper for equate_flows allows to equate groups of flows by using a + keyword instead of a list of flows. + """ + flows = {} + for n, keyword in enumerate([keyword1, keyword2]): + flows[n] = [] + for (i, o) in model.flows: + if hasattr(model.flows[i, o], keyword): + flows[n].append((i, o)) + + return equate_flows(model, flows[0], flows[1], factor1=factor1, name=name) diff --git a/digipipe/esys/esys/tools/timing.py b/digipipe/esys/esys/tools/timing.py new file mode 100644 index 00000000..5aeae166 --- /dev/null +++ b/digipipe/esys/esys/tools/timing.py @@ -0,0 +1,45 @@ +r""" +A Timer class, adapted from https://github.com/realpython/codetiming +""" +import datetime +import time + + +class Timer: + """ + A timer utility to measure the elapsed time of code blocks. + adapted from https://github.com/realpython/codetiming + + Usage: + - Create a new timer with a descriptive text message. + - Start the timer with the `start()` method. + - Stop the timer with the `stop()` method to report the elapsed time. 
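+
+    A minimal usage sketch (``build()`` stands for any code to be timed)::
+
+        with Timer("Building the datapackage."):
+            build()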
+ """ + + def __init__(self, text, logger=print): + self._start_time = None + self.text = text + self.logger = logger + + def start(self): + """Start a new timer""" + self._start_time = time.perf_counter() + + def stop(self): + """Stop the timer, and report the elapsed time""" + elapsed_time = time.perf_counter() - self._start_time + self._start_time = None + if self.logger: + self.logger( + self.text + + f" Elapsed time: {datetime.timedelta(seconds=elapsed_time)}" + ) + + def __enter__(self): + """Start a new timer as a context manager""" + self.start() + return self + + def __exit__(self, *exc_info): + """Stop the context manager timer""" + self.stop() diff --git a/digipipe/esys/scenarios/2045_scenario.yml b/digipipe/esys/scenarios/2045_scenario.yml new file mode 100644 index 00000000..a2ab26a6 --- /dev/null +++ b/digipipe/esys/scenarios/2045_scenario.yml @@ -0,0 +1,45 @@ +# This scenario config file contains information for several steps in the model +# pipeline, i.e. build_datapackage, parametrize, optimize and potentially also +# postprocess or plot. + +# build_datapackage + +name: 2045_scenario +label: 2045-Scenario + +datetimeindex: + start: + "2019-01-01 00:00:00" + freq: + "H" + periods: + 8760 + +model_structure: + model_structure_full + +# parametrize + +paths_scalars: + - store/datasets/esys_raw/data/scalars/default_scalars.csv + - store/appdata/esys/_resources/scal_costs_efficiencies.csv + +filter_scalars: + 1: + scenario_key: + - "ALL" + - "2045_scenario" + +paths_resources: + + +paths_timeseries: + - store/datasets/esys_raw/data/time_series/ts_efficiencies.csv + - store/datasets/esys_raw/data/time_series/ts_feedin.csv + - store/datasets/esys_raw/data/time_series/ts_load.csv + +filter_timeseries: + scenario_key: + - "empty_ts" + timeindex_start: + "2019-01-01 00:00:00" diff --git a/digipipe/esys/scripts/build_datapackage.py b/digipipe/esys/scripts/build_datapackage.py new file mode 100644 index 00000000..16085c09 --- /dev/null +++ b/digipipe/esys/scripts/build_datapackage.py @@ -0,0 +1,346 @@ +# coding: utf-8 +r""" +Inputs +------- +scenario_specs : str + ``scenarios/{scenario}.yml``: path of input file (.yml) containing scenario + specifications +destination : str + ``results/{scenario}/preprocessed``: path of output directory +logfile : str + ``results/{scenario}/{scenario}.log``: path to logfile + +Outputs +--------- +oemoflex.EnergyDatapackage + EnergyDatapackage that can be read by oemof.tabular, with data (scalars and + timeseries) as csv and metadata (describing resources and foreign key + relations) as json. + +Description +------------- +The script creates an empty EnergyDatapackage from the specifications given in +the scenario_specs, fills it with scalar and timeseries data, infers the +metadata and saves it to the given destination. Further, additional parameters +like emission limit are saved in a separate file. + +Explanations about the structure of the preprocessed datapackage can be found +in section :ref:`Build datapackages` of the +`docu `_. 
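+
+A minimal invocation sketch (within the pipeline the script is normally called
+by snakemake; the paths follow the patterns given above)::
+
+    python build_datapackage.py scenarios/2045_scenario.yml \
+        results/2045_scenario/preprocessed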
+ +""" +import os +import sys +from collections import OrderedDict + +import pandas as pd +from oemoflex import config as oemoflex_config +from oemoflex.model.datapackage import EnergyDataPackage + +from digipipe.esys.esys.config import esys_conf +from digipipe.esys.esys.config.esys_conf import load_yaml +from digipipe.esys.esys.model import ( + bus_attrs_update, + component_attrs_update, + foreign_keys_update, + model_structures, +) +from digipipe.esys.esys.tools.data_processing import ( + expand_regions, + filter_df, + multi_load_b3_scalars, + multi_load_b3_timeseries, + prepare_attr_name, + save_df, + unstack_timeseries, + update_filtered_df, +) + + +def update_with_checks(old, new): + r""" + Updates a Series or DataFrame with new data. Raises a warning if there is + new data that is not in the index of the old data. + + Parameters + ---------- + old : pd.Series or pd.DataFrame + Old Series or DataFrame to update + new : pd.Series or pd.DataFrame + New Series or DataFrame + + Returns + ------- + None + """ + # Check if some data would get lost + if not new.index.isin(old.index).all(): + logger.warning("Index of new data is not in the index of old data.") + + try: + # Check if it overwrites by setting errors = 'raise' + old.update(new, errors="raise") + except ValueError: + old.update(new, errors="ignore") + logger.warning("Update overwrites existing data.") + + +def parametrize_scalars(edp, scalars, filters): + r""" + Parametrizes an oemoflex.EnergyDataPackage with scalars. Accepts an + OrderedDict of filters that is used to filter the scalars and + subsequently update the EnergyDatapackage. + + Parameters + ---------- + edp : oemoflex.EnergyDatapackage + EnergyDatapackage to parametrize + scalars : pd.DataFrame in oemof_B3-Resources format. + Scalar data + filters : OrderedDict + Filters for the scalar data + + Returns + ------- + edp : oemoflex.EnergyDatapackage + Parametrized EnergyDatapackage + """ + edp.stack_components() + + # apply filters subsequently + filtered = update_filtered_df(scalars, filters) + + # set index to component name and var_name + filtered = filtered.set_index(["name", "var_name"]).loc[:, "var_value"] + + # check if there are duplicates after setting index + duplicated = filtered.loc[filtered.index.duplicated()] + + if duplicated.any(): + raise ValueError( + f"There are duplicates in the scalar data: {duplicated}" + ) + + update_with_checks(edp.data["component"], filtered) + + edp.unstack_components() + + logger.info(f"Updated DataPackage with timeseries from '{paths_scalars}'.") + + return edp + + +def parametrize_sequences(edp, ts, filters): + r""" + Parametrizes an oemoflex.EnergyDataPackage with timeseries. + + Parameters + ---------- + edp : oemoflex.EnergyDatapackage + EnergyDatapackage to parametrize + ts : pd.DataFrame in oemof_B3-Resources format. 
+ Timeseries data + filters : dict + Filters for timeseries data + + Returns + ------- + edp : oemoflex.EnergyDatapackage + Parametrized EnergyDatapackage + """ + # Filter timeseries + _ts = ts.copy() + + for key, value in filters.items(): + _ts = filter_df(_ts, key, value) + + # Group timeseries and parametrize EnergyDatapackage + ts_groups = _ts.groupby("var_name") + + for name, group in ts_groups: + + data = group.copy() # avoid pandas SettingWithCopyWarning + + data.loc[:, "var_name"] = ( + data.loc[:, "region"] + "-" + data.loc[:, "var_name"] + ) + + data_unstacked = unstack_timeseries(data) + + edp.data[name] = data_unstacked + + edp.data[name].index.name = "timeindex" + + logger.info( + f"Updated DataPackage with timeseries from '{paths_timeseries}'." + ) + + return edp + + +def load_additional_scalars(scalars, filters): + """Loads additional scalars like the emission limit and filters by + 'scenario_key'""" + # get electricity/gas relations and parameters for the calculation of + # emission_limit + el_gas_rel = scalars.loc[ + scalars.var_name == esys_conf.settings.build_datapackage.el_gas_relation + ] + emissions = scalars.loc[ + scalars.carrier == esys_conf.settings.build_datapackage.emission + ] + + # get `output_parameters` of backpressure components as they are not taken + # into consideration in oemof.tabular so far. They are added to the + # components' output flow towards the heat bus in script `optimize.py`. + bpchp_out = scalars.loc[ + (scalars.tech == "bpchp") & (scalars.var_name == "output_parameters") + ] + + # concatenate data for filtering + df = pd.concat([el_gas_rel, emissions, bpchp_out]) + + # subsequently apply filters + filtered_df = update_filtered_df(df, filters) + + # calculate emission limit and prepare data frame in case all necessary data + # is available + _filtered_df = filtered_df.copy().set_index("var_name") + try: + emission_limit = calculate_emission_limit( + _filtered_df.at["emissions_1990", "var_value"], + _filtered_df.at["emissions_not_modeled", "var_value"], + _filtered_df.at["emission_reduction_factor", "var_value"], + ) + except KeyError: + emission_limit = None + + emission_limit_df = pd.DataFrame( + { + "var_name": "emission_limit", + "var_value": emission_limit, + "carrier": "emission", + "var_unit": "kg_CO2_eq", + "scenario_key": "ALL", + }, + index=[0], + ) + + # add emission limit to filtered additional scalars and adapt format of + # data frame + add_scalars = pd.concat([filtered_df, emission_limit_df], sort=False) + add_scalars.reset_index(inplace=True, drop=True) + add_scalars.index.name = "id_scal" + + return add_scalars + + +def save_additional_scalars(additional_scalars, destination): + """Saves `additional_scalars` to additional_scalar_file in `destination`""" + filename = os.path.join( + destination, + esys_conf.settings.build_datapackage.additional_scalars_file, + ) + save_df(additional_scalars, filename) + + logger.info(f"Saved additional scalars to '{filename}'.") + + +def calculate_emission_limit( + emissions_1990, emissions_not_modeled, emission_reduction_factor +): + """Calculates the emission limit. 
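+    For illustration (see the formula below): with emissions_1990 = 100,
+    emission_reduction_factor = 0.95 and emissions_not_modeled = 2, the
+    resulting limit is 100 * (1 - 0.95) - 2 = 3.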
+ Emission limit is calculated by + emissions_1990 * (1 - emission_reduction_factor) - emissions_not_modeled""" + + return ( + emissions_1990 * (1 - emission_reduction_factor) - emissions_not_modeled + ) + + +if __name__ == "__main__": + scenario_specs = sys.argv[1] + + destination = sys.argv[2] + + logger = esys_conf.add_snake_logger("build_datapackage") + + scenario_specs = load_yaml(scenario_specs) + + model_structure = model_structures[scenario_specs["model_structure"]] + + oemoflex_config.config.settings.SEPARATOR = ( + esys_conf.settings.general.separator + ) + + # setup empty EnergyDataPackage + datetimeindex = pd.date_range( + start=scenario_specs["datetimeindex"]["start"], + freq=scenario_specs["datetimeindex"]["freq"], + periods=scenario_specs["datetimeindex"]["periods"], + ) + + # setup default structure + edp = EnergyDataPackage.setup_default( + basepath=destination, + datetimeindex=datetimeindex, + bus_attrs_update=bus_attrs_update, + component_attrs_update=component_attrs_update, + name=scenario_specs["name"], + regions=model_structure["regions"], + links=model_structure["links"], + busses=model_structure["busses"], + components=model_structure["components"], + ) + + # parametrize scalars + paths_scalars = scenario_specs["paths_scalars"] + + scalars = multi_load_b3_scalars(paths_scalars) + + # Replace 'ALL' in the column regions by the actual regions + scalars = expand_regions(scalars, model_structure["regions"]) + + # Check and set attribute 'name' + scalars = prepare_attr_name( + scalars, + esys_conf.settings.build_datapackage.overwrite_name, + ) + + # get filters for scalars + filters = OrderedDict(sorted(scenario_specs["filter_scalars"].items())) + + # load additional scalars like "emission_limit" and filter by `filters` in + # 'scenario_key' + additional_scalars = load_additional_scalars( + scalars=scalars, filters=filters + ) + + # Drop those scalars that do not belong to a specific component + scalars = scalars.loc[~scalars["name"].isna()] + + # filter and parametrize scalars + edp = parametrize_scalars(edp, scalars, filters) + + # parametrize timeseries + paths_timeseries = scenario_specs["paths_timeseries"] + + ts = multi_load_b3_timeseries(paths_timeseries) + + filters = scenario_specs["filter_timeseries"] + + edp = parametrize_sequences(edp, ts, filters) + + # save to csv + edp.to_csv_dir(destination) + + logger.info(f"Saved datapackage to '{destination}'.") + + save_additional_scalars( + additional_scalars=additional_scalars, destination=destination + ) + + logger.info(f"Saved additional_scalars to '{destination}'.") + + # add metadata + edp.infer_metadata(foreign_keys_update=foreign_keys_update) diff --git a/digipipe/esys/scripts/create_empty_scalars.py b/digipipe/esys/scripts/create_empty_scalars.py new file mode 100644 index 00000000..acabbcbd --- /dev/null +++ b/digipipe/esys/scripts/create_empty_scalars.py @@ -0,0 +1,209 @@ +# coding: utf-8 +r""" +Inputs +------- +scenarios_dir : str + ``scenarios``: path to scenarios directory +destination : str + ``raw/scalars/empty_scalars.csv``: path of output directory for empty + scalars of all scenarios + +Outputs +--------- +pandas.DataFrame + Empty scalar in oemof-B3 resource format. + +Description +------------- +The script creates an empty DataFrame for scalar data that serve as a template +for input data. 
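+
+A minimal invocation sketch (argument order as read in the ``__main__`` block;
+the paths follow the patterns given above)::
+
+    python create_empty_scalars.py scenarios raw/scalars/empty_scalars.csv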
+""" +import os +import sys + +import pandas as pd +from oemoflex.model.datapackage import EnergyDataPackage + +from digipipe.esys.esys import model +from digipipe.esys.esys.config.esys_conf import load_yaml, settings +from digipipe.esys.esys.model import bus_attrs_update, model_structures +from digipipe.esys.esys.tools.data_processing import ( + HEADER_B3_SCAL, + format_header, + load_b3_scalars, + save_df, + sort_values, +) + +NON_REGIONAL = settings.create_empty_scalars.non_regional + + +def get_edp_from_scenario(scenario_specs): + model_structure = model_structures[scenario_specs["model_structure"]] + + # setup empty EnergyDataPackage + datetimeindex = pd.date_range( + start=scenario_specs["datetimeindex"]["start"], + freq=scenario_specs["datetimeindex"]["freq"], + periods=scenario_specs["datetimeindex"]["periods"], + ) + + # setup default structure + edp = EnergyDataPackage.setup_default( + basepath=destination, + datetimeindex=datetimeindex, + bus_attrs_update=bus_attrs_update, + component_attrs_update=component_attrs_update, + name=scenario_specs["name"], + regions=model_structure["regions"], + links=model_structure["links"], + busses=model_structure["busses"], + components=model_structure["components"], + ) + + edp.stack_components() + + return edp + + +def format_input_scalars(df): + _df = df.copy() + + _df = format_header(_df, HEADER_B3_SCAL, "id_scal") + + # Keep only those rows whose values are not set + if settings.create_empty_scalars.drop_default_scalars: + _df = _df.loc[_df.loc[:, "var_value"].isna()] + + # Combine those parameters that are valid for all regions + _df.loc[_df["var_name"].isin(NON_REGIONAL), ["name", "region"]] = [ + None, + "ALL", + ] + + _df.drop_duplicates(inplace=True) + + _df = sort_values(_df) + + return _df + + +def expand_scalars(df, column, where, expand): + _df = df.copy() + + _df_cc = _df.loc[df[column] == where].copy() + + _df_wo_cc = _df.loc[df[column] != where].copy() + + for var in expand: + + d = _df_cc.copy() + + d[column] = var + + _df_wo_cc = pd.concat([_df_wo_cc, d]) + + _df_wo_cc = sort_values(_df_wo_cc) + + return _df_wo_cc + + +def add_new_entry_to_scalars(sc, new_entry_dict): + sc = sc.append(new_entry_dict, ignore_index=True) + + return sc + + +def save_empty_scalars(sc, path): + + if os.path.exists(path): + all_sc = load_b3_scalars(path) + + if all_sc.empty: + all_sc = sc + else: + all_sc = all_sc.append(sc, ignore_index=True) + all_sc.index.name = sc.index.name + + else: + all_sc = sc + + all_sc.drop_duplicates(inplace=True) + + save_df(all_sc, path) + + +if __name__ == "__main__": + scenarios_dir = sys.argv[1] + + destination = sys.argv[2] + + scenarios = os.listdir(scenarios_dir) + + for scenario_specs in scenarios: + component_attrs_update = load_yaml( + os.path.join(model.here, "component_attrs_update.yml") + ) + + scenario_specs = load_yaml(os.path.join(scenarios_dir, scenario_specs)) + + edp = get_edp_from_scenario(scenario_specs) + + components = edp.data["component"].reset_index() + + empty_scalars = format_input_scalars(components) + + # set scenario name + empty_scalars.loc[:, "scenario_key"] = scenario_specs["name"] + + # if empty raw scalars should be created, reverse the annuisation as + # well. + # if empty resources scalars are needed, set this to False. 
+ raw_scalars = True + if raw_scalars: + empty_scalars = expand_scalars( + empty_scalars, + column="var_name", + where="capacity_cost", + expand=["capacity_cost_overnight", "lifetime", "fixom_cost"], + ) + + empty_scalars = expand_scalars( + empty_scalars, + column="var_name", + where="storage_capacity_cost", + expand=[ + "storage_capacity_cost_overnight", + "storage_fixom_cost", + ], + ) + + # Add wacc + wacc_dict = {"scenario_key": scenario_specs["name"]} + wacc_dict.update(settings.create_empty_scalars.wacc) + empty_scalars = add_new_entry_to_scalars(empty_scalars, wacc_dict) + + # Add emission limits + emissions_not_modeled = {"scenario_key": scenario_specs["name"]} + emissions_not_modeled.update( + settings.create_empty_scalars.emissions_not_modeled + ) + empty_scalars = add_new_entry_to_scalars( + empty_scalars, emissions_not_modeled + ) + + emissions_1990 = {"scenario_key": scenario_specs["name"]} + emissions_1990.update(settings.create_empty_scalars.emissions_1990) + empty_scalars = add_new_entry_to_scalars(empty_scalars, emissions_1990) + + emission_reduction_factor = {"scenario_key": scenario_specs["name"]} + emission_reduction_factor.update( + settings.create_empty_scalars.emission_reduction_factor + ) + empty_scalars = add_new_entry_to_scalars( + empty_scalars, emission_reduction_factor + ) + + empty_scalars = sort_values(empty_scalars) + + save_empty_scalars(empty_scalars, destination) diff --git a/digipipe/esys/scripts/create_empty_ts.py b/digipipe/esys/scripts/create_empty_ts.py new file mode 100644 index 00000000..88037b8a --- /dev/null +++ b/digipipe/esys/scripts/create_empty_ts.py @@ -0,0 +1,360 @@ +# coding: utf-8 +r""" +Inputs +------- +scenarios_dir : str + ``esys/scenarios``: path to scenarios directory +destination : str + ``store/datasets/esys_raw/time_series/empty_ts_efficiencies.csv``: path of + output directory for empty ts with efficiencies of all scenarios + ``store/datasets/esys_raw/time_series/empty_ts_feedin.csv``: path of output + directory for empty ts with feedins of all scenarios + ``store/datasets/esys_raw/time_series/empty_ts_load.csv``: path of output + directory for empty ts with loads of all scenarios + +Outputs +--------- +pandas.DataFrame + Empty ts in oemof-B3 resource format. + +Description +------------- +The script creates empty DataFrames for load, feed-in and efficiency time +series data that serve as template for input data. 
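+
+A minimal invocation sketch (argument order as read in the ``__main__`` block;
+the paths follow the patterns given above)::
+
+    python create_empty_ts.py esys/scenarios \
+        store/datasets/esys_raw/time_series/empty_ts_load.csv \
+        store/datasets/esys_raw/time_series/empty_ts_feedin.csv \
+        store/datasets/esys_raw/time_series/empty_ts_efficiencies.csv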
+""" +import os +import sys +from datetime import datetime + +import numpy as np +import pandas as pd +from oemoflex.model.model_structure import module_path + +from digipipe.esys.esys import model +from digipipe.esys.esys.config.esys_conf import load_yaml, settings +from digipipe.esys.esys.model import model_structures +from digipipe.esys.esys.tools.data_processing import ( + HEADER_B3_TS, + stack_timeseries, +) + + +def get_sub_dict(subsub_key, _dict): + """ + This function extracts a subsub-dictionary from a dictionary using a + subsub-key + + Inputs + ------- + subsub_key : str + Key of the subsub-dictionary + + _dict : dict + Dictionary with two inner dictionaries + + Outputs + ------- + subsub_dict : dictionary + Subsub-dictionary + + """ + + subsub_dict = [ + subsubdict + for sub_dict in _dict.values() + for subsubdict in sub_dict.values() + if subsub_key in subsubdict + ] + + return subsub_dict + + +def create_empty_ts_with_zero_or_nan_values(periods, date_rng, name): + """ + Returns a pandas DataFrame with time series values set to either zeros or + NaNs, based on settings.yaml + + Parameters + ---------- + periods : int + Number of periods in the time series + date_rng : pd.DatetimeIndex + Datetime index specifying start and end dates of the time series + name : str + Name of the time series column in the DataFrame + + Returns + ------- + df : pd.DataFrame + A pandas DataFrame containing the time series values, with 'name' as + the column name. + + Raises + ------ + KeyError + If settings.create_empty_ts.ts_values is not set to either 'zeros' or + 'empty'. + """ + ts_values = settings.create_empty_ts.ts_values + + if ts_values == "zeros": + df = pd.DataFrame( + np.zeros((periods, 1)), index=date_rng, columns=[name] + ) + elif ts_values == "empty": + df = pd.DataFrame( + np.empty((periods, 1)) * np.nan, index=date_rng, columns=[name] + ) + else: + raise KeyError( + f"{ts_values} is not a valid option. Valid options are: 'zeros' or" + f"'empty'. 
Please provide a valid value for ts_values in " + f"settings.yaml" + ) + + return df + + +def create_empty_ts(name): + """ + This function provides a Dataframe with a time series of zeros + according to the start, periods and freq of the scenario specifications + + Parameters + ---------- + name : str + Name of the ts + + Returns + ------- + df : Dataframe + Dataframe containing ts with zeros as values and name as column name + + """ + datetime_format = settings.create_empty_ts.datetime_format + + # Get start date from scenario specifications + start = datetime.strptime( + scenario_specs["filter_timeseries"]["timeindex_start"], datetime_format + ) + + # Get periods and freq from scenario specifications + periods = scenario_specs["datetimeindex"]["periods"] + freq = scenario_specs["datetimeindex"]["freq"] + + # Get date range from start periods and freq + date_rng = pd.date_range(start=start, periods=periods, freq=freq) + + # Create DataFrame with ts of zeros from date range and name + df = create_empty_ts_with_zero_or_nan_values(periods, date_rng, name) + + return df + + +def get_df_of_all_empty_ts(profile_names, _region): + """ + This function provides a Dataframe with all ts of a profile (load, feedin + or efficiency) + + Inputs + ------- + profile_names : list + List with names of profiles (loads, feedins or efficiencies) + + _region : str + Region + + Outputs + ------- + ts_df : Dataframe + Dataframe with empty time series (consisting of zeros) + + """ + ts_df = pd.DataFrame(columns=HEADER_B3_TS) + + for name in profile_names: + df = create_empty_ts(name) + + # Stack Dataframe + stacked_df = stack_timeseries(df) + + # Reindex according to time series in schema directory + stacked_df = stacked_df.reindex(columns=HEADER_B3_TS) + + # Add region and scenario_specs to Dataframe + stacked_df["region"] = _region + stacked_df["scenario_key"] = settings.create_empty_ts.filter_ts + + # Append Dataframe to ts_profile_df and add index name + ts_df = pd.concat([ts_df, stacked_df], ignore_index=True) + ts_df.index.name = "id_ts" + + return ts_df + + +def drop_duplicates(_df): + """ + Remove duplicate rows from a pandas DataFrame based on specified columns. + + Parameters + ---------- + _df : pandas DataFrame + The DataFrame to remove duplicates from. + + Returns + ------- + _df : pandas DataFrame + The updated DataFrame with duplicate rows removed. + + Notes + ----- + Duplicate rows are determined based on the values in the specified columns. + By default, all columns except the "series" column are used to determine + duplicates. If there are multiple rows with the same values in the + specified columns, only the first occurrence is kept and subsequent + occurrences are dropped. 
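+
+    Examples
+    --------
+    A minimal sketch on a toy frame (columns reduced for brevity; the real
+    data carries the full oemof-B3 time series header)::
+
+        >>> df = pd.DataFrame(
+        ...     {"region": ["B", "B"], "var_name": ["load", "load"],
+        ...      "series": [[0, 0], [0, 0]]}
+        ... )
+        >>> len(drop_duplicates(df))
+        1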
+ """ + columns = [col for col in _df.columns if col != "series"] + + _df = _df.drop_duplicates( + subset=columns, + ignore_index=True, + ) + + return _df + + +def save_ts(_df, path): + """ + This function saves time series that contain values of datetime format + Datetime format used: "%Y-%m-%d %H:%M:%S" + + Parameters + ---------- + _df : Dataframe + Dataframe with value(s) of datetime format + + path : str + Path where df is saved to + + + """ + if not os.path.exists(path) or settings.create_empty_ts.overwrite: + + _df.index.name = "id_ts" + + _df.to_csv( + path, + index=True, + date_format="%Y-%m-%d %H:%M:%S", + sep=settings.general.separator, + ) + + +if __name__ == "__main__": + scenarios_dir = sys.argv[1] + + path_empty_load_ts = sys.argv[2] + path_empty_ts_feedin = sys.argv[3] + path_empty_ts_efficiencies = sys.argv[4] + + scenarios = os.listdir(scenarios_dir) + + all_load_ts = pd.DataFrame(columns=HEADER_B3_TS) + all_feedin_ts = pd.DataFrame(columns=HEADER_B3_TS) + all_efficiencies_ts = pd.DataFrame(columns=HEADER_B3_TS) + + component_attrs_file = (os.path.join(module_path, "component_attrs.yml"),) + component_attrs = load_yaml(component_attrs_file[0]) + + for scenario_specs in scenarios: + scenario_specs = load_yaml(os.path.join(scenarios_dir, scenario_specs)) + model_structure = model_structures[scenario_specs["model_structure"]] + + component_attrs_update = load_yaml( + os.path.join(model.here, "component_attrs_update.yml") + ) + + # Get all foreign_keys that contain "profile" from + # component_attrs + foreign_keys_profile = get_sub_dict("profile", component_attrs) + + # Get all foreign_keys that contain "profile" from + # component_attrs_update + foreign_keys_profile.extend( + get_sub_dict("profile", component_attrs_update) + ) + + # Drop the ones that are not listed in model_structure + foreign_keys_profile = [ + fk + for fk in foreign_keys_profile + if fk["profile"].split("-profile")[0] + in model_structure["components"] + ] + + # Get all foreign_keys that contain "efficiency" from + # component_attrs_update + foreign_keys_efficiency = get_sub_dict( + "efficiency", component_attrs_update + ) + + # Save profile names depending on whether it is a load, a feed-in or an + # efficiency + load_names = [ + attr_subdict["profile"] + for attr_subdict in foreign_keys_profile + if "demand" in attr_subdict["profile"] # noqa: E713 + ] + feedin_names = [ + attr_subdict["profile"] + for attr_subdict in foreign_keys_profile + if not "demand" in attr_subdict["profile"] # noqa: E713 + ] + efficiency_names = [ + attr_subdict["efficiency"] + for attr_subdict in foreign_keys_efficiency + ] + + ts_dict = { + "load": load_names, + "feedin": feedin_names, + "efficiency": efficiency_names, + } + + for region in model_structure["regions"]: + + if load_names: + df_loads = get_df_of_all_empty_ts(load_names, region) + all_load_ts = pd.concat( + [all_load_ts, df_loads], ignore_index=True + ) + + if feedin_names: + df_feedin = get_df_of_all_empty_ts(feedin_names, region) + all_feedin_ts = pd.concat( + [all_feedin_ts, df_feedin], ignore_index=True + ) + + if efficiency_names: + df_efficiencies = get_df_of_all_empty_ts( + efficiency_names, region + ) + all_efficiencies_ts = pd.concat( + [all_efficiencies_ts, df_efficiencies], ignore_index=True + ) + + ts_dict = { + "load": (load_names, all_load_ts, path_empty_load_ts), + "feedin": (feedin_names, all_feedin_ts, path_empty_ts_feedin), + "efficiency": ( + efficiency_names, + all_efficiencies_ts, + path_empty_ts_efficiencies, + ), + } + + for ts_type, 
(ts_name, ts_data, ts_path) in ts_dict.items(): + if ts_name: + ts_data = drop_duplicates(ts_data) + save_ts(ts_data, ts_path) diff --git a/digipipe/esys/scripts/optimize.py b/digipipe/esys/scripts/optimize.py new file mode 100644 index 00000000..53ae65e5 --- /dev/null +++ b/digipipe/esys/scripts/optimize.py @@ -0,0 +1,329 @@ +# coding: utf-8 +r""" +Inputs +------- +preprocessed : str + ``results/{scenario}/preprocessed``: Path to preprocessed EnergyDatapackage + containing elements, sequences and datapackage.json. +optimized : str + ``results/{scenario}/optimized/`` Target path to store dump of + oemof.solph.Energysystem with optimization results and parameters. +logfile : str + ``results/{scenario}/{scenario}.log``: path to logfile + +Outputs +--------- +es.dump + oemof.solph.EnergySystem with results, meta-results and parameters + +Description +------------- +Given an EnergyDataPackage, this script creates an oemof.solph.EnergySystem and +an oemof.solph.Model, which is optimized. + +The following constraints are added: + - `emission_limit`: maximum amount of emissions + - `equate_flows_by_keyword`: electricity-gas relation is set + (electricity/gas = factor). + This constraint is only added if 'electricity_gas_relation' is added to + the scalars. + To use this constraint you need to copy + [`equate_flows.py`](https://github.com/oemof/oemof-solph/blob/features/ + equate-flows/src/oemof/solph/constraints/equate_variables.py) + of oemof.solph into `/tools` directory of `oemof-B3`. + +The EnergySystem with results, meta-results and parameters is saved. +""" +import logging +import os +import sys + +import numpy as np +from oemof import solph +from oemof.solph import EnergySystem, Model, constraints, processing + +# DONT REMOVE THIS LINE! +# pylint: disable=unusedimport +from oemof.tabular import datapackage # noqa +from oemof.tabular.facades import TYPEMAP + +from digipipe.esys.esys.config import esys_conf +from digipipe.esys.esys.tools import data_processing as dp +from digipipe.esys.esys.tools.equate_flows import equate_flows_by_keyword +from digipipe.esys.esys.tools.timing import Timer + +logger = logging.getLogger() + + +def drop_values_by_keyword(df, keyword="None"): + """drops row if `var_value` is None""" + drop_indices = df.loc[df.var_value == keyword].index + df = df.drop(drop_indices) + return df + + +def get_emission_limit(scalars): + """ + Gets emission limit from scalars and returns None if it is missing or + None. + """ + emission_df_raw = scalars.loc[scalars["carrier"] == "emission"].set_index( + "var_name" + ) + emission_df = drop_values_by_keyword(emission_df_raw) + + # return None if no emission limit is given ('None' or entry missing) + if ( + emission_df.empty + or emission_df.at["emission_limit", "var_value"] is np.nan + ): + logger.info("No emission limit will be set.") + return None + else: + limit = emission_df.at["emission_limit", "var_value"] + logger.info(f"Emission limit will be set to {limit}.") + return limit + + +def get_electricity_gas_relations(scalars): + r""" + Gets electricity/gas relations from scalars. Returns None if no relations + are given. + + Returns + ------- + pd.DataFrame + Contains rows of scalars with 'var_name' `EL_GAS_RELATION`. + If no relation is given returns None. 
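+
+    Examples
+    --------
+    Sketch of the intended use, mirroring the ``__main__`` block of this
+    script (the file name is illustrative and ``m`` is assumed to be an
+    already built ``oemof.solph.Model``)::
+
+        additional_scalars = dp.load_b3_scalars("additional_scalars.csv")
+        el_gas_relations = get_electricity_gas_relations(additional_scalars)
+        if el_gas_relations is not None:
+            add_electricity_gas_relation_constraints(
+                model=m, relations=el_gas_relations
+            )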
+ """ + relations_raw = scalars.loc[ + scalars.var_name == esys_conf.settings.optimize.el_gas_relation + ] + # drop relations that are None + relations = drop_values_by_keyword(relations_raw) + if relations.empty: + logger.info("No gas electricity relation will be set.") + return None + else: + busses = relations.carrier.drop_duplicates().values + logger.info( + f"Gas electricity relations will be set for busses: {busses}" + ) + return relations + + +def get_bpchp_output_parameters(scalars): + r"""Gets 'output_parameters' of backpressure CHPs from scalars and + returns None if it is missing or None.""" + bpchp_outs_raw = scalars.loc[ + (scalars.tech == "bpchp") & (scalars.var_name == "output_parameters") + ] + + # drop rows that have empty dict as var_value + bpchp_outs = drop_values_by_keyword(bpchp_outs_raw, "{}") + if bpchp_outs.empty: + return None + else: + return bpchp_outs + + +def add_output_parameters_to_bpchp(parameters, energysystem): + r""" + Adds keywords for electricity-gas relation constraint to backpressure CHPs. + + This is necessary as oemof.tabular does not support `output_parameters` of + these components, yet. The keywords are set as attributes of the output + flow towards `heat_bus`. + + Parameters + ---------- + parameters : pd.DataFrame + Contains output_parameters of backpressure CHP scalars. + energysystem : oemof.solph.network.EnergySystem + The energy system + """ + # rename column 'name' as is collides with iterrows() + parameters.rename(columns={"name": "name_"}, inplace=True) + for i, element in parameters.iterrows(): + if element.name_ in energysystem.groups: + # get heat bus the component is connected to + bus = energysystem.groups[element.name_].heat_bus + + # get keyword and boolean value + split_str = element.var_value.split('"') + keyword = split_str[1] + value = bool(split_str[2].split("}")[0].split()[1]) + + # set keyword as attribute with value + setattr( + energysystem.groups[element.name_].outputs.data[bus], + keyword, + value, + ) + else: + logging.warning( + f"No element '{element.name_}' in EnergySystem. Cannot add " + f"output_parameters." + ) + + return energysystem + + +def add_electricity_gas_relation_constraints(model, relations): + r""" + Adds constraint `equate_flows_by_keyword` to `model`. + + The components belonging to 'electricity' or 'gas' are selected by + keywords. The keywords of components powered by gas start with + `esys_conf.settings.optimize.gas_key` and such powered by electricity with + `esys_conf.settings.optimize.el_key`, followed by `carrier` and `region` + e.g. <`GAS_KEY`>--. + + Parameters + ---------- + model : oemof.solph.Model + optmization model + relations : pd.DataFrame + Contains electricity/gas relations in column 'var_value'. Further + contains at least columns 'carrier' and 'region'. 
+ """ + for index, row in relations.iterrows(): + # Formulate suffix for keywords - + suffix = f"{row.carrier}-{row.region}" + equate_flows_by_keyword( + model=model, + keyword1=f"{esys_conf.settings.optimize.gas_key}-{suffix}", + keyword2=f"{esys_conf.settings.optimize.el_key}-{suffix}", + factor1=row.var_value, + name=f"equate_flows_{suffix}", + ) + + +def get_additional_scalars(): + """ + Returns additional scalars as pd.DataFrame or None if file does not exist + """ + filename_add_scalars = os.path.join(preprocessed, "additional_scalars.csv") + if os.path.exists(filename_add_scalars): + scalars = dp.load_b3_scalars(filename_add_scalars) + return scalars + else: + return None + + +if __name__ == "__main__": + preprocessed = sys.argv[1] + + optimized = sys.argv[2] + + logfile = sys.argv[3] + logger = esys_conf.add_snake_logger("optimize") + + # get additional scalars, set to None at first + emission_limit = None + el_gas_relations = None + bpchp_out = None + additional_scalars = get_additional_scalars() + if additional_scalars is not None: + emission_limit = get_emission_limit(additional_scalars) + el_gas_relations = get_electricity_gas_relations(additional_scalars) + bpchp_out = get_bpchp_output_parameters(additional_scalars) + + if not os.path.exists(optimized): + os.mkdir(optimized) + + try: + + logger.info( + f"Created solph.EnergSystem using oemof.solph version " + f"'{solph.__version__}'." + ) + + with Timer(text="Created solph.Energystem.", logger=logger.info): + es = EnergySystem.from_datapackage( + os.path.join( + preprocessed, esys_conf.settings.optimize.filename_metadata + ), + attributemap={}, + typemap=TYPEMAP, + ) + + # Reduce number of timestep for debugging + if esys_conf.settings.optimize.debug: + es.timeindex = es.timeindex[:3] + + logger.info( + "Using DEBUG mode: Running model with first 3 timesteps only." + ) + + # add output_parameters of bpchp + if bpchp_out is not None: + es = add_output_parameters_to_bpchp( + parameters=bpchp_out, energysystem=es + ) + + # create model from energy system (this is just oemof.solph) + logger.info("Creating solph.Model.") + + with Timer(text="Created solph.Model.", logger=logger.info): + m = Model(es) + + # add constraints + logger.info("Setting constraints.") + + if emission_limit is not None: + constraints.emission_limit(m, limit=emission_limit) + if el_gas_relations is not None: + add_electricity_gas_relation_constraints( + model=m, relations=el_gas_relations + ) + + # tell the model to get the dual variables when solving + if esys_conf.settings.optimize.receive_duals: + m.receive_duals() + + # save solver log to scenario specific location + solve_kwargs = esys_conf.settings.optimize.solve_kwargs + solve_kwargs["logfile"] = ( + logfile.split("." + logfile.split(".")[-1])[0] + "_solver_log.log" + ) + + logger.info( + f"Solving with solver '{esys_conf.settings.optimize.solver}' " + f"using solve_kwargs '{esys_conf.settings.optimize.solve_kwargs}' " + f"and cmdline_options " + f"'{esys_conf.settings.optimize.cmdline_options}'." 
+ ) + + with Timer(text="Solved the model.", logger=logger.info): + if esys_conf.settings.optimize.write_lp_file: + m.write( + os.path.join(optimized, "optimized.lp"), + io_options={"symbolic_solver_labels": True}, + ) + m.solve( + solver=esys_conf.settings.optimize.solver, + solve_kwargs=esys_conf.settings.optimize.solve_kwargs, + cmdline_options=esys_conf.settings.optimize.cmdline_options, + ) + + except: # noqa: E722 + logger.exception( + f"Could not optimize energysystem for datapackage from " + f"'{preprocessed}'." + ) + raise + + else: + + logger.info("Model solved. Collecting results.") + + # get results from the solved model(still oemof.solph) + es.meta_results = processing.meta_results(m) + es.results = processing.results(m) + es.params = processing.parameter_as_dict(es) + + # dump the EnergySystem + es.dump(optimized) + + logger.info(f"Results saved to {optimized}.") diff --git a/digipipe/esys/scripts/postprocess.py b/digipipe/esys/scripts/postprocess.py new file mode 100644 index 00000000..157d3aba --- /dev/null +++ b/digipipe/esys/scripts/postprocess.py @@ -0,0 +1,73 @@ +# coding: utf-8 +r""" +Inputs +------- +optimized : str + ``results/{scenario}/optimized``: Directory containing dump of + oemof.solph.Energysystem with optimization results and parameters. +scenario_name : str + ``{scenario}``: Name of the scenario. +destination : str + ``results/{scenario}/postprocessed``: Target path for postprocessed + results. +logfile : str + ``results/{scenario}/{scenario}.log``: path to logfile + +Outputs +--------- +oemoflex.ResultsDatapackage + ResultsDatapackage + +Description +------------- +The script performs the postprocessing of optimization results. + +Explanations about the structure of the postprocessed data can be found in +section :ref:`Postprocessing` of the +`docu `_. +""" +import os +import sys + +import pandas as pd +from oemof.solph import EnergySystem +from oemoflex import config as oemoflex_config +from oemoflex.model.datapackage import ResultsDataPackage + +from digipipe.esys.esys.config import esys_conf + +if __name__ == "__main__": + + optimized = sys.argv[1] + + scenario_name = sys.argv[2] + + destination = sys.argv[3] + + logger = esys_conf.add_snake_logger("postprocess") + + oemoflex_config.config.settings.SEPARATOR = ( + esys_conf.settings.general.separator + ) + + try: + es = EnergySystem() + + es.restore(optimized) + + rdp = ResultsDataPackage.from_energysytem(es) + + rdp.set_scenario_name(scenario_name) + + rdp.to_csv_dir(destination) + + pd.Series({"objective": es.meta_results["objective"]}).to_csv( + os.path.join(destination, "objective.csv"), + sep=esys_conf.settings.general.separator, + ) + + except: # noqa: E722 + logger.exception( + f"Could not postprocess data from energysystem in '{optimized}'." + ) + raise diff --git a/digipipe/esys/scripts/prepare_scalars.py b/digipipe/esys/scripts/prepare_scalars.py new file mode 100644 index 00000000..eb98f2cf --- /dev/null +++ b/digipipe/esys/scripts/prepare_scalars.py @@ -0,0 +1,101 @@ +# coding: utf-8 +r""" +Inputs +------- +in_path1 : str + ``raw/scalars/costs_efficiencies.csv``: path incl. file name of input file + with raw scalar data +out_path : str + ``results/_resources/scal_costs_efficiencies.csv``: path incl. 
file name of + output file with prepared scalar data + +Outputs +--------- +pandas.DataFrame + with scalar data prepared for parametrization + +Description +------------- +The script performs the following steps to prepare scalar data for +parametrization: + +* Calculate annualized investment cost from overnight cost, lifetime and wacc. +""" + +import sys + +from oemof.tools.economics import annuity + +from digipipe.esys.esys.config import esys_conf +from digipipe.esys.esys.tools.data_processing import ( + ScalarProcessor, + load_b3_scalars, + save_df, +) + + +def annuise_investment_cost(sc): + + for var_name_cost, var_name_fixom_cost in [ + ("capacity_cost_overnight", "fixom_cost"), + ("storage_capacity_cost_overnight", "storage_fixom_cost"), + ]: + + invest_data = sc.get_unstacked_var( + [var_name_cost, "lifetime", var_name_fixom_cost] + ) + + # TODO: Currently, (storage)_capacity_overnight_cost, + # (storage)_fixom_cost and lifetime have to be given for each tech and + # each scenario, but wacc may change per scenario, but is defined for + # all techs uniformly. Could offer a more general and flexible solution. + + # wacc is defined per scenario, ignore other index levels + wacc = sc.get_unstacked_var("wacc") + wacc.index = wacc.index.get_level_values("scenario_key") + + # set wacc per scenario_key + scenario_keys = invest_data.index.get_level_values("scenario_key") + invest_data["wacc"] = wacc.loc[scenario_keys].values + + annuised_investment_cost = invest_data.apply( + lambda x: annuity(x[var_name_cost], x["lifetime"], x["wacc"]) + + x[var_name_fixom_cost], + 1, + ) + + sc.append( + var_name_cost.replace("_overnight", ""), annuised_investment_cost + ) + + sc.drop( + [ + "wacc", + "lifetime", + "capacity_cost_overnight", + "storage_capacity_cost_overnight", + "fixom_cost", + "storage_fixom_cost", + ] + ) + + +if __name__ == "__main__": + in_path = sys.argv[1] # path to raw scalar data + out_path = sys.argv[2] # path to destination + + df = load_b3_scalars(in_path) + + sc = ScalarProcessor(df) + + annuise_investment_cost(sc) + + sc.scalars = sc.scalars.sort_values( + by=["carrier", "tech", "var_name", "scenario_key"] + ) + + sc.scalars.reset_index(inplace=True, drop=True) + + sc.scalars.index.name = esys_conf.settings.general.scal_index_name + + save_df(sc.scalars, out_path) diff --git a/digipipe/esys/scripts/write_costs_efficiencies.py b/digipipe/esys/scripts/write_costs_efficiencies.py new file mode 100644 index 00000000..053f3ba9 --- /dev/null +++ b/digipipe/esys/scripts/write_costs_efficiencies.py @@ -0,0 +1,128 @@ +# coding: utf-8 +r""" +This module provides functionality to map raw scalar data on costs and +efficiencies from "store/raw/technology_data/data/raw_costs_efficiencies.csv" to +the default costs and efficiencies in +"store/datasets/esys_raw/data/scalars/default_costs_efficiencies.csv". +The default file is initially created using the "write_default_scalars" rule. + +The mapped costs and efficiencies are then written to +"store/datasets/esys_raw/data/scalars/costs_efficiencies.csv", and the original +"default_costs_efficiencies.csv" file is deleted. +""" +import os +import sys + +from digipipe.esys.esys.config.esys_conf import add_snake_logger, settings +from digipipe.esys.esys.tools.data_processing import ( + load_b3_scalars, + multi_filter_df, + save_df, +) + + +def check_var_value_empty(df, cols): + """ + Checks if specified columns in a DataFrame have empty values. + + Parameters + ---------- + df : pandas.DataFrame + The DataFrame to check. 
+ cols : list + List of column names to check. + + Raises + ------ + ValueError + If one or more columns contain non-empty values. + + """ + for col in cols: + if not (df[col].isna() | (df[col] == "None")).all(): + raise ValueError( + "There are not empty values in one or more of the columns " + f"'{cols}' of {path_default_costs_eff}.\n" + f"Please make sure that the columns '{cols}' are empty." + ) + + +def map_var_value_costs_effs(df_1, df_2, cols): + """ + Maps values from df_2 to df_1 based on matching variable names, carriers, + and technologies. + + Parameters + ---------- + df_1 : pandas.DataFrame + The source DataFrame where values will be mapped. + df_2 : pandas.DataFrame + The reference DataFrame containing the values to map. + cols : list + List of column names in df_1 to update with mapped values. + + Returns + ------- + pandas.DataFrame + The modified df_1 DataFrame with mapped values. + + Raises + ------ + ValueError + If a matching combination of var_name, carrier, and tech is not found + in df_2. + + """ + + for index, row in df_1.iterrows(): + var_name = row["var_name"] + carrier = row["carrier"] + tech = row["tech"] + df_3 = multi_filter_df( + df_2, var_name=var_name, carrier=carrier, tech=tech + ) + + if not df_3.empty: + for col in cols: + df_1.loc[index, col] = df_3[col].values[0] + else: + raise ValueError( + f"Value of var_name '{var_name}', carrier '{carrier}' and tech " + f"'{tech}' is missing in {path_raw_costs_eff}." + ) + + return df_1 + + +if __name__ == "__main__": + path_default_costs_eff = sys.argv[1] + path_raw_costs_eff = sys.argv[2] + path_costs_eff = sys.argv[3] + + logger = add_snake_logger("data_processing") + + default_costs_eff = load_b3_scalars(path_default_costs_eff) + raw_costs_eff = load_b3_scalars(path_raw_costs_eff) + + # Name of Columns to be filled + target_cols = ["var_value", "var_unit", "source", "comment"] + + # Check if no values written in target_cols of + # default_costs_efficiencies.csv + check_var_value_empty(default_costs_eff, target_cols) + + # Map values from raw_costs_efficiencies.csv with + # default_costs_efficiencies.csv + costs_eff = map_var_value_costs_effs( + default_costs_eff, raw_costs_eff, target_cols + ) + + # Save costs_efficiencies.csv to store/datasets/esys_raw/data/scalars + save_df(costs_eff, path_costs_eff) + + # Delete file default_costs_efficiencies.csv + if settings.write_costs_efficiencies.delete_default: + if os.path.exists(path_default_costs_eff): + os.remove(path_default_costs_eff) + # Print user info + logger.info(f"The file {path_default_costs_eff} has been deleted.") diff --git a/digipipe/esys/scripts/write_default_scalars.py b/digipipe/esys/scripts/write_default_scalars.py new file mode 100644 index 00000000..6caf852e --- /dev/null +++ b/digipipe/esys/scripts/write_default_scalars.py @@ -0,0 +1,236 @@ +# coding: utf-8 +r""" +This module contains functionality to update empty scalars (generated from +scenarios with create_empty_scalars module) with default data and saves it to a +new csv file. +""" + +import sys + +import numpy as np +import pandas as pd + +from digipipe.esys.esys.config.esys_conf import write_default_scalars +from digipipe.esys.esys.tools.data_processing import ( + filter_df, + load_b3_scalars, + save_df, +) + + +def clear_input_parameters_of_storages(_df): + """ + Deletes empty dictionaries from "var_value" where "var_name" is + "input_parameters" and "type" is "storage". This allows default values to + be written to these fields. 
+ + Parameters + ---------- + _df : pandas.DataFrame + DataFrame containing the data. + + Returns + ------- + None + This function modifies the "_df" DataFrame in-place. + + """ + empty_sc_df.loc[ + (empty_sc_df["var_name"] == "input_parameters") + & (empty_sc_df["type"] == "storage"), + "var_value", + ] = None + + +def get_var_value_and_comment(which): + """ + Returns the variable value and a comment based on the input argument. + + Parameters + ---------- + which : str + string indicating what var_value and comment are chosen as default. + Valid options are: zeros, empty, high_costs, false and empty_dict. + + Returns + ------- + var_value : object + The updated variable value + comment : str + A comment for the var_value update + """ + if which == "zeros": + var_value = 0 + comment = "Zero" + elif which == "empty": + var_value = np.nan + comment = "Empty" + elif which == "high_costs": + var_value = 1000000000 + comment = "High slack cost on shortage" + elif which == "false": + var_value = False + comment = "Empty" + elif which == "empty_dict": + var_value = "{}" + comment = "Empty" + elif which == "variable_costs": + var_value = '{"variable_costs": 1e-7}' + comment = "Own assumption to prevent hidden curtailment" + elif which == "emissions_not_modeled": + var_value = 0 + comment = "No fossils in 2045 in the modelled sectors." + elif which == "emission_reduction_factor": + var_value = 1 + comment = "100 % of GHG reduction in 2045" + else: + raise ValueError( + f"'{which}' is not a valid option. Please provide a valid options. " + f"Valid options are: zeros, empty, high_costs, false and " + f"empty_dict." + ) + + return var_value, comment + + +def get_filter_condition(_df, _var_name, _type, _tech): + """ + Returns a filter condition based on the input arguments. + + Parameters + ---------- + _df : pd.DataFrame + DataFrame containing the empty scalar data + _var_name : str + Name of the variable to be updated + _type : str + Type to be updated + _tech : str + Technology to be updated + + Returns + ------- + condition : pd.Series + A Boolean series used to filter the DataFrame to update the specified + variable + """ + precondition = _df["var_value"].isna() + if (_type != "None") and (_tech != "None"): + condition = ( + precondition + & (_df["var_name"] == _var_name) + & (_df["type"] == _type) + & (_df["tech"] == _tech) + ) + elif (_type != "None") and (_tech == "None"): + condition = ( + precondition + & (_df["var_name"] == _var_name) + & (_df["type"] == _type) + ) + elif (_type == "None") and (_tech != "None"): + condition = ( + precondition + & (_df["var_name"] == _var_name) + & (_df["tech"] == _tech) + ) + else: + condition = precondition & (_df["var_name"] == _var_name) + + return condition + + +def update_df(_df, which, condition, unit): + """ + Update the input DataFrame by setting values for specified columns based on + the provided condition. + + Parameters + ---------- + _df : pandas.DataFrame + The DataFrame to update. + which : str + Specifies the type of value to set for the "var_value" column. + Valid options are "zeros", "empty", "high_costs", "false", and + "empty_dict". + condition : pandas.Series of bools + A boolean mask indicating which rows of `_df` should be updated. + unit : str + The unit to write into the "var_unit" column. + + Returns + ------- + pandas.DataFrame + The updated DataFrame. 
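+
+    Examples
+    --------
+    A minimal sketch on a toy frame (columns reduced to the ones touched here;
+    real data carries the full scalar header)::
+
+        df = pd.DataFrame(
+            {"var_name": ["capacity"], "var_value": [np.nan],
+             "comment": [None], "var_unit": [None]}
+        )
+        cond = get_filter_condition(df, "capacity", "None", "None")
+        df = update_df(df, "zeros", cond, "MW")
+        # now df.loc[0, "var_value"] is 0, comment is "Zero", var_unit is "MW"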
+ """ + var_value, comment = get_var_value_and_comment(which) + + _df.loc[condition, "var_value"] = var_value + _df.loc[condition, "comment"] = comment + _df.loc[condition, "var_unit"] = unit + + return _df + + +if __name__ == "__main__": + path_empty_sc = sys.argv[1] + path_default_sc = sys.argv[2] + path_default_costs_eff = sys.argv[3] + + empty_sc_df = load_b3_scalars(path_empty_sc) + + clear_input_parameters_of_storages(empty_sc_df) + + write_empty_scalars_dict = write_default_scalars.write_default_scalars + + for key, value in write_empty_scalars_dict.items(): + + condition = get_filter_condition( + empty_sc_df, value["var_name"], value["type"], value["tech"] + ) + df_updated = update_df( + empty_sc_df, value["which"], condition, value["var_unit"] + ) + + # Get all unique values for var_name + var_names_all = list(df_updated["var_name"].unique()) + # Get all values for var_name in context of costs and efficiencies + var_names_costs_efficiencies = list( + write_default_scalars.costs_efficiencies + ) + # Get all remaining var_values needed for default_scalars.csv + var_names_scalars = [ + var_name + for var_name in var_names_all + if var_name not in var_names_costs_efficiencies + ] + + df_costs_efficiencies = filter_df( + df_updated, "var_name", var_names_costs_efficiencies + ) + + # Get all already by default set values of costs and efficiencies + df_default_costs_efficiencies = df_costs_efficiencies.dropna( + subset=["var_value"] + ) + + # Keep only non default values of costs and efficiencies + df_costs_efficiencies = df_costs_efficiencies[ + df_costs_efficiencies["var_value"].isna() + ] + + # Get remaining scalars + df_scalars = filter_df(df_updated, "var_name", var_names_scalars) + + # Append all values set by default of costs and efficiencies to remaining + # scalars + df_scalars = pd.concat( + [df_scalars, df_default_costs_efficiencies], ignore_index=False + ) + + # Write all attributes attached to costs and efficiencies in separate + # default_cost_efficiencies.csv file + save_df(df_costs_efficiencies, path_default_costs_eff) + + # Write all other scalars in default_scalars.csv + save_df(df_scalars, path_default_sc) diff --git a/digipipe/esys/scripts/write_ts.py b/digipipe/esys/scripts/write_ts.py new file mode 100644 index 00000000..94f6d52b --- /dev/null +++ b/digipipe/esys/scripts/write_ts.py @@ -0,0 +1,229 @@ +# coding: utf-8 +r""" +This module provides functionality to map time series from the subdirectories +in "store/datasets/" to the empty time series in +"store/datasets/esys_raw/data/time_series/empty_ts_efficiencies.csv", +"store/datasets/esys_raw/data/time_series/empty_ts_feedin.csv" and +"store/datasets/esys_raw/data/time_series/empty_ts_load.csv". +The empty files are initially created using the "create_empty_ts" rule. + +The mapped time series are then written to +"store/datasets/esys_raw/data/scalars/ts_efficiencies.csv", +"store/datasets/esys_raw/data/scalars/ts_feedin.csv" and +"store/datasets/esys_raw/data/scalars/ts_load.csv". + +The original files with the empty time series are deleted. +""" + +import os +import sys + +import pandas as pd + +from digipipe.esys.esys.config.esys_conf import add_snake_logger, map_ts +from digipipe.esys.esys.tools.data_processing import ( + HEADER_B3_TS, + load_b3_timeseries, + save_df, + stack_timeseries, + unstack_timeseries, +) +from digipipe.store.utils import get_abs_dataset_path + + +def check_if_ts_name_in_maps_ts(ts_name, mapping_file_name): + """ + Check if a time series name is present in the mapping of time series. 
+ + Parameters + ---------- + ts_name : str + The name of the time series to check. + mapping_file_name : str + The name of the mapping file for reference in the error message. + + Raises + ------ + ValueError + If the time series name is missing in the mapping. + + """ + if ( + ts_name not in map_ts.efficiencies + and ts_name not in map_ts.feedin + and ts_name not in map_ts.load + ): + raise ValueError( + f"Missing mapping of time series '{ts_name}'. " + f"Please provide a key and value for each time " + f"series with the file '{mapping_file_name}'." + ) + + +def get_datasets_data_name(key, which): + """ + Retrieve the specific data value from the time series based on the provided + key and description. + + Parameters + ---------- + key : str + The name of the data to retrieve. + which : str + Specifies the kind of time series data. + Valid options are "efficiencies", "feedin", and "load". + + Returns + ------- + value : object + The data value corresponding to the given key and description. + + Raises + ------ + ValueError + If an invalid option is provided for the 'which' parameter. + + """ + if which == "efficiencies": + value = map_ts.efficiencies[key] + elif which == "feedin": + value = map_ts.feedin[key] + elif which == "load": + value = map_ts.load[key] + else: + raise ValueError( + "Please provide a valid description of the time series with " + f"'which'. '{which}' is not a valid option." + ) + + return value + + +def check_file_exists(path, file_name, df, mapping_file_name): + """ + Check if a file exists at the specified path and raise a FileNotFoundError + if not. + + Parameters + ---------- + path : str + The path to the file. + file_name : str + The name of the file (without the file extension). + df : pd.DataFrame + The DataFrame containing the variable name to be mapped. + mapping_file_name : str + The name of the mapping file for reference in the error message. + + Raises + ------ + FileNotFoundError + If the file does not exist at the specified path. + + """ + if not path: + raise FileNotFoundError( + f"The file '{file_name}.csv' could not be found. Please provide a " + f"valid file name with '{mapping_file_name}' to map " + f"'{df['var_name']}'." + ) + + +def write_ts_data(stacked_ts, data): + """ + Write time series data to the specified stacked time series DataFrame. + + Parameters + ---------- + stacked_ts : pd.DataFrame + The stacked time series DataFrame. + data : pd.DataFrame + The data to write into the stacked time series. + + Returns + ------- + pd.DataFrame + The updated stacked time series DataFrame. + + """ + unstacked_data = unstack_timeseries(stacked_ts) + unstacked_data.iloc[:, 0] = data.iloc[:, 0].values + + stacked_data = stack_timeseries(unstacked_data) + stacked_ts.loc[:, "series"] = stacked_data["series"].values + return stacked_ts + + +def map_over_var_name_ts(path_ts, which, path_ts_updated): + """ + Map and update time series data in directory 'path_ts' based on var_name. + Save the updated time series to the directory 'path_ts_updated'. + Delete the origin time series (empty_ts_...) in 'path_ts' directory. + + Parameters + ---------- + path_ts : str + The path to the original time series file. + which : str + Specifies the type of data value to map and update. + path_ts_updated : str + The path to save the updated time series file. 
+ + Returns + ------- + None + + """ + # Create empty Dataframe for the updated time series + updated_ts_df = pd.DataFrame(columns=HEADER_B3_TS) + + # Load set of time series + ts_set = load_b3_timeseries(path_ts) + + # Loop over rows (set) of ts + for index, row in ts_set.iterrows(): + check_if_ts_name_in_maps_ts(row["var_name"], "map_ts.yml") + datasets_file_name = get_datasets_data_name(row["var_name"], which) + datasets_file_path = get_abs_dataset_path( + "datasets", datasets_file_name.get("dataset"), data_dir=True + ) / datasets_file_name.get("file") + + check_file_exists( + datasets_file_path, datasets_file_name, row, "map_ts.yml" + ) + + df_new_data_ts = pd.read_csv(datasets_file_path, usecols=[1], sep=",") + + ts_updated = write_ts_data( + ts_set.iloc[[index], :].copy(), df_new_data_ts + ) + + updated_ts_df = pd.concat( + [updated_ts_df, ts_updated], ignore_index=False + ) + + updated_ts_df.index.name = ts_updated.index.name + + save_df(updated_ts_df, path_ts_updated) + + os.remove(path_ts) + # Print user info + logger.info(f"The file {path_ts} has been deleted.") + + +if __name__ == "__main__": + path_empty_ts_efficiencies = sys.argv[1] + path_empty_ts_feedin = sys.argv[2] + path_empty_ts_load = sys.argv[3] + + path_ts_efficiencies = sys.argv[4] + path_ts_feedin = sys.argv[5] + path_ts_load = sys.argv[6] + + logger = add_snake_logger("data_processing") + + map_over_var_name_ts( + path_empty_ts_efficiencies, "efficiencies", path_ts_efficiencies + ) + map_over_var_name_ts(path_empty_ts_feedin, "feedin", path_ts_feedin) + map_over_var_name_ts(path_empty_ts_load, "load", path_ts_load) diff --git a/digipipe/esys/snakemake_rules/build_datapackage.smk b/digipipe/esys/snakemake_rules/build_datapackage.smk new file mode 100644 index 00000000..7eddb444 --- /dev/null +++ b/digipipe/esys/snakemake_rules/build_datapackage.smk @@ -0,0 +1,24 @@ +from digipipe.esys.esys.config.esys_conf import load_yaml + +def get_paths_scenario_input(wildcards): + scenario_specs = load_yaml(f"esys/scenarios/{wildcards.scenario}.yml") + paths_scenario_inputs = list() + for key in ["paths_scalars", "paths_timeseries"]: + paths = scenario_specs[key] + if isinstance(paths, list): + paths_scenario_inputs.extend(paths) + elif isinstance(paths, str): + paths_scenario_inputs.append(paths) + return paths_scenario_inputs + +rule build_datapackage: + input: + get_paths_scenario_input, + scenario="esys/scenarios/{scenario}.yml" + output: directory("store/appdata/esys/{scenario}/preprocessed") + params: + logfile="store/appdata/esys/{scenario}/{scenario}.log" + wildcard_constraints: + # Do not use this rule for the examples. 
Use prepare_example instead
+        scenario=r"(?!example_).*"
+    shell: "python esys/scripts/build_datapackage.py {input.scenario} {output} {params.logfile}"
diff --git a/digipipe/esys/snakemake_rules/create_empty_scalars.smk b/digipipe/esys/snakemake_rules/create_empty_scalars.smk
new file mode 100644
index 00000000..edfe8755
--- /dev/null
+++ b/digipipe/esys/snakemake_rules/create_empty_scalars.smk
@@ -0,0 +1,7 @@
+rule create_empty_scalars:
+    """
+    Create empty scalar files
+    """
+    input: "esys/scenarios/"
+    output: "store/datasets/esys_raw/data/scalars/empty_scalars.csv"
+    shell: "python esys/scripts/create_empty_scalars.py {input} {output}"
diff --git a/digipipe/esys/snakemake_rules/create_empty_ts.smk b/digipipe/esys/snakemake_rules/create_empty_ts.smk
new file mode 100644
index 00000000..6ba81936
--- /dev/null
+++ b/digipipe/esys/snakemake_rules/create_empty_ts.smk
@@ -0,0 +1,11 @@
+rule create_empty_ts:
+    """
+    Create empty timeseries files
+    """
+    input: "esys/scenarios/"
+    output:
+        "store/datasets/esys_raw/data/time_series/empty_ts_load.csv",
+        "store/datasets/esys_raw/data/time_series/empty_ts_feedin.csv",
+        "store/datasets/esys_raw/data/time_series/empty_ts_efficiencies.csv"
+
+    shell: "python esys/scripts/create_empty_ts.py {input} {output[0]} {output[1]} {output[2]}"
diff --git a/digipipe/esys/snakemake_rules/optimization.smk b/digipipe/esys/snakemake_rules/optimization.smk
new file mode 100644
index 00000000..7b2af8f2
--- /dev/null
+++ b/digipipe/esys/snakemake_rules/optimization.smk
@@ -0,0 +1,6 @@
+rule optimize:
+    input: "store/appdata/esys/{scenario}/preprocessed"
+    output: directory("store/appdata/esys/{scenario}/optimized/")
+    params:
+        logfile="store/appdata/esys/{scenario}/{scenario}.log"
+    shell: "python esys/scripts/optimize.py {input} {output} {params.logfile}"
diff --git a/digipipe/esys/snakemake_rules/postprocessing.smk b/digipipe/esys/snakemake_rules/postprocessing.smk
new file mode 100644
index 00000000..d2847aed
--- /dev/null
+++ b/digipipe/esys/snakemake_rules/postprocessing.smk
@@ -0,0 +1,6 @@
+rule postprocess:
+    input: "store/appdata/esys/{scenario}/optimized"
+    output: directory("store/appdata/esys/{scenario}/postprocessed/")
+    params:
+        logfile="store/appdata/esys/{scenario}/{scenario}.log"
+    shell: "python esys/scripts/postprocess.py {input} {wildcards.scenario} {output} {params.logfile}"
diff --git a/digipipe/esys/snakemake_rules/prepare_resource.smk b/digipipe/esys/snakemake_rules/prepare_resource.smk
new file mode 100644
index 00000000..c578faa8
--- /dev/null
+++ b/digipipe/esys/snakemake_rules/prepare_resource.smk
@@ -0,0 +1,5 @@
+rule prepare_scalars:
+    input:
+        raw_scalars="store/datasets/esys_raw/data/scalars/costs_efficiencies.csv",
+    output: "store/appdata/esys/_resources/scal_costs_efficiencies.csv"
+    shell: "python esys/scripts/prepare_scalars.py {input.raw_scalars} {output}"
diff --git a/digipipe/esys/snakemake_rules/write_costs_efficiencies.smk b/digipipe/esys/snakemake_rules/write_costs_efficiencies.smk
new file mode 100644
index 00000000..ac379fcd
--- /dev/null
+++ b/digipipe/esys/snakemake_rules/write_costs_efficiencies.smk
@@ -0,0 +1,9 @@
+rule write_costs_efficiencies:
+    """
+    Write costs and efficiencies from raw data to esys datasets
+    """
+    input:
+        "store/datasets/esys_raw/data/scalars/default_costs_efficiencies.csv",
+        "store/raw/technology_data/data/raw_costs_efficiencies.csv",
+    output: "store/datasets/esys_raw/data/scalars/costs_efficiencies.csv"
+    shell: "python esys/scripts/write_costs_efficiencies.py {input} {output}"
diff --git
a/digipipe/esys/snakemake_rules/write_default_scalars.smk b/digipipe/esys/snakemake_rules/write_default_scalars.smk new file mode 100644 index 00000000..d0992764 --- /dev/null +++ b/digipipe/esys/snakemake_rules/write_default_scalars.smk @@ -0,0 +1,9 @@ +rule write_default_scalars: + """ + Write default values in scalar data files + """ + input: "store/datasets/esys_raw/data/scalars/empty_scalars.csv" + output: + "store/datasets/esys_raw/data/scalars/default_scalars.csv", + "store/datasets/esys_raw/data/scalars/default_costs_efficiencies.csv", + shell: "python esys/scripts/write_default_scalars.py {input} {output}" diff --git a/digipipe/esys/snakemake_rules/write_ts.smk b/digipipe/esys/snakemake_rules/write_ts.smk new file mode 100644 index 00000000..ab94168a --- /dev/null +++ b/digipipe/esys/snakemake_rules/write_ts.smk @@ -0,0 +1,10 @@ +rule write_ts: + input: + "store/datasets/esys_raw/data/time_series/empty_ts_efficiencies.csv", + "store/datasets/esys_raw/data/time_series/empty_ts_feedin.csv", + "store/datasets/esys_raw/data/time_series/empty_ts_load.csv", + output: + "store/datasets/esys_raw/data/time_series/ts_efficiencies.csv", + "store/datasets/esys_raw/data/time_series/ts_feedin.csv", + "store/datasets/esys_raw/data/time_series/ts_load.csv", + shell: "python esys/scripts/write_ts.py {input} {output}" diff --git a/digipipe/scenarios/SCENARIOS.md b/digipipe/scenarios/SCENARIOS.md new file mode 100644 index 00000000..1de8f146 --- /dev/null +++ b/digipipe/scenarios/SCENARIOS.md @@ -0,0 +1,3 @@ +# Scenarios + +**TBD** diff --git a/digipipe/store/1_converted/.gitkeep b/digipipe/scripts/__init__.py similarity index 100% rename from digipipe/store/1_converted/.gitkeep rename to digipipe/scripts/__init__.py diff --git a/digipipe/scripts/config.py b/digipipe/scripts/config.py new file mode 100644 index 00000000..6a02d241 --- /dev/null +++ b/digipipe/scripts/config.py @@ -0,0 +1,78 @@ +""" +Helper functions for configs +""" + +import os +from pathlib import Path + +import yaml + +from digipipe.store.utils import get_abs_store_root_path + + +def read_config(file: Path) -> dict: + """Reads a yml config and returns as dict + + Parameters + ---------- + file : str + Full path to config file to read + + Returns + ------- + dict + Config dict + """ + with open(file, "r") as cfg_file: + try: + cfg = yaml.safe_load(cfg_file) or {} + except yaml.YAMLError as exc: + print(f"Error while reading config file: {file}") + print(exc) + return cfg + + +def load_dataset_configs() -> dict: + """Load datasets' yml configs and merge them using the directory tree for + naming. + + Parameters + ---------- + config_files : list of str + List of paths to config files + Returns + ------- + dict + Config dict of format: + {"store": + {"category": + {"": {}, ...} + } + } + """ + + def search_store_configs() -> list: + """Search for configs (*.yml) in data store, exclude templates. 
+ + Returns + ------- + list + Paths to config files + """ + cfg_files = list() + for root, dirs, files in os.walk(get_abs_store_root_path()): + for file in files: + if (file == "config.yml") and ".TEMPLATE" not in str(root): + cfg_files.append(Path(os.path.join(root, file))) + return cfg_files + + merged_cfg = dict() + for file in search_store_configs(): + path = Path(file).resolve() + section = path.parent.parent.name + subsection = path.parent.name + cfg = read_config(file) + if merged_cfg.get(section, None) is None: + merged_cfg[section] = {} + merged_cfg[section][subsection] = cfg + return {"store": merged_cfg} diff --git a/digipipe/scripts/data_io.py b/digipipe/scripts/data_io.py new file mode 100644 index 00000000..ec13380e --- /dev/null +++ b/digipipe/scripts/data_io.py @@ -0,0 +1,120 @@ +import json +import os +import shutil +import zipfile +from pathlib import Path + +import requests + + +def load_json(file_path: Path) -> dict: + with open(file_path, "r") as f: + return json.load(f) + + +def download_file(url: str, dest_file: str) -> None: + """Downloads file from url to specified destination. + + Parameters + ---------- + url : str + The URL string of the file to be downloaded. + dest_file : str + The destination path/filename of the file to be downloaded. + """ + print(f"Downloading from {url}") + with requests.get(url, stream=True, timeout=10) as r: + r.raise_for_status() + with open(dest_file, "wb") as f: + for i, block in enumerate(r.iter_content(chunk_size=1024)): + f.write(block) + downloaded_size = i * 1024 / 1e6 + print(f"{downloaded_size:.2f} MB...", end="\r") + print(f"Download completed to {dest_file}") + + +def extract_zipfile(zip_path: str, dest_path: str) -> None: + """Extracts zipfile to destination path + + Parameters + ---------- + zip_path : str + Path to the zipfile to be extracted + dest_path : str + Destination path where the contents of zipfile are to be extracted + """ + with zipfile.ZipFile(zip_path, "r") as zip_ref: + zip_ref.extractall(dest_path) + + +def copy_files(src_path: str, dest_path: str) -> None: + """Copies files from source to destination directory + + Parameters + ---------- + src_path : str + Source path of directory to copy from + dest_path : str + Destination path of directory to copy to + """ + # Get a list of directories within source directory + dir_list = [ + d + for d in os.listdir(src_path) + if os.path.isdir(os.path.join(src_path, d)) + ] + # Loop through each directory and copy files within `data` directory + for d in dir_list: + src_dir = os.path.join(src_path, d, "data") + dst_dir = os.path.join(dest_path, d, "data") + # Check if destination directory for data already exists + if os.path.exists(dst_dir): + files_to_copy = [] + for file in os.listdir(src_dir): + src_file = os.path.join(src_dir, file) + dst_file = os.path.join(dst_dir, file) + if os.path.isfile(dst_file): + if file == ".gitkeep": + continue + print(f"\n'{file}' already exists in '{d}/data'.") + overwrite_file = input("Do you want to update it? (y/n) ") + while overwrite_file.lower() not in ["y", "n"]: + overwrite_file = input( + """Invalid input. 
Enter 'y' or 'n': """ + ) + if overwrite_file.lower() == "y": + shutil.copy(src_file, dst_file) + print(f"'{file}' updated.") + else: + continue + else: + files_to_copy.append(file) + + for file in files_to_copy: + src_file = os.path.join(src_dir, file) + dst_file = os.path.join(dst_dir, file) + shutil.copy(src_file, dst_file) + print(f"\n'{file}' added to '{d}/data'.") + # If directory doesn't exist, copy it entirely + else: + shutil.copytree(src_dir, dst_dir) + print(f"\nAdded '{d}' to 'store/raw'.") + + +def clean_folder(folder_path: str) -> None: + """Cleans up a given folder by removing all files and directories within it + (except for .gitkeep) + + Parameters + ---------- + folder_path : str + Path to the folder that should be cleaned + """ + # Clean temp folder + folder_content = os.listdir(folder_path) + for item in folder_content: + item_path = os.path.join(folder_path, item) + if os.path.isfile(item_path) and item != ".gitkeep": + os.remove(item_path) + elif os.path.isdir(item_path): + shutil.rmtree(item_path) diff --git a/digipipe/store/2_preprocessed/.gitkeep b/digipipe/scripts/datasets/__init__.py similarity index 100% rename from digipipe/store/2_preprocessed/.gitkeep rename to digipipe/scripts/datasets/__init__.py diff --git a/digipipe/scripts/datasets/demand.py b/digipipe/scripts/datasets/demand.py new file mode 100644 index 00000000..8454ee64 --- /dev/null +++ b/digipipe/scripts/datasets/demand.py @@ -0,0 +1,247 @@ +"""Shared functions for processing electricity and heat demand datasets""" + +from pathlib import Path + +import geopandas as gpd +import numpy as np +import pandas as pd + + +def normalize_filter_timeseries( + infile: Path, outfile: Path, region_nuts: list = None +) -> None: + """Extract timeseries for specific districts, merge and normalize them to + 1 (MWh) + + Parameters + ---------- + infile : pathlib.Path + Path to timeseries CSV with NUTS 3 codes in columns and + timesteps in rows (expects 15min ot 1h resolution) + outfile : pathlib.Path + Path to CSV outfile - aggregated and normalized timeseries + (1h resolution) + region_nuts : list of str + List of NUTS 3 codes whose timeseries are to be used + + Returns + ------- + None + """ + # Get timeseries, create timeindex, filter region, resample to 1h + timeseries = pd.read_csv(infile) + timeseries = timeseries[region_nuts] + + if len(timeseries) == 8760: + timeseries.index = pd.DatetimeIndex( + pd.date_range( + start="2017-01-01 00:00:00", + end="2017-12-31 23:45:00", + freq="1H", + ) + ) + elif len(timeseries) == 35040: + timeseries.index = pd.DatetimeIndex( + pd.date_range( + start="2017-01-01 00:00:00", + end="2017-12-31 23:45:00", + freq="15Min", + ) + ) + timeseries = timeseries.resample("H").sum() + else: + raise ValueError("Invalid number of rows in timeseries!") + + # Average SLP timeseries and normalize to 1 MWh + timeseries = timeseries.sum(axis=1) + timeseries = timeseries.div(timeseries.sum()).reset_index(drop=True) + timeseries.name = "demand_norm" + + # Check result + np.testing.assert_almost_equal( + timeseries.sum(), + 1, + err_msg="Sum of normalized timeseries is not 1!", + ) + + # Write + timeseries.to_csv(outfile) + + +def merge_demand_multiple_years( + infiles: list, + outfile: Path, +) -> None: + """Merge demand from different years into one + + Parameters + ---------- + infiles : list of Path + CSV with each holding demand per municipality for one year + outfile : pathlib.Path + CSV with demand per municipality for all years + + Returns + ------- + None + """ + demand = pd.concat( + 
[pd.read_csv(f, index_col="municipality_id") for f in infiles], + axis=1, + ) + demand.to_csv(outfile) + + +def disaggregate_demand_to_municipality( + demand_districts: pd.DataFrame, + muns: gpd.GeoDataFrame, + districts: gpd.GeoDataFrame, + disagg_data: pd.DataFrame, + disagg_data_col: str, +) -> pd.DataFrame: + """Disaggregates NUTS 3 consumption to municipalities by linear scaling + using factors from `disagg_data`. + + Parameters + ---------- + demand_districts : pd.DataFrame + Annual demand per district (NUTS 3 level) + muns : gpd.GeoDataFrame + Municipalities + districts : gpd.GeoDataFrame + Federal districts + disagg_data : pd.DataFrame + DF that contains municipal data to scale demand by + disagg_data_col : str + Name of DF column used for scaling + + Returns + ------- + pd.DataFrame + Annual demand per municipality + """ + # Get muns and their NUTS3 code + muns = muns.merge( + districts[["id", "nuts"]].rename(columns={"id": "district_id"}), + left_on="district_id", + right_on="district_id", + ) + disagg_data = ( + disagg_data.reset_index() + .merge(muns[["id", "nuts"]], left_on="municipality_id", right_on="id") + .drop(columns=["id"]) + ) + + # Calc relative share within each district + disagg_data = disagg_data.assign( + pop_share=( + disagg_data[disagg_data_col] + / disagg_data.groupby("nuts")[disagg_data_col].transform("sum") + ) + ).set_index("nuts") + + # Filter demand dataset + demand_districts = demand_districts.loc[districts.nuts.to_list()] + + # Merge demand and calc value per municipality + demand = disagg_data.merge( + demand_districts, + left_index=True, + right_index=True, + ) + demand[disagg_data_col] = demand_districts.demand_districts.mul( + demand.pop_share + ) + demand = demand[["municipality_id", disagg_data_col]].set_index( + "municipality_id" + ) + + # Check result + np.testing.assert_almost_equal( + demand.sum().sum(), + demand_districts.sum().sum(), + err_msg="Sum of disaggregated values does not match total value!", + ) + + return demand + + +def demand_prognosis( + demand_future_T45: Path, + demand_future_TN: Path, + demand_region: pd.DataFrame, + year_base: int, + year_target: int, + scale_by: str, + carrier: str = None, +) -> pd.DataFrame: + """Create demand prognosis for target year per municipality by scaling with + country data (by total demand or carrier) + + Parameters + ---------- + demand_future_T45 : pathlib.Path + Path to total demand per carrier in Germany (BMWK T45 scenario) + demand_future_TN : pathlib.Path + Path to total demand per carrier in Germany (BMWK TN scenario) + demand_region : pd.DataFrame + Annual demand per municipality (today) + year_base : int + Base year to use for calculation of reduction + year_target : int + Target year for demand + scale_by : str + One of ["total", "carrier"], scale future demand by total energy or + specific carrier given by `carrier` + carrier : str + Carrier + + + Returns + ------- + pd.DataFrame + Annual demand per municipality (prognosis) + """ + if scale_by not in ["total", "carrier"]: + raise ValueError("scale_by must be one of ['total', 'carrier']") + if scale_by == "carrier": + if carrier is None: + raise ValueError("carrier must be provided when scale_by='carrier'") + + # Get data from future scenarios + demand_future_T45 = pd.read_csv(demand_future_T45).set_index( + ["year", "carrier"] + ) + demand_future_TN = pd.read_csv(demand_future_TN) + + # Interpolate for base year + demand_future_TN = demand_future_TN.set_index(["year", "carrier"]).append( + pd.DataFrame( + 
index=pd.MultiIndex.from_product( + [[year_base], demand_future_TN.carrier.unique(), [np.nan]], + names=demand_future_TN.columns, + ) + ) + ) + demand_future_TN.sort_index(inplace=True) + demand_future_TN = demand_future_TN.unstack(level=1).interpolate().stack() + + # Scale by total or carrier demand + if scale_by == "total": + # Total reduction factor + reduction_factor = ( + demand_future_T45.loc[year_target].demand.sum() + / demand_future_TN.loc[year_base].demand.sum() + ) + elif scale_by == "carrier": + # Reduction factor for carrier + reduction_factor = ( + demand_future_T45.loc[year_target, carrier].demand + / demand_future_TN.loc[year_base, carrier].demand + ) + print( + f"Total demand in {year_target}: {round(reduction_factor, 2)} of " + f"{year_base}'s demand." + ) + + return demand_region * reduction_factor diff --git a/digipipe/scripts/datasets/mastr.py b/digipipe/scripts/datasets/mastr.py new file mode 100644 index 00000000..3db09b0b --- /dev/null +++ b/digipipe/scripts/datasets/mastr.py @@ -0,0 +1,483 @@ +"""Shared functions for processing data from MaStR dataset""" + +from typing import Tuple, Union + +import geopandas as gpd +import pandas as pd +from geopy.exc import GeocoderUnavailable +from geopy.extra.rate_limiter import RateLimiter +from geopy.geocoders import Nominatim + + +def cleanse( + units: Union[pd.DataFrame, gpd.GeoDataFrame] +) -> Union[pd.DataFrame, gpd.GeoDataFrame]: + """Do some basic cleansing of MaStR unit data. + + This involves: + * country + + Parameters + ---------- + units : pd.DataFrame or gpd.GeoDataFrame + Units from MaStR + + Returns + ------- + pd.DataFrame or gpd.GeoDataFrame + Filtered units + """ + units = units.loc[(units.country == "Deutschland")] + + # Drop unnecessary columns + units.drop( + columns=[ + "country", + ], + inplace=True, + ) + + return units + + +def apply_manual_corrections( + units_df: pd.DataFrame, + units_correction_df: pd.DataFrame, +) -> pd.DataFrame: + """Correct units using manual correction dataset + + Parameters + ---------- + units_df : pd.DataFrame + Units from MaStR + units_correction_df : pd.DataFrame + Correction data + Returns + ------- + pd.DataFrame + Corrected units + """ + for attr in units_correction_df.wrong_attr.unique(): + # Correct site type (roof- or ground-mounted) + if attr == "site_type": + units_correction_site_df = units_correction_df.copy().loc[ + units_correction_df.wrong_attr == "site_type" + ][["correction"]] + units_df.loc[ + units_correction_site_df.index, "Lage" + ] = units_correction_site_df["correction"] + print( + f"Applied {len(units_correction_site_df)} " + f"corrections for column: {attr}" + ) + # Correct geometry + elif attr == "geometry": + units_correction_geom_df = units_correction_df.copy() + units_correction_geom_df[["x", "y"]] = ( + units_correction_geom_df.loc[ + (units_correction_geom_df.wrong_attr == "geometry") + & (units_correction_geom_df.correction != "None") + ] + .correction.str.split(",", expand=True) + .astype(float) + ) + units_correction_geom_df = gpd.GeoDataFrame( + units_correction_geom_df, + geometry=gpd.points_from_xy( + units_correction_geom_df.x, + units_correction_geom_df.y, + crs=3035, + ), + crs=3035, + ).to_crs(4326) + units_correction_geom_df = units_correction_geom_df.loc[ + ~units_correction_geom_df.geometry.is_empty + ] + units_correction_geom_df = units_correction_geom_df.assign( + lon=units_correction_geom_df.geometry.x, + lat=units_correction_geom_df.geometry.y, + ) + units_df.loc[ + units_correction_geom_df.index, ["Laengengrad", "Breitengrad"] 
+ ] = units_correction_geom_df[["lon", "lat"]] + print( + f"Applied {len(units_correction_geom_df)} " + f"corrections for column: {attr}" + ) + # If attribute does not exist + else: + raise NotImplementedError( + f"Correction of PV roof for attribute '{attr}' is not supported" + ) + return units_df.reset_index() + + +def add_voltage_level( + units_df: pd.DataFrame, + locations_path: str, + gridconn_path: str, + drop_location_id: bool = True, +) -> pd.DataFrame: + """Add voltage level to units from MaStR using locations and grid + connection points. + + Parameters + ---------- + units_df : pd.DataFrame + Units from MaStR + locations_path : str + Path to MaStR locations file + gridconn_path : str + Path to MaStR grid connection points file + drop_location_id : bool + Drop location id in the end + + Returns + ------- + pd.DataFrame + Units with column `voltage_level` + """ + # Get locations and grid connection point and merge both + locations = pd.read_csv( + locations_path, + usecols=["MastrNummer", "Netzanschlusspunkte"], + ).rename(columns={"MastrNummer": "mastr_location_id2"}) + gridconn = pd.read_csv( + gridconn_path, + usecols=["NetzanschlusspunktMastrNummer", "Spannungsebene"], + ) + locations = ( + locations.merge( + gridconn, + left_on="Netzanschlusspunkte", + right_on="NetzanschlusspunktMastrNummer", + how="left", + ) + .drop_duplicates() + .rename(columns={"Spannungsebene": "voltage_level"}) + ) + + # Add voltage level to units + units_df = units_df.reset_index().merge( + locations[["mastr_location_id2", "voltage_level"]], + left_on="mastr_location_id", + right_on="mastr_location_id2", + how="left", + ) + + # Drop unnecessary columns + cols = ( + ["mastr_location_id", "mastr_location_id2"] + if drop_location_id + else ["mastr_location_id2"] + ) + units_df.drop( + columns=cols, + inplace=True, + ) + + return units_df + + +def add_geometry( + units_df: pd.DataFrame, + drop_units_wo_coords: bool = True, +) -> gpd.GeoDataFrame: + """ + Add `geometry` column to MaStR unit data using `lat` and `lon` values. + Columns `lat` and `lon` are dropped + + Parameters + ---------- + units_df : pd.DataFrame + Units with columns `lat` and `lon` in CRS WGS84 (EPSG:4326) + drop_units_wo_coords : bool + Drop units which do not have valid lat and lon values. + Note: coordinates in the MaStR are only provided for plants>30 kW. + + Returns + ------- + gpd.GeoDataFrame + Units with geometry in CRS LAEA Europe (EPSG:3035) + """ + # Drop units without coords idf requested + if drop_units_wo_coords: + units_count_orig = len(units_df) + units_df = units_df.loc[(~units_df.lon.isna() & ~units_df.lat.isna())] + print( + f"{units_count_orig-len(units_df)} units have no or invalid " + f"coordinates. Their locations will be geocoded." + ) + + units_gdf = gpd.GeoDataFrame( + units_df, + geometry=gpd.points_from_xy(units_df["lon"], units_df["lat"], crs=4326), + crs=4326, + ).to_crs(3035) + + # Drop unnecessary columns + units_gdf.drop(columns=["lon", "lat"], inplace=True) + + return units_gdf + + +def geocode( + units_df: pd.DataFrame, + user_agent: str = "geocoder", + interval: int = 1, + target_crs: str = "EPSG:3035", +) -> gpd.GeoDataFrame: + """ + Geocode locations from MaStR unit table using zip code and city. + + Parameters + ---------- + units_df : pd.DataFrame + Units from MaStR. Must contain the following columns: + * zip_code (str) + * city (str) + user_agent : str + Some app name. Defaults to "geocoder" + interval : int + Delay in seconds to use between requests to Nominatim. 
+ A minimum of 1 is advised (default) + target_crs : str + CRS the data should be reprojected to. Defaults to EPSG:3035. + + Returns + ------- + gpd.GeoDataFrame + Units with geometry + """ + + def geocoder( + user_agent: str, + interval: int, + ) -> RateLimiter: + """Setup Nominatim geocoding class. + + Parameters + ----------- + user_agent : str + Some app name. + interval : int + Delay in seconds to use between requests to Nominatim. + A minimum of 1 is advised. + + Returns + ------- + geopy.extra.rate_limiter.RateLimiter + Nominatim RateLimiter geocoding class to use for geocoding. + """ + try: + locator = Nominatim(user_agent=user_agent) + return RateLimiter( + locator.geocode, + min_delay_seconds=interval, + ) + except GeocoderUnavailable: + print("Geocoder unavailable, aborting geocoding!") + raise + + # Define geocoder + ratelimiter = geocoder( + user_agent, + interval, + ) + + # Merge zip code and city and get unique values + units_df = units_df.assign( + zip_and_city=units_df.zip_code.astype(str) + " " + units_df.city, + ) + unique_locations = pd.DataFrame( + data=units_df.zip_and_city.unique(), + columns=["zip_and_city"], + ) + # Geocode unique locations! + print( + f"Geocoding {len(unique_locations)} unique locations, this will take " + f"about {round(len(unique_locations) * interval / 60, 1)} min..." + ) + unique_locations = unique_locations.assign( + location=unique_locations.zip_and_city.apply(ratelimiter) + ) + unique_locations = unique_locations.assign( + point=unique_locations.location.apply( + lambda loc: tuple(loc.point) if loc else None + ) + ) + unique_locations[["latitude", "longitude", "altitude"]] = pd.DataFrame( + unique_locations.point.tolist(), index=unique_locations.index + ) + unique_locations = gpd.GeoDataFrame( + unique_locations, + geometry=gpd.points_from_xy( + unique_locations.longitude, unique_locations.latitude + ), + crs="EPSG:4326", + ) + # Merge locations back in units + units_gdf = gpd.GeoDataFrame( + units_df.merge( + unique_locations[["zip_and_city", "geometry"]], + on="zip_and_city", + ).drop(columns=["zip_and_city"]) + ).to_crs(target_crs) + + return units_gdf + + +def geocode_units_wo_geometry( + units_df: pd.DataFrame, + columns_agg_functions: dict, + target_crs: str = "EPSG:3035", +) -> Tuple[gpd.GeoDataFrame, gpd.GeoDataFrame]: + """ + Add locations to units without coordinates by geocoding. The units are + returned with approximated locations (1 unit per row) as well as grouped by + approximated location (1 dataset with >=1 units per row) with aggregated + attributes as given by `columns_agg_functions`. + + Parameters + ---------- + units_df : pd.DataFrame + Units without geometry. Must have columns "zip_code" and "city" as well + as the columns specified in `columns_agg_functions`. + Note: If existent, geometry column will be overwritten. + columns_agg_functions : dict + Defines how columns shall be aggregated. Format as in Pandas' .agg() + function. + Example: + { + "capacity_net": ("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum") + } + In this example, the sum of `capacity_net` and number of units is + calculated and named to "capacity_net" and "unit_count", respectively. + Also, the sum of `capacity_gross` is calculated and the name is + retained. + target_crs : str + CRS the data should be reprojected to. Defaults to EPSG:3035 + + Returns + ------- + gpd.GeoDataFrame + Units with approximated location (1 unit per row). 
+ gpd.GeoDataFrame + Units grouped by approximated location (1 dataset with >=1 units per + row) with aggregated attributes as given by `columns_agg_functions`. + """ + + def aggregate_units_wo_geometry( + units_gdf: gpd.GeoDataFrame, + ) -> gpd.GeoDataFrame: + """Aggregate units by approximated position + + Parameters + ---------- + units_gdf : gpd.GeoDataFrame + Units + Returns + ------- + gpd.GeoDataFrame + Units aggregated by position + """ + + # Aggregate units with approximated position + units_gdf["lon"] = units_gdf.geometry.x + units_gdf["lat"] = units_gdf.geometry.y + + grouping_columns = ["zip_code", "city", "lat", "lon"] + units_agg_gdf = ( + units_gdf[grouping_columns + columns_agg_names] + .groupby(grouping_columns, as_index=False) + .agg(**columns_agg_functions) + ) + + # Create geometry and select columns + units_agg_gdf = gpd.GeoDataFrame( + units_agg_gdf, + geometry=gpd.points_from_xy(units_agg_gdf.lon, units_agg_gdf.lat), + crs=target_crs, + )[ + ["zip_code", "city"] + + list(columns_agg_functions.keys()) + + ["geometry"] + ] + return units_agg_gdf.assign( + status="In Betrieb oder in Planung", + geometry_approximated=1, + ) + + # Check if all required columns are present + if not all(c in units_df.columns for c in ["zip_code", "city"]): + raise ValueError( + "Column zip_code or city not present, geocoding not possible." + ) + columns_agg_names = list({c for c, _ in columns_agg_functions.values()}) + if not all(c in units_df.columns for c in columns_agg_names): + raise ValueError( + "On or more columns requested in the aggregation functions dict " + "(columns_agg_functions) are not present, cannot proceed." + ) + + units_with_inferred_geom_gdf = geocode(units_df) + units_with_inferred_geom_gdf = units_with_inferred_geom_gdf.assign( + geometry_approximated=1, + ) + + units_with_inferred_geom_agg_gdf = aggregate_units_wo_geometry( + units_with_inferred_geom_gdf.copy() + ) + + return units_with_inferred_geom_gdf, units_with_inferred_geom_agg_gdf + + +def create_stats_per_municipality( + units_df: pd.DataFrame, + muns: gpd.GeoDataFrame, + column: str, + only_operating_units: bool = True, +) -> pd.DataFrame: + """ + Create statistics on units per municipality for one column. + Municipalities with no data in `column` are set to 0. + + Parameters + ---------- + units_df : pd.DataFrame + Units + muns : gpd.GeoDataFrame + Municipalities + column : str + Column in units_df used for aggregation + only_operating_units : bool + Use only units which are operating, no planned or decommissioned. + Defaults to true. 
+ + Returns + ------- + pd.DataFrame + Units, aggregated per municipality + """ + + if only_operating_units is True: + units_df = units_df.loc[units_df["status"] == "In Betrieb"] + + units_df = units_df[["municipality_id", column]] + units_df = ( + units_df.groupby("municipality_id") + .agg( + column=(column, "sum"), + count=(column, "count"), + ) + .rename(columns={"column": column}) + ) + + units_df = units_df.reindex(muns.id, fill_value=0).rename( + columns={"id": "municipality_id"} + ) + units_df.index.name = "municipality_id" + + return units_df diff --git a/digipipe/scripts/geo.py b/digipipe/scripts/geo.py new file mode 100644 index 00000000..ece33554 --- /dev/null +++ b/digipipe/scripts/geo.py @@ -0,0 +1,409 @@ +""" +Helper functions for geodata processing +""" + +import os +from collections import OrderedDict +from typing import Tuple, Union + +import fiona +import geopandas as gpd +import pandas as pd +import rasterio as rio +from rasterio.mask import mask +from rasterstats import zonal_stats +from shapely.geometry.multipolygon import MultiPolygon +from shapely.ops import transform + +from digipipe.config import GLOBAL_CONFIG + + +def read_schema_from_file(file: str) -> Tuple[str, OrderedDict]: + """Read a geo file and returns schema definition using fiona + + Parameters + ---------- + file : str + Full path to file to read schema from + + Returns + ------- + str + Schema of geometry + OrderedDict + Properties/Fields of dataset (str: str) + """ + try: + with fiona.open(file) as f: + schema_in_geom = f.schema["geometry"] + schema_in_props = f.schema["properties"] + except: + f.close() + raise + return schema_in_geom, schema_in_props + + +def file_is_empty(file: str) -> bool: + """Check if file holds no geodata (is empty) + + Parameters + ---------- + file : str + Full path to file to read + + Returns + ------- + bool + True if file is empty, False otherwise + """ + with fiona.open(file) as coll: + return len(coll) == 0 + + +def convert_to_multipolygon(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame: + """Convert geometry column to type MultiPolygon + + Parameters + ---------- + gdf : :pandas:`geopandas.GeoDataFrame` + Data to be aligned + + Returns + ------- + :pandas:`geopandas.GeoDataFrame` + """ + + def remove_z(row): + """ + Remove z coordinate from Geometry, e.g. 
MultiPolygon (MULTIPOLYGON Z) + """ + return transform(lambda x, y, z=None: (x, y), row) + + gdf["geometry"] = [ + MultiPolygon([feature]) if feature.geom_type == "Polygon" else feature + for feature in gdf["geometry"] + ] + + gdf["geometry"] = gdf["geometry"].apply(remove_z) + + return gdf + + +def write_geofile( + gdf: gpd.GeoDataFrame, + file: str, + layer_name: str = None, + schema: dict = None, + driver: str = "GPKG", + encoding: str = "utf-8", +) -> None: + """Write geodata to file + + Parameters + ---------- + gdf : :pandas:`geopandas.GeoDataFrame` + file : str + Target file + layer_name : str + Name of layer, usually same as file basename + schema : dict + Output schema with keys "geometry" and "properties" + driver : str + Geofile driver, default is Geopackage + encoding : str + Encoding + """ + if layer_name is None: + layer_name = os.path.basename(file).split(".")[0] + if schema is None: + # TODO: Log warning + pass + + # check if data contain multiple geometry types + if len(gdf.geometry.type.unique()) > 1: + types = gdf.geometry.type.unique() + raise ValueError(f"Data contain multiple geometry types: {types} !") + + gdf.to_file( + file, layer=layer_name, schema=schema, driver=driver, encoding=encoding + ) + + +def rename_filter_attributes( + gdf: Union[pd.DataFrame, gpd.GeoDataFrame], + attrs_filter_by_values: dict = None, + attrs_mapping: dict = None, +) -> Union[pd.DataFrame, gpd.GeoDataFrame]: + """Rename attributes and filter them by values + + Note: Only attributes as given by `attrs_mapping` are kept! + + Parameters + ---------- + gdf : pd.DataFrame or gpd.GeoDataFrame + Geodata + attrs_filter_by_values : dict + Attributes whose values are to be filtered. Use attributes as dict + keys and desired values as dict values (values can be of type str, + int, float or list) + Example: {"GF": 4, "NUTS": ["DEE01", "DEE05", "DEE0E"]} + attrs_mapping : dict + Attributes to select and rename. Use original attributes' names as + dict keys and new names as values. + + Returns + ------- + pd.DataFrame or gpd.GeoDataFrame + Filtered geodata + """ + # Filter by attribute values, if defined + if attrs_filter_by_values is not None: + list_vals = [] + query = "" + for k, v in attrs_filter_by_values.items(): + if isinstance(v, list): + list_vals.append(v) + query += f" & {k} in @list_vals[{len(list_vals)-1}]" + elif isinstance(v, str): + query += f" & {k}=='{v}'" + elif isinstance(v, (int, float)): + query += f" & {k}=={v}" + else: + raise ValueError( + "Data type in attribute filter is not supported!" + ) + query = query[2:] + gdf = gdf.query(query) + + # Extract and rename fields + if attrs_mapping is not None: + gdf = gdf.filter(attrs_mapping.keys()) + gdf.rename(columns=attrs_mapping, inplace=True) + + return gdf + + +def reproject_simplify( + gdf: gpd.GeoDataFrame, + target_crs: str = GLOBAL_CONFIG["global"]["geodata"]["crs"].lower(), + min_size: float = None, + simplify_tol: float = None, + fix_geom: bool = False, + add_id_column: bool = False, +) -> gpd.GeoDataFrame: + """General purpose function for processing of geodata + + Parameters + ---------- + gdf : gpd.GeoDataFrame + Geodata + target_crs : str + CRS the data should be reprojected to. Defaults to value from global + config. + min_size : float + Min. size of area to select (in sqm). Use None for no filtering + (default). + Raises exception if `target_crs` is not LAEA Europe (EPSG:3035). + simplify_tol : float + Threshold for simplification of geometries. Use None for no + simplification (default). 
+ Raises exception if `target_crs` is not LAEA Europe (EPSG:3035). + fix_geom : bool + If True, invalid geometries are fixed by buffering by the value + specified in the global config (geodata -> fix_geom_buffer). + add_id_column : bool + If True, data is reindexed starting from 0 and a new column "id" is + added with the same values. + + Returns + ------- + gpd.GeoDataFrame + Processed geodata + """ + + def check_crs(operation: str) -> None: + """Check if requested CRS is LAEA Europe (EPSG:3035)""" + if target_crs.lower() != "epsg:3035": + raise ValueError( + f"Cannot apply {operation} in non-equistant CRS " + f"(requested CRS: {target_crs.lower()}) !" + ) + + # Transform to target CRS + if gdf.crs is not None: + if str(gdf.crs).lower() != target_crs.lower(): + gdf = gdf.to_crs(target_crs) + else: + raise ValueError("Geodata has not CRS assigned.") + + # Filter by min size + if min_size is not None: + check_crs("min size filtering") + gdf = gdf[gdf.area > min_size] + + # Generalize + if simplify_tol is not None: + check_crs("simplification") + gdf["geometry"] = gdf.simplify(simplify_tol, preserve_topology=True) + + # Fix invalid geometries + if fix_geom is True: + buffer = GLOBAL_CONFIG["global"]["geodata"]["fix_geom_buffer"] + if buffer > 0: + gdf["geometry"] = gdf.buffer(buffer) + + # Reindex starting from 0 and add new column "id" with same values + if add_id_column is True: + gdf.reset_index(drop=True, inplace=True) + gdf = gdf.assign(id=gdf.index) + + return gdf + + +def overlay( + gdf: gpd.GeoDataFrame, + gdf_overlay: gpd.GeoDataFrame, + retain_rename_overlay_columns: dict = None, + gdf_use_centroid: bool = False, +) -> gpd.GeoDataFrame: + """Clips geodata to polygon + + Parameters + ---------- + gdf : gpd.GeoDataFrame + Geodata to be clipped (geometry in column "geometry") + gdf_overlay : gpd.GeoDataFrame + Geodata to clip `gdf` to, e.g. municipalities (geometry in column + "geometry") + retain_rename_overlay_columns : dict + Columns to retain from `gdf_clip` (do not include "geometry") + gdf_use_centroid : bool + If True, the centroid of gdf will be used for overlay (geometry column + will be retained though). Defaults to False. 
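+
+    Returns
+    -------
+    gpd.GeoDataFrame
+        Clipped geodata with the retained columns from `gdf_overlay` renamed
+        as requested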
+ """ + if retain_rename_overlay_columns is None: + columns = ["geometry"] + retain_rename_overlay_columns = {} + else: + if "geometry" in retain_rename_overlay_columns.keys(): + raise ValueError("Geometry must not be in rename dict!") + columns = list(retain_rename_overlay_columns.keys()) + ["geometry"] + + # Use centroid if requested + if gdf_use_centroid is True: + # Retain geometry + geometry_backup = gdf.geometry.copy() + + # Clip and rename columns + gdf_clipped = ( + gpd.overlay( + gdf.assign(geometry=gdf.centroid), + gdf_overlay[columns], + how="intersection", + ) + .rename(columns=retain_rename_overlay_columns) + .assign(geometry=geometry_backup) + ) + else: + # Clip and rename columns + gdf_clipped = gpd.overlay( + gdf, gdf_overlay[columns], how="intersection" + ).rename(columns=retain_rename_overlay_columns) + + return gdf_clipped + + +def clip_raster( + raster_file_in: str, clip_file: str, raster_file_out: str +) -> None: + """Clip raster data using vector data + + Parameters + ---------- + raster_file_in : str + Path to raster file to be clipped + clip_file : str + Path to vector file used for clipping + raster_file_out : str + Path to clipped raster file + + Returns + ------- + None + """ + clip_data = gpd.read_file(clip_file).geometry + with rio.open(raster_file_in) as f: + out_image, out_transform = mask(f, clip_data, crop=True) + out_meta = f.meta + + out_meta.update( + { + "driver": "GTiff", + "height": out_image.shape[1], + "width": out_image.shape[2], + "transform": out_transform, + } + ) + with rio.open(raster_file_out, "w", **out_meta) as dest: + dest.write(out_image) + + +def raster_zonal_stats( + raster_file_in: str, + clip_file: str, + zonal_file_out: str, + var_name: str = "mean", + stats: str = "mean", +) -> None: + """Create zonal stats + + Parameters + ---------- + raster_file_in : str + Path to raster file with data + clip_file : str + Path to vector file (zones) used for zonal stats + raster_file_out : str + Path to output raster file + var_name : str + Name of variable in output file + stats : str + Stats to be created, e.g. 
"mean" or "sum mean" + + Returns + ------- + None + """ + schema_in_geom, schema_in_props = read_schema_from_file(clip_file) + clip_data = gpd.read_file(clip_file) + + zonal_stats_df = gpd.GeoDataFrame() + zonal_stats_df = ( + zonal_stats_df.from_features( + zonal_stats( + clip_data, raster_file_in, stats=stats, geojson_out=True + ), + crs=3035, + ) + .rename(columns={"sum": var_name}) + .to_crs(GLOBAL_CONFIG["global"]["geodata"]["crs"]) + ) + + field_types = [ + (field, dtype) + for field, dtype in schema_in_props.items() + if field in zonal_stats_df.columns + ] + field_types.extend([(var_name, "float")]) + + schema_out = { + "geometry": schema_in_geom, + "properties": OrderedDict(field_types), + } + + write_geofile( + gdf=zonal_stats_df, + file=zonal_file_out, + layer_name="res", + schema=schema_out, + ) diff --git a/digipipe/store/3_datasets/TEMPLATE/config.yml b/digipipe/store/3_datasets/TEMPLATE/config.yml deleted file mode 100644 index dfd2d7b3..00000000 --- a/digipipe/store/3_datasets/TEMPLATE/config.yml +++ /dev/null @@ -1,3 +0,0 @@ -""" -Config file for dataset -""" diff --git a/digipipe/store/3_datasets/TEMPLATE/create.py b/digipipe/store/3_datasets/TEMPLATE/create.py deleted file mode 100644 index cd44ffd6..00000000 --- a/digipipe/store/3_datasets/TEMPLATE/create.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -Process layer: Build new layer from preprocessed data -""" diff --git a/digipipe/store/DATASETS.md b/digipipe/store/DATASETS.md new file mode 100644 index 00000000..79fb033b --- /dev/null +++ b/digipipe/store/DATASETS.md @@ -0,0 +1,177 @@ +# Data pipeline + +## Data flow + +This section describes the data flow of the pipeline. + +(RAW) --> (PREPROCESSED) --> (DATASETS) --> (APPDATA) + +**Overview:** + +| **Step** | **Directory** | **Description** | **Rule(s) for target** | **Cfg section** | +|:--------:|-----------------------|-------------------------------------------|------------------------|-----------------| +| 0 | `store/raw/` | Raw data as downloaded | TBD | TBD | +| 1 | `store/preprocessed/` | Preprocessed data, 1:1 from (0) | TBD | TBD | +| 2 | `store/datasets/` | Datasets, n:1 from (0) and (1) | TBD | TBD | +| 3 | `store/appdata/` | Data ready to be used in the app from (2) | TBD | TBD | + +In the following each step is shortly described along a common example use +case. + +**Example data flow:** + +[![example data flow](../../docs/img/datasets/pipeline_dataflow_example.png)](../../docs/img/datasets/pipeline_dataflow_example.png) + +**Snakefile** + +As all rules will be +searched for and included in the main [Snakefile](../workflow/Snakefile), +they must have unique names. It's a good idea to use the dataset name as prefix, +e.g. `rule osm_forest_`. + +### (0) Raw + +Contains immutable raw data as downloaded. In directory `TEMPLATE` there are +two additional files: [dataset.md](../store/raw/TEMPLATE/dataset.md) (see that file +for further instructions) and [metadata.json)](../store/raw/TEMPLATE/metadata.json). + +Note: Assumptions are to be defined in the scenarios, not the raw data. +See the scenario readme in [SCENARIOS.md](../../digipipe/scenarios/SCENARIOS.md). + +!!! 
note "Example" + - Dataset A: ERA5 weather dataset for Germany + - Dataset B: MaStR dataset on renewable generators + - Dataset C: Shapefiles of region: federal districts and municipalities + +### (1) Preprocessed + +Data from `(0) Raw` that has undergone some preprocessing such as: + +- Archive extracted +- CRS transformed (see below for CRS conventions) +- Fields filtered +- **But NO merging/combining/clipping of multiple (raw) datasets! This should + be done in (2)** + +Rules, config and info + +- Preprocessing rule(s) for the dataset can be defined in the dataset's Snakefile: +[preprocessed/TEMPLATE/create.smk](preprocessed/TEMPLATE/create.smk). +- Subsequently, these rules **must be** included in the module file +[preprocessed/module.smk](preprocessed/module.smk) to take effect (see +template in the file). +- Custom, dataset-specific configuration can be put into the dataset's config +file [preprocessed/TEMPLATE/config.yml](preprocessed/TEMPLATE/config.yml). +- The title and description of each dataset are to be gathered in the file +[preprocessed/TEMPLATE/dataset.md](preprocessed/TEMPLATE/dataset.md). + +!!! note "Example" + + - Dataset D: Extracted ERA5 weather dataset for Germany (from dataset A) + - Dataset E: Wind energy turbines extracted from MaStR dataset, filter for + columns power and geometry (from dataset B) + - Dataset F: Federal districts converted to Geopackage file, CRS transformed + (from dataset C) + - Dataset G: Municipalities converted to Geopackage file, CRS transformed + (from dataset C) + +### (2) Datasets + +Datasets, created from arbitrary combinations of datasets from +`store/preprocessed/` and/or `store/datasets/`. + +Rules, config and info + +- Creation rule(s) for the dataset can be defined in the dataset's + Snakefile: [datasets/TEMPLATE/create.smk](datasets/TEMPLATE/create.smk). +- Subsequently, these rules **must be** included in the module file + [datasets/module.smk](datasets/module.smk) to take effect (see + template in the file). +- Custom, dataset-specific configuration can be put into the dataset's config + file [datasets/TEMPLATE/config.yml](datasets/TEMPLATE/config.yml). +- The title and description of each dataset are to be gathered in the file + [datasets/TEMPLATE/dataset.md](datasets/TEMPLATE/dataset.md). +- Custom, dataset-specific scripts are located in `scripts`. + +!!! note "Example" + Using datasets from `store/preprocessed/` and `store/datasets/`: + + - Dataset H: Wind energy turbines in the region of interest (from datasets E+F) + - Dataset I: Normalized wind energy feedin timeseries for the region (from + datasets D+G) + - Dataset J: Federal districts (from dataset F) + - Dataset K: Municipalities (from dataset G) + +### (3) App data + +Data ready to be used in the app / as expected by the app. + +While the data from (0), (1) and (2) contain datasets from intermediate steps, +this directory holds app-ready datasets. See +[appdata/APPDATA](appdata/APPDATA.md) for further information. + +### To which processing step do the different stages of my data belong to? + +Often, there are multiple options to assign a dataset. If you're still unsure +where the different stages of your dataset should be located after having +read the examples above, the following example may help you: + +Let's say you want to integrate some subsets of an OpenStreetMap dataset, +namely extracting (a) forests and (b) residential structures for a specific +region. You want both be stored in separate files. This involves (e.g.): + +1. 
Convert the pbf file to a handier format such as Geopackage +2. Extract data using OSM tags +3. Clip with region + +First, you would create a new raw dataset in `store/raw/`. Then, you could +either put steps 1 and 2 in the preprocessing, resulting in two datasets in +`store/preprocessed/`. For each, you could then perform step 3 in +`store/datasets/`. +However, this would imply a redundant execution of step 1. While this is +basically fine in terms of the pipeline flow, it might be a better idea to +apply only step 1 and create one dataset in `store/preprocessed/`. +Using this dataset, you would create the two extracts in `store/datasets/`. +Finally, the datasets after performing step 3 would be created in `store/datasets/` as well, +resulting in a total of four datasets in `store/datasets/`. + +### Temporary files + +**TODO: REVISE** + +Temporary files are stored in `store/temp/` by default and, depending on your +configuration, can get quite large. You can change the directory in +`config.yml` --> `path` --> `temp`. + +## Further notes + +### No data files in the repository! But keep the `.gitkeep` files + +Make sure **not to commit any data files** located in `store/` to the +repository (except for the descriptive readme and metadata files). Rules have +been defined in the `.gitignore` file which should make git omit those +files, but it is better not to rely on it. Instead, save them in the designated +directory on the [RLI Wolke](https://wolke.rl-institut.de/f/160572). + +Each data directory in the provided templates contains an empty `.gitkeep` +file. When creating a new dataset, please commit this file too to make sure +the (empty) data directory is retained. + +### Coordinate reference system + +Please use LAEA Europe (EPSG:3035) as the default CRS when writing geodata. + +**TODO: REVISE** + +- The files in `store/raw/` can have an arbitrary CRS. +- In the preprocessing (step 1) it is converted to the CRS specified in the global `config.yml` --> `preprocessing` --> + `crs`. It is important to use an equal-area CRS to make sure operations such as buffering work properly. By default, + it is set to LAEA Europe (EPSG:3035). +- The final output is written in the CRS specified in the global `config.yml` --> `output` --> `crs`. By default, it is set + to WGS84 (EPSG:4326), which is used by the app. + +## HowTos + +### Add a new dataset + +**TBD** diff --git a/digipipe/store/3_datasets/TEMPLATE/output/.gitkeep b/digipipe/store/__init__.py similarity index 100% rename from digipipe/store/3_datasets/TEMPLATE/output/.gitkeep rename to digipipe/store/__init__.py diff --git a/digipipe/store/appdata/APPDATA.md b/digipipe/store/appdata/APPDATA.md new file mode 100644 index 00000000..76a32d3e --- /dev/null +++ b/digipipe/store/appdata/APPDATA.md @@ -0,0 +1,16 @@ +# App data + +App-ready datasets for the +[Digiplan App](https://github.com/rl-institut-private/digiplan/). + +## Structure and content + +| Directory | File/dir | Content | +|-------------|----------------------|-----------------------------------------------| +| esys | | Energy system topology and scenario | +| | | | +| datapackage | datapackage_app.json | [Datapackage for app](datapackage/dataset.md) | +| | | | +| metadata | | | + +The app data is loaded on app startup.
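+
+As a hedged illustration of how the resource definitions in the following
+`config.yml` could be resolved into this datapackage, here is a minimal
+sketch. It only relies on the structure visible in that config (resource
+groups containing `scalars`, `geodata` and `sequences` entries, each with a
+`path` and a `_source_path`); the helper name `collect_resources`, the store
+layout passed in as `store` and the target directory are illustrative
+assumptions, not the actual pipeline API.
+
+```python
+# Minimal sketch (see assumptions above): copy each resource's source file
+# from the data store into the datapackage data directory.
+import shutil
+from pathlib import Path
+
+import yaml
+
+
+def collect_resources(config_file: Path, store: Path, target: Path) -> None:
+    """Copy every resource's source file into the datapackage layout."""
+    cfg = yaml.safe_load(config_file.read_text(encoding="utf-8"))
+    for group in cfg.get("resources", {}).values():
+        for kind in ("scalars", "geodata", "sequences"):
+            for res in (group.get(kind) or {}).values():
+                # "_source_path" points at the producing dataset and file,
+                # "path" is the location inside the datapackage.
+                src = (
+                    store  # assumed: e.g. digipipe/store/datasets/
+                    / res["_source_path"]["dataset"]
+                    / "data"
+                    / res["_source_path"]["file"]
+                )
+                dst = target / res["path"]
+                dst.parent.mkdir(parents=True, exist_ok=True)
+                shutil.copy(src, dst)
+```
+
+A call such as `collect_resources(Path("config.yml"), Path("../../datasets"),
+Path("data"))` would then populate the `scalars/`, `geodata/` and `sequences/`
+subfolders listed above; the exact arguments depend on where the sketch is
+run from.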
diff --git a/digipipe/store/appdata/datapackage/config.yml b/digipipe/store/appdata/datapackage/config.yml new file mode 100644 index 00000000..cced3918 --- /dev/null +++ b/digipipe/store/appdata/datapackage/config.yml @@ -0,0 +1,1388 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Datapackage definition +name: "digiplan_app_datapackage" +description: "Datapackage for Digiplan app" +resources: + # Base data + base_data: + scalars: + population: + description: "Bevölkerung" + path: "scalars/population.csv" + fields: + 2010: + description: "Bevölkerung 2010" + unit: "-" + 2015: + description: "Bevölkerung 2015" + unit: "-" + 2020: + description: "Bevölkerung 2020" + unit: "-" + 2021: + description: "Bevölkerung 2021" + unit: "-" + 2022: + description: "Bevölkerung 2022" + unit: "-" + 2025: + description: "Bevölkerung 2025" + unit: "-" + 2030: + description: "Bevölkerung 2030" + unit: "-" + 2035: + description: "Bevölkerung 2035" + unit: "-" + 2040: + description: "Bevölkerung 2040" + unit: "-" + 2045: + description: "Bevölkerung 2045" + unit: "-" + _source_path: + dataset: "population_region" + file: "population.csv" + employment: + description: "Beschäftigte und Betriebe" + path: "scalars/employment.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + employees_total: + description: "Sozialversicherungspflichtige Beschäftigte gesamt" + unit: "-" + companies_total: + description: "Betriebe gesamt" + unit: "-" + employees_ind: + description: "Sozialversicherungspflichtige Beschäftigte in der Industrie" + unit: "-" + companies_ind: + description: "Industriebetriebe" + unit: "-" + _source_path: + dataset: "employment_region" + file: "employment.csv" + osm_buildings: + description: "Gebäudegrundflächen und regionaler Anteil" + path: "scalars/osm_buildings_ground_area.json" + fields: + building_ground_area_country: + description: "Summe Gebäudegrundfläche in Deutschland" + unit: "m2" + building_ground_area_region: + description: "Summe Gebäudegrundfläche in der Region" + unit: "m2" + building_ground_area_share_region: + description: "Regionaler Anteil an Gebäudegrundflächen" + unit: "-" + _source_path: + dataset: "osm_buildings" + file: "osm_buildings_ground_area.json" + geodata: + # Administrative areas + bkg_vg250_region: + description: "Region" + path: "geodata/bkg_vg250_region.gpkg" + fields: [ ] + _source_path: + dataset: "bkg_vg250_region" + file: "bkg_vg250_region.gpkg" + bkg_vg250_districts_region: + description: "Landkreise" + path: "geodata/bkg_vg250_districts_region.gpkg" + fields: [ ] + _source_path: + dataset: "bkg_vg250_districts_region" + file: "bkg_vg250_districts_region.gpkg" + bkg_vg250_muns_region: + description: "Gemeinden" + path: "geodata/bkg_vg250_muns_region.gpkg" + fields: [ ] + _source_path: + dataset: "bkg_vg250_muns_region" + file: "bkg_vg250_muns_region.gpkg" + + # From Djagora + air_traffic_control_system_region: + description: "Drehfunkfeuer" + path: "geodata/air_traffic_control_system_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "air_traffic_control_system_region.gpkg" + aviation_region: + description: "Luftverkehr" + path: "geodata/aviation_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "aviation_region.gpkg" + biosphere_reserve_region: + description: "Biosphärenreservate" 
+ path: "geodata/biosphere_reserve_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "biosphere_reserve_region.gpkg" + drinking_water_protection_area_region: + description: "Wasserschutzgebiete" + path: "geodata/drinking_water_protection_area_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "drinking_water_protection_area_region.gpkg" + fauna_flora_habitat_region: + description: "Fauna-Flora-Habitate" + path: "geodata/fauna_flora_habitat_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "fauna_flora_habitat_region.gpkg" + floodplain_region: + description: "Überschwemmungsgebiete" + path: "geodata/floodplain_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "floodplain_region.gpkg" + forest_region: + description: "Wälder" + path: "geodata/forest_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "forest_region.gpkg" + grid_region: + description: "Stromnetze (>=110 kV)" + path: "geodata/grid_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "grid_region.gpkg" + industry_region: + description: "Industrie- und Gewerbegebiete" + path: "geodata/industry_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "industry_region.gpkg" + landscape_protection_area_region: + description: "Landschaftsschutzgebiete" + path: "geodata/landscape_protection_area_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "landscape_protection_area_region.gpkg" + less_favoured_areas_agricultural_region: + description: "Benachteiligte Gebiete" + path: "geodata/less_favoured_areas_agricultural_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "less_favoured_areas_agricultural_region.gpkg" + military_region: + description: "Militärische Sperrgebiete und Liegenschaften" + path: "geodata/military_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "military_region.gpkg" + nature_conservation_area_region: + description: "Naturschutzgebiete" + path: "geodata/nature_conservation_area_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "nature_conservation_area_region.gpkg" + railway_region: + description: "Bahnverkehr" + path: "geodata/railway_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "railway_region.gpkg" + road_region: + description: "Straßen" + path: "geodata/road_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "road_region.gpkg" + road_railway-500m_region: + description: "Straßen und Bahnverkehr (500 m Puffer)" + path: "geodata/road_railway-500m_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "road_railway-500m_region.gpkg" + settlement-0m_region: + description: "Siedlungen" + path: "geodata/settlement-0m_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "settlement-0m_region.gpkg" + soil_quality_high_region: + description: "Ackerflächen mit hoher Bodenqualität (Soil Quality Rating >= 40)" + path: "geodata/soil_quality_high_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "soil_quality_high_region.gpkg" + soil_quality_low_region: + description: "Ackerflächen mit geringer Bodenqualität (Soil Quality Rating < 40)" + path: "geodata/soil_quality_low_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" 
+ file: "soil_quality_low_region.gpkg" + special_protection_area_region: + description: "Vogelschutzgebiete" + path: "geodata/special_protection_area_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "special_protection_area_region.gpkg" + water_region: + description: "Gewässser" + path: "geodata/water_region.gpkg" + fields: [ ] + _source_path: + dataset: "rli_pv_wfr_region" + file: "water_region.gpkg" + + + # Energy production + production: + scalars: + #units: + biomass: + description: "Biomasseanlagen: Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_biomass_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_biomass_region" + file: "bnetza_mastr_biomass_stats_muns.csv" + combustion: + description: "Verbrennungskraftwerke: Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_combustion_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_combustion_region" + file: "bnetza_mastr_combustion_stats_muns.csv" + gsgk: + description: "Geo- oder Solarthermie-, Grubengas- und Klärschlamm-Anlagen: Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_gsgk_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_gsgk_region" + file: "bnetza_mastr_gsgk_stats_muns.csv" + hydro-ror: + description: "Wasserkraftanlagen: Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_hydro_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_hydro_region" + file: "bnetza_mastr_hydro_stats_muns.csv" + solar-pv_ground: + description: "PV-Freiflächenanlagen: Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_pv_ground_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_pv_ground_region" + file: "bnetza_mastr_pv_ground_stats_muns.csv" + solar-pv_ground_development_over_time: + description: "Veränderung (pro Jahr) der installierten Gesamtleistung und der Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_pv_ground_development_over_time.csv" + fields: + year: + description: "Jahr" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + unit_count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_pv_ground_region" + file: "bnetza_mastr_pv_ground_development_over_time.csv" + solar-pv_rooftop: + description: "PV-Aufdachanlagen: Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_pv_roof_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_pv_roof_region" 
+ file: "bnetza_mastr_pv_roof_stats_muns.csv" + solar-pv_rooftop_development_over_time: + description: "Veränderung (pro Jahr) der installierten Gesamtleistung und der Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_pv_roof_development_over_time.csv" + fields: + year: + description: "Jahr" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + unit_count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_pv_roof_region" + file: "bnetza_mastr_pv_roof_development_over_time.csv" + storage: + description: "Batteriespeicher gesamt: Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_storage_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + storage_capacity: + description: "Speicherkapazität" + unit: "MWh" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_storage_region" + file: "bnetza_mastr_storage_stats_muns.csv" + storage_large: + description: "Batteriespeicher groß (>=100 kWh): Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_storage_large_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + storage_capacity: + description: "Speicherkapazität" + unit: "MWh" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_storage_region" + file: "bnetza_mastr_storage_large_stats_muns.csv" + storage_small: + description: "Batteriespeicher klein (<100 kWh): Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_storage_small_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + storage_capacity: + description: "Speicherkapazität" + unit: "MWh" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_storage_region" + file: "bnetza_mastr_storage_small_stats_muns.csv" + storage_pv_roof: + description: "Spezifische Kapazität und Leistung von Batteriespeichern, die PV-Aufdachanlagen zugeordnet sind (in Betrieb)" + path: "scalars/bnetza_mastr_storage_pv_roof.json" + fields: + pv_roof_share: + all_storages: + description: "Anteil von PV-Aufdachanlagen mit Speicher (alle Anlagen und Speicher)" + unit: "-" + home_storages: + description: "Anteil von PV-Aufdachanlagen mit Speicher (kleine Anlagen und Speicher)" + unit: "" + specific_capacity: + all_storages: + description: "Mittlere spezifische Kapazität aller Speicher" + unit: "kWh/kWp" + home_storages: + description: "Mittlere spezifische Kapazität von kleinen Speichern" + unit: "kWh/kWp" + specific_power: + all_storages: + description: "Mittlere spezifische Leistung aller Speicher" + unit: "kW/kWp" + home_storages: + description: "Mittlere spezifische Leistung von kleinen Speichern" + unit: "kW/kWp" + _source_path: + dataset: "bnetza_mastr_storage_region" + file: "bnetza_mastr_storage_pv_roof.json" + wind-onshore: + description: "Windenergieanlagen: Installierte Leistung und Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_wind_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_wind_region" + file: "bnetza_mastr_wind_stats_muns.csv" + wind-onshore_development_over_time: + description: "Veränderung (pro Jahr) der installierten Gesamtleistung und der Anzahl (in Betrieb)" + path: "scalars/bnetza_mastr_wind_development_over_time.csv" + fields: + year: + 
description: "Jahr" + unit: "-" + capacity_net: + description: "Nennleistung" + unit: "MW" + unit_count: + description: "Anzahl" + unit: "-" + _source_path: + dataset: "bnetza_mastr_wind_region" + file: "bnetza_mastr_wind_development_over_time.csv" + technology_data: + description: "Volllaststunden je Technologie und Jahr" + path: "scalars/technology_data.json" + fields: + full_load_hours: + wind: + description: "Volllaststunden Windenergie" + unit: "-" + pv_ground: + description: "Volllaststunden Freiflächen-PV" + unit: "-" + pv_roof: + description: "Volllaststunden Aufdach-PV" + unit: "-" + st: + description: "Volllaststunden Solarthermie" + unit: "-" + ror: + description: "Volllaststunden Laufwasserkraft" + unit: "-" + bioenergy: + description: "Volllaststunden Bioenergie-Stromerzeugung (ohne biogenen Teil des Abfalls)" + unit: "-" + power_density: + wind: + description: "Leistungsdichte Windenergie" + unit: "MW/km2" + pv_ground: + description: "Leistungsdichte PV-Freiflächenanlagen" + unit: "MW/km2" + pv_roof: + description: "Leistungsdichte PV-Aufdachanlagen" + unit: "MW/km2" + st: + description: "Leistungsdichte Solarthermie" + unit: "MW/km2" + nominal_power_per_unit: + wind: + description: "Nennleistung je Anlage" + unit: "MW/Anlage" + batteries: + large: + nominal_power_per_storage_capacity: + description: "Speichernennleistung je Speicherkapazität" + unit: "kW/kWh" + small: + nominal_power_per_storage_capacity: + description: "Speichernennleistung je Speicherkapazität" + unit: "kW/kWh" + storage_capacity_per_pv_power: + description: "Speicherkapazität je Nennleistung der PV-Anlage" + unit: "kWh/kWp" + hot_water_storages: + large: + nominal_power_per_storage_capacity: + description: "Speichernennleistung je Speicherkapazität" + unit: "kW/kWh" + small: + nominal_power_per_storage_capacity: + description: "Speichernennleistung je Speicherkapazität" + unit: "kW/kWh" + _source_path: + dataset: "technology_data" + file: "technology_data.json" + + sequences: + solar-pv_ground_profile: + description: "Einspeisezeitreihe PV-Freiflächenanlagen, auf 1 normalisiert" + path: "sequences/pv_feedin_timeseries.csv" + fields: + power: + description: "Einspeisung" + unit: "-" + _source_path: + dataset: "renewable_feedin" + file: "pv_feedin_timeseries.csv" + solar-pv_rooftop_profile: + description: "Einspeisezeitreihe PV-Aufdachanlagen, auf 1 normalisiert" + path: "sequences/pv_feedin_timeseries.csv" + fields: + power: + description: "Einspeisung" + unit: "-" + _source_path: + dataset: "renewable_feedin" + file: "pv_feedin_timeseries.csv" + solar-thermalcollector_profile: + description: "Einspeisezeitreihe Solarthermie, auf 1 normalisiert" + path: "sequences/st_feedin_timeseries.csv" + fields: + power: + description: "Einspeisung" + unit: "-" + _source_path: + dataset: "renewable_feedin" + file: "st_feedin_timeseries.csv" + hydro-ror_profile: + description: "Einspeisezeitreihe Wasserkraft, auf 1 normalisiert" + path: "sequences/ror_feedin_timeseries.csv" + fields: + power: + description: "Einspeisung" + unit: "-" + _source_path: + dataset: "renewable_feedin" + file: "ror_feedin_timeseries.csv" + wind-onshore_profile: + description: "Einspeisezeitreihe Windenergie, auf 1 normalisiert" + path: "sequences/wind_feedin_timeseries.csv" + fields: + power: + description: "Einspeisung" + unit: "-" + _source_path: + dataset: "renewable_feedin" + file: "wind_feedin_timeseries.csv" + heatpump-cop_profile: + description: "Wärmepumpen COP-Zeitreihe" + path: "sequences/heatpump_cop_timeseries.csv" + fields: + cop: 
+ description: "COP" + unit: "-" + _source_path: + dataset: "heatpump_cop" + file: "heatpump_cop_timeseries.csv" + + geodata: + # Generation units + bnetza_mastr_biomass_agg_region: + description: "Biomasseanlagen (In Betrieb oder geplant)" + path: "geodata/bnetza_mastr_biomass_agg_region.gpkg" + fields: [ ] + _source_path: + dataset: "bnetza_mastr_biomass_region" + file: "bnetza_mastr_biomass_agg_region.gpkg" + bnetza_mastr_combustion_agg_region: + description: "Verbrennungskraftwerke (In Betrieb oder geplant)" + path: "geodata/bnetza_mastr_combustion_agg_region.gpkg" + fields: [ ] + _source_path: + dataset: "bnetza_mastr_combustion_region" + file: "bnetza_mastr_combustion_agg_region.gpkg" + bnetza_mastr_gsgk_agg_region: + description: "Geo- oder Solarthermie-, Grubengas- und Klärschlamm-Anlagen (In Betrieb oder geplant)" + path: "geodata/bnetza_mastr_gsgk_agg_region.gpkg" + fields: [ ] + _source_path: + dataset: "bnetza_mastr_gsgk_region" + file: "bnetza_mastr_gsgk_agg_region.gpkg" + bnetza_mastr_hydro_agg_region: + description: "Wasserkraftanlagen (In Betrieb oder geplant)" + path: "geodata/bnetza_mastr_hydro_agg_region.gpkg" + fields: [ ] + _source_path: + dataset: "bnetza_mastr_hydro_region" + file: "bnetza_mastr_hydro_agg_region.gpkg" + bnetza_mastr_pv_ground_agg_region: + description: "PV-Freiflächenanlagen (In Betrieb oder geplant)" + path: "geodata/bnetza_mastr_pv_ground_agg_region.gpkg" + fields: [ ] + _source_path: + dataset: "bnetza_mastr_pv_ground_region" + file: "bnetza_mastr_pv_ground_agg_region.gpkg" + bnetza_mastr_pv_roof_agg_region: + description: "PV-Aufdachanlagen (In Betrieb oder geplant)" + path: "geodata/bnetza_mastr_pv_roof_agg_region.gpkg" + fields: [ ] + _source_path: + dataset: "bnetza_mastr_pv_roof_region" + file: "bnetza_mastr_pv_roof_agg_region.gpkg" + bnetza_mastr_storage_agg_region: + description: "Batteriespeicher (In Betrieb oder geplant)" + path: "geodata/bnetza_mastr_storage_agg_region.gpkg" + fields: [ ] + _source_path: + dataset: "bnetza_mastr_storage_region" + file: "bnetza_mastr_storage_agg_region.gpkg" + bnetza_mastr_wind_agg_region: + description: "Windenergieanlagen (In Betrieb oder geplant)" + path: "geodata/bnetza_mastr_wind_agg_region.gpkg" + fields: [ ] + _source_path: + dataset: "bnetza_mastr_wind_region" + file: "bnetza_mastr_wind_agg_region.gpkg" + # Energy demand + demand: + scalars: + # Electricity demand + electricity-demand_hh: + description: "Strombedarf Haushalte" + path: "scalars/demand_hh_power_demand.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_electricity_region" + file: "demand_hh_power_demand.csv" + electricity-demand_cts: + description: "Strombedarf GHD" + path: "scalars/demand_cts_power_demand.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_electricity_region" + file: "demand_cts_power_demand.csv" + electricity-demand_ind: + description: "Strombedarf Industrie" + path: "scalars/demand_ind_power_demand.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_electricity_region" + file: "demand_ind_power_demand.csv" + + # Heat demand + heat-demand_hh: 
+ description: "Wärmebedarf Haushalte gesamt" + path: "scalars/demand_hh_heat_demand.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_hh_heat_demand.csv" + heat_central-demand_hh: + description: "Wärmebedarf Haushalte Fernwärme" + path: "scalars/demand_hh_heat_demand_cen.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_hh_heat_demand_cen.csv" + heat_decentral-demand_hh: + description: "Wärmebedarf Haushalte dezentral" + path: "scalars/demand_hh_heat_demand_dec.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_hh_heat_demand_dec.csv" + heat-demand_cts: + description: "Wärmebedarf GHD gesamt" + path: "scalars/demand_cts_heat_demand.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_cts_heat_demand.csv" + heat_central-demand_cts: + description: "Wärmebedarf GHD Fernwärme" + path: "scalars/demand_cts_heat_demand_cen.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_cts_heat_demand_cen.csv" + heat_decentral-demand_cts: + description: "Wärmebedarf GHD dezentral" + path: "scalars/demand_cts_heat_demand_dec.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_cts_heat_demand_dec.csv" + heat-demand_ind: + description: "Wärmebedarf Industrie gesamt" + path: "scalars/demand_ind_heat_demand.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_ind_heat_demand.csv" + heat_central-demand_ind: + description: "Wärmebedarf Industrie Fernwärme" + path: "scalars/demand_ind_heat_demand_cen.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_ind_heat_demand_cen.csv" + heat_decentral-demand_ind: + description: "Wärmebedarf Industrie dezentral" + path: "scalars/demand_ind_heat_demand_dec.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + 2022: + description: "Bedarf 2022" + unit: "MWh" + 2045: + description: "Bedarf 2045" + unit: "MWh" + _source_path: + dataset: "demand_heat_region" + file: "demand_ind_heat_demand_dec.csv" + + # Heating structure + heating_structure_decentral: + description: "Beheizungsstruktur dezentral (energetische Anteile)" + path: "scalars/demand_heat_structure_esys_dec.csv" 
+ fields: + year: + description: "Jahr" + unit: "-" + carrier: + description: "Energieträger" + unit: "-" + demand_rel: + description: "Relativer Bedarf" + unit: "-" + _source_path: + dataset: "demand_heat_region" + file: "demand_heat_structure_esys_dec.csv" + heating_structure_central: + description: "Beheizungsstruktur Fernwärme (energetische Anteile)" + path: "scalars/demand_heat_structure_esys_cen.csv" + fields: + year: + description: "Jahr" + unit: "-" + carrier: + description: "Energieträger" + unit: "-" + demand_rel: + description: "Relativer Bedarf" + unit: "-" + _source_path: + dataset: "demand_heat_region" + file: "demand_heat_structure_esys_cen.csv" + + sequences: + # Electricity demand profiles + electricity-demand_hh_profile: + description: "Strombedarfszeitreihe Haushalte, auf 1 normalisiert" + path: "sequences/demand_hh_power_timeseries.csv" + fields: + demand_norm: + description: "Bedarf" + unit: "-" + _source_path: + dataset: "demand_electricity_region" + file: "demand_hh_power_timeseries.csv" + electricity-demand_cts_profile: + description: "Strombedarfszeitreihe GHD, auf 1 normalisiert" + path: "sequences/demand_cts_power_timeseries.csv" + fields: + demand_norm: + description: "Bedarf" + unit: "-" + _source_path: + dataset: "demand_electricity_region" + file: "demand_cts_power_timeseries.csv" + electricity-demand_ind_profile: + description: "Strombedarfszeitreihe Industrie, auf 1 normalisiert" + path: "sequences/demand_ind_power_timeseries.csv" + fields: + demand_norm: + description: "Bedarf" + unit: "-" + _source_path: + dataset: "demand_electricity_region" + file: "demand_ind_power_timeseries.csv" + + # Heat demand profiles + heat-demand_hh_profile: + description: "Wärmebedarfszeitreihe Haushalte, auf 1 normalisiert" + path: "sequences/demand_hh_heat_timeseries.csv" + fields: + demand_norm: + description: "Bedarf" + unit: "-" + _source_path: + dataset: "demand_heat_region" + file: "demand_hh_heat_timeseries.csv" + heat-demand_cts_profile: + description: "Wärmebedarfszeitreihe GHD, auf 1 normalisiert" + path: "sequences/demand_cts_heat_timeseries.csv" + fields: + demand_norm: + description: "Bedarf" + unit: "-" + _source_path: + dataset: "demand_heat_region" + file: "demand_cts_heat_timeseries.csv" + heat-demand_ind_profile: + description: "Wärmebedarfszeitreihe Industrie, auf 1 normalisiert" + path: "sequences/demand_ind_heat_timeseries.csv" + fields: + demand_norm: + description: "Bedarf" + unit: "-" + _source_path: + dataset: "demand_heat_region" + file: "demand_ind_heat_timeseries.csv" + + # Eligible areas for Wind turbines and ground-mounted PV + potential_areas: + scalars: + wind_area_stats: + description: "Flächensumme Potenzialflächen Windenergie je Flächentyp" + path: "scalars/potentialarea_wind_area_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + stp_2018_vreg: + description: "STP Wind 2018 - Vorrang-/Eignungsgebiete" + unit: "km2" + stp_2027_vr: + description: "STP Wind 2027 - Planabsicht Vorranggebiete" + unit: "km2" + stp_2027_repowering: + description: "STP Wind 2027 - Planabsicht Repoweringgebiete" + unit: "km2" + stp_2027_search_area_forest_area: + description: "STP Wind 2027 - Suchraum Wald" + unit: "km2" + stp_2027_search_area_open_area: + description: "STP Wind 2027 - Suchraum Offenland" + unit: "km2" + _source_path: + dataset: "potentialarea_wind_region" + file: "potentialarea_wind_area_stats_muns.csv" + pv_ground_area_stats: + description: "Flächensumme Potenzialflächen PV-Freiflächenanlagen je Flächentyp" + 
path: "scalars/potentialarea_pv_ground_area_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + agriculture_lfa-off_region: + description: "Freiflächen-PV auf Acker- und Grünlandflächen mit geringer Bodengüte" + unit: "km2" + road_railway_region: + description: "Potenzialflächen für Freiflächen-PV entlang von Bundesautobahnen und Schienenwegen (500m-Streifen)" + unit: "km2" + _source_path: + dataset: "potentialarea_pv_ground_region" + file: "potentialarea_pv_ground_area_stats_muns.csv" + pv_ground_area_shares: + description: "Anteil Potenzialflächen an Regionsfläche nach Flächentyp" + path: "scalars/potentialarea_pv_ground_area_shares.json" + fields: + road_railway: + description: "Anteil Potenzialflächen entlang von Bundesautobahnen und Schienenwegen an Regionsfläche" + unit: "-" + agri: + description: "Anteil Potenzialflächen auf Acker- und Grünlandflächen mit geringer Bodengüte an Regionsfläche" + unit: "-" + _source_path: + dataset: "potentialarea_pv_ground_region" + file: "potentialarea_pv_ground_area_shares.json" + pv_ground_area_targets: + description: "Regionale, aus Bundeszielen anhand der Potenzialflächen desaggregierte Bundesziele für Freiflächen-PV nach Flächentyp" + path: "scalars/potentialarea_pv_ground_regionalized_targets.json" + fields: + target_area_total: + description: "Regionales Flächenziel gesamt" + unit: "km2" + target_area_road_railway: + description: "Regionales Flächenziel entlang von Bundesautobahnen und Schienenwegen" + unit: "km2" + target_area_agri: + description: "Regionales Flächenziel auf Acker- und Grünlandflächen mit geringer Bodengüte" + unit: "km2" + target_power_total: + description: "Regionales Leistungsziel gesamt" + unit: "MW" + target_power_road_railway: + description: "Regionales Leistungsziel entlang von Bundesautobahnen und Schienenwegen" + unit: "MW" + target_power_agri: + description: "Regionales Leistungsziel auf Acker- und Grünlandflächen mit geringer Bodengüte" + unit: "MW" + _source_path: + dataset: "potentialarea_pv_ground_region" + file: "potentialarea_pv_ground_regionalized_targets.json" + pv_roof_area_stats: + description: "Potenziale PV-Aufdachanlagen (alle Gebäude)" + path: "scalars/potentialarea_pv_roof_area_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + roof_count: + description: "Anzahl der Dächer/Gebäude" + unit: "-" + building_area_sqm: + description: "Gesamtgrundfläche" + unit: "m2" + historic_preservation_count: + description: "Anzahl der Gebäude unter Denkmalschutz" + unit: "-" + installable_power_south: + description: "Installierbare Gesamtleistung (Südausrichtung)" + unit: "MW" + installable_power_north: + description: "Installierbare Gesamtleistung (Nordausrichtung)" + unit: "MW" + installable_power_east: + description: "Installierbare Gesamtleistung (Ostausrichtung)" + unit: "MW" + installable_power_west: + description: "Installierbare Gesamtleistung (Westausrichtung)" + unit: "MW" + installable_power_flat: + description: "Installierbare Gesamtleistung (Flachdach)" + unit: "MW" + installable_power_total: + description: "Installierbare Gesamtleistung (Summe)" + unit: "MW" + energy_annual_south: + description: "Erzielbarer Energieertrag (Südausrichtung)" + unit: "MWh" + energy_annual_north: + description: "Erzielbarer Energieertrag (Nordausrichtung)" + unit: "MWh" + energy_annual_east: + description: "Erzielbarer Energieertrag (Ostausrichtung)" + unit: "MWh" + energy_annual_west: + description: "Erzielbarer Energieertrag (Westausrichtung)" + unit: 
"MWh" + energy_annual_flat: + description: "Erzielbarer Energieertrag (Flachdach)" + unit: "MWh" + energy_annual_total: + description: "Erzielbarer Energieertrag (Summe)" + unit: "MWh" + _source_path: + dataset: "potentialarea_pv_roof_region" + file: "potentialarea_pv_roof_area_stats_muns.csv" + pv_roof_area_deployment_stats: + description: "Genutzte Potenziale PV-Aufdachanlagen (alle Gebäude)" + path: "scalars/potentialarea_pv_roof_deployment_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + relative_deployment: + description: "Relative Potenzialnutzung" + unit: "-" + _source_path: + dataset: "potentialarea_pv_roof_region" + file: "potentialarea_pv_roof_deployment_stats_muns.csv" + pv_roof_wo_historic_area_stats: + description: "Potenziale PV-Aufdachanlagen (ohne denkmalgeschützte Gebäude)" + path: "scalars/potentialarea_pv_roof_wo_historic_area_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + roof_count: + description: "Anzahl der Dächer/Gebäude" + unit: "-" + building_area_sqm: + description: "Gesamtgrundfläche" + unit: "m2" + historic_preservation_count: + description: "Anzahl der Gebäude unter Denkmalschutz" + unit: "-" + installable_power_south: + description: "Installierbare Gesamtleistung (Südausrichtung)" + unit: "MW" + installable_power_north: + description: "Installierbare Gesamtleistung (Nordausrichtung)" + unit: "MW" + installable_power_east: + description: "Installierbare Gesamtleistung (Ostausrichtung)" + unit: "MW" + installable_power_west: + description: "Installierbare Gesamtleistung (Westausrichtung)" + unit: "MW" + installable_power_flat: + description: "Installierbare Gesamtleistung (Flachdach)" + unit: "MW" + installable_power_total: + description: "Installierbare Gesamtleistung (Summe)" + unit: "MW" + energy_annual_south: + description: "Erzielbarer Energieertrag (Südausrichtung)" + unit: "MWh" + energy_annual_north: + description: "Erzielbarer Energieertrag (Nordausrichtung)" + unit: "MWh" + energy_annual_east: + description: "Erzielbarer Energieertrag (Ostausrichtung)" + unit: "MWh" + energy_annual_west: + description: "Erzielbarer Energieertrag (Westausrichtung)" + unit: "MWh" + energy_annual_flat: + description: "Erzielbarer Energieertrag (Flachdach)" + unit: "MWh" + energy_annual_total: + description: "Erzielbarer Energieertrag (Summe)" + unit: "MWh" + _source_path: + dataset: "potentialarea_pv_roof_region" + file: "potentialarea_pv_roof_wo_historic_area_stats_muns.csv" + pv_roof_wo_historic_area_deployment_stats: + description: "Genutzte Potenziale PV-Aufdachanlagen (ohne denkmalgeschützte Gebäude)" + path: "scalars/potentialarea_pv_roof_wo_historic_deployment_stats_muns.csv" + fields: + municipality_id: + description: "Gemeinde-ID" + unit: "-" + relative_deployment: + description: "Relative Potenzialnutzung" + unit: "-" + _source_path: + dataset: "potentialarea_pv_roof_region" + file: "potentialarea_pv_roof_wo_historic_deployment_stats_muns.csv" + pv_roof_area_targets: + description: "Regionale, aus Bundeszielen anhand der Gebäudegrundflächen desaggregierte Bundesziele für Aufdach-PV" + path: "scalars/potentialarea_pv_roof_regionalized_targets.json" + fields: + target_power_total: + description: "Regionales Leistungsziel gesamt" + unit: "MW" + _source_path: + dataset: "potentialarea_pv_roof_region" + file: "potentialarea_pv_roof_regionalized_targets.json" + + geodata: + potentialarea_pv_agriculture_lfa-off_region: + description: "Potenzialflächen Freiflächen-PV auf Ackerflächen inner- 
und außerhalb benachteiligter Gebiete" + path: "geodata/potentialarea_pv_agriculture_lfa-off_region.gpkg" + fields: [ ] + _source_path: + dataset: "potentialarea_pv_ground_region" + file: "potentialarea_pv_agriculture_lfa-off_region.gpkg" + potentialarea_pv_road_railway_region: + description: "Potenzialflächen Freiflächen-PV entlang von Autobahnen und Schienenwegen" + path: "geodata/potentialarea_pv_road_railway_region.gpkg" + fields: [ ] + _source_path: + dataset: "potentialarea_pv_ground_region" + file: "potentialarea_pv_road_railway_region.gpkg" + + potentialarea_wind_stp_2018_vreg: + description: "Potenzialflächen Windenergie: STP Wind 2018 - Vorrang-/Eignungsgebiete" + path: "geodata/potentialarea_wind_stp_2018_vreg.gpkg" + fields: [ ] + _source_path: + dataset: "potentialarea_wind_region" + file: "potentialarea_wind_stp_2018_vreg.gpkg" + potentialarea_wind_stp_2027_repowering: + description: "Potenzialflächen Windenergie: STP Wind 2027 - Planabsicht Repoweringgebiete" + path: "geodata/potentialarea_wind_stp_2027_repowering.gpkg" + fields: [ ] + _source_path: + dataset: "potentialarea_wind_region" + file: "potentialarea_wind_stp_2027_repowering.gpkg" + potentialarea_wind_stp_2027_search_area_forest_area: + description: "Potenzialflächen Windenergie: STP Wind 2027 - Suchraum Wald" + path: "geodata/potentialarea_wind_stp_2027_search_area_forest_area.gpkg" + fields: [ ] + _source_path: + dataset: "potentialarea_wind_region" + file: "potentialarea_wind_stp_2027_search_area_forest_area.gpkg" + potentialarea_wind_stp_2027_search_area_open_area: + description: "Potenzialflächen Windenergie: STP Wind 2027 - Suchraum Offenland" + path: "geodata/potentialarea_wind_stp_2027_search_area_open_area.gpkg" + fields: [ ] + _source_path: + dataset: "potentialarea_wind_region" + file: "potentialarea_wind_stp_2027_search_area_open_area.gpkg" + potentialarea_wind_stp_2027_vr: + description: "Potenzialflächen Windenergie: STP Wind 2027 - Planabsicht Vorranggebiete" + path: "geodata/potentialarea_wind_stp_2027_vr.gpkg" + fields: [ ] + _source_path: + dataset: "potentialarea_wind_region" + file: "potentialarea_wind_stp_2027_vr.gpkg" + + # Eligible areas for Wind turbines and ground-mounted PV + emissions: + scalars: + emissions: + description: "Emissionen für Sachsen-Anhalt und die Region ABW für 1990 und 2019" + path: "scalars/emissions.csv" + fields: + sector: + description: "Sektor nach CRF" + unit: "-" + cat: + description: "Kategorie nach CRF" + unit: "-" + subcat: + description: "Unterkategorie nach CRF" + unit: "-" + name: + description: "Bezeichner" + unit: "-" + st: + description: "Emissionen Sachsen-Anhalt" + unit: "kt CO2äq" + abw: + description: "Emissionen Region ABW" + unit: "kt CO2äq" + _source_path: + dataset: "emissions_region" + file: "emissions.csv" + emissions_chart_overview: + description: "Chartdaten: Emissionen für Sachsen-Anhalt und die Region ABW für 1990 und 2019" + path: "scalars/emissions_chart_overview.json" + fields: + energy_industry: + description: "Energiewirtschaft" + unit: "kt CO2äq" + industry: + description: "Industrie" + unit: "kt CO2äq" + traffic: + description: "Verkehr" + unit: "kt CO2äq" + buildings_firing: + description: "Sonst. Energie (insbes. 
Gebäude)" + unit: "kt CO2äq" + agricultural: + description: "Landwirtschaft" + unit: "kt CO2äq" + waste_waste_water: + description: "Abfall und Abwasser" + unit: "kt CO2äq" + _source_path: + dataset: "emissions_region" + file: "emissions_chart_overview.json" + + # Settings + settings: + panels: + energy_settings_panel: + description: "Einstellungen für Panel Strom" + path: "settings/energy_settings_panel.json" + fields: + s_w_1: + description: "Attribute für Slider s_w_1" + unit: "-" + _source_path: + dataset: "app_settings" + file: "energy_settings_panel.json" + heat_settings_panel: + description: "Einstellungen für Panel Wärme" + path: "settings/heat_settings_panel.json" + fields: + w_d_wp_1: + description: "Attribute für Slider w_d_wp_1" + unit: "-" + _source_path: + dataset: "app_settings" + file: "heat_settings_panel.json" + traffic_settings_panel: + description: "Einstellungen für Panel Verkehr" + path: "settings/traffic_settings_panel.json" + fields: + v_iv_1: + description: "Attribute für Slider v_iv_1" + unit: "-" + _source_path: + dataset: "app_settings" + file: "traffic_settings_panel.json" + map: + map_panel_layer_list: + description: "Layerliste (rechtes Panel)" + path: "settings/map_panel_layer_list.json" + fields: + boundaries: + description: "Grenzen" + unit: "-" + energy: + description: "Energie" + unit: "-" + settlements_infrastructure: + description: "Siedlungen und Infrastruktur" + unit: "-" + nature_landscape: + description: "Natur und Landschaft" + unit: "-" + _source_path: + dataset: "app_settings" + file: "map_panel_layer_list.json" + + # Captions + captions: + captions: + captions_fields: + description: "Captions für Datensatzfelder" + path: "captions/captions_fields.json" + fields: + datasets_caption_map: + description: "Mapping File -> Eintrag" + unit: "-" + captions: + description: "Eintrag mit Beschriftungstexten" + unit: "-" + _source_path: + dataset: "app_captions" + file: "captions_fields.json" diff --git a/digipipe/store/appdata/datapackage/create.smk b/digipipe/store/appdata/datapackage/create.smk new file mode 100644 index 00000000..90c5a447 --- /dev/null +++ b/digipipe/store/appdata/datapackage/create.smk @@ -0,0 +1,35 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" +import json +import shutil + +from digipipe.store.utils import get_abs_dataset_path +from digipipe.store.appdata.datapackage.scripts.create import collect_files + +DATASET_PATH = get_abs_dataset_path("appdata", "datapackage", data_dir=True) +DATAPACKAGE_FILES = collect_files(config, DATASET_PATH) + +rule copy_files: + """ + Copy required files + """ + input: DATAPACKAGE_FILES[0] + output: DATAPACKAGE_FILES[1] + run: + print("Copy required files for datapackage...") + for src_file, dst_file in zip(*DATAPACKAGE_FILES): + shutil.copy(src_file, dst_file) + +rule create_datapackage: + """ + Create datapackage for app + """ + input: DATAPACKAGE_FILES[1] + output: DATASET_PATH / "datapackage_app.json" + run: + print("Creating datapackage...") + with open(output[0], "w", encoding="utf8") as f: + json.dump(config, f, indent=4) diff --git a/digipipe/store/4_appdata/data/.gitkeep b/digipipe/store/appdata/datapackage/data/geodata/.gitkeep similarity index 100% rename from digipipe/store/4_appdata/data/.gitkeep rename to digipipe/store/appdata/datapackage/data/geodata/.gitkeep diff --git a/digipipe/store/4_appdata/metadata/.gitkeep b/digipipe/store/appdata/datapackage/data/scalars/.gitkeep similarity index 100% rename from digipipe/store/4_appdata/metadata/.gitkeep rename to digipipe/store/appdata/datapackage/data/scalars/.gitkeep diff --git a/digipipe/store/4_appdata/scenarios/.gitkeep b/digipipe/store/appdata/datapackage/data/sequences/.gitkeep similarity index 100% rename from digipipe/store/4_appdata/scenarios/.gitkeep rename to digipipe/store/appdata/datapackage/data/sequences/.gitkeep diff --git a/digipipe/store/appdata/datapackage/dataset.md b/digipipe/store/appdata/datapackage/dataset.md new file mode 100644 index 00000000..01288fe2 --- /dev/null +++ b/digipipe/store/appdata/datapackage/dataset.md @@ -0,0 +1,25 @@ +# Datapackage für App + +Von der App benötigte Daten in JSON `datapackage_app.json`. + +Generelle Struktur: + +- `` + - `` (`scalars`, `sequences` oder `geodata`) + - `` + - `description`: Beschreibung + - `path`: Pfad zur Zieldatei im Datapackage + - `fields`: Felder-/Spaltendefinition + - `_source_path`: Pfad zur Datei im Quelldatensatz + - `dataset`: Name in `store/datasets/` + - `file`: Datei + +Kategorien bzw. 
Inhalt `resources`: + +- `base_data`: Basisdaten +- `production`: Energiebereitstellung +- `demand`: Energiebedarf +- `potential_areas`: Potenzialgebiete EE +- `emissions`: Emissionen +- `settings`: App-Settings +- `captions`: App-Captions diff --git a/digipipe/store/appdata/datapackage/scripts/create.py b/digipipe/store/appdata/datapackage/scripts/create.py new file mode 100644 index 00000000..e9bc3ef8 --- /dev/null +++ b/digipipe/store/appdata/datapackage/scripts/create.py @@ -0,0 +1,51 @@ +from pathlib import Path +from typing import Tuple + +from digipipe.store.utils import get_abs_dataset_path + + +def collect_files( + config: dict, + dataset_path: Path, +) -> Tuple[list, list]: + """Collect paths of source and target files for app datapackage + + Parameters + ---------- + config : dict + Dataset config + dataset_path: pathlib.Path + Path to datapackage + + Returns + ------- + list + Source files + list + Target files + """ + source_files = [] + target_files = [] + + for cat in config["resources"].keys(): + print(f"Processing {cat} ...") + for subcat in config["resources"][cat].keys(): + print(f" Processing {subcat} ...") + for item, data in config["resources"][cat][subcat].items(): + print(f" Processing {item} ...") + source_file = ( + get_abs_dataset_path( + "datasets", + data["_source_path"].get("dataset"), + data_dir=True, + ) + / data["_source_path"].get("file") + ) + target_file = dataset_path / data.get("path") + if target_file not in target_files: + source_files.append(source_file) + target_files.append(target_file) + else: + print(" Target file already collected, skipping...") + + return source_files, target_files diff --git a/digipipe/store/appdata/esys/.gitkeep b/digipipe/store/appdata/esys/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/appdata/metadata/.gitkeep b/digipipe/store/appdata/metadata/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/appdata/module.smk b/digipipe/store/appdata/module.smk new file mode 100644 index 00000000..943dcda2 --- /dev/null +++ b/digipipe/store/appdata/module.smk @@ -0,0 +1,17 @@ +""" +Dataset registry for appdata module which is loaded by main snakemake file. +All datasets in the datasets category must be added to this file. + +Template: +--------- +module <dataset_name>: + snakefile: "<dataset_name>/create.smk" + config: config["store"]["appdata"]["<dataset_name>"] +use rule * from <dataset_name> as appdata_<dataset_name>_* + +""" + +module datapackage: + snakefile: "datapackage/create.smk" + config: config["store"]["appdata"]["datapackage"] +use rule * from datapackage as appdata_datapackage_* diff --git a/digipipe/store/datasets/TEMPLATE/config.yml b/digipipe/store/datasets/TEMPLATE/config.yml new file mode 100644 index 00000000..441e79a0 --- /dev/null +++ b/digipipe/store/datasets/TEMPLATE/config.yml @@ -0,0 +1,5 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Further custom configuration goes here diff --git a/digipipe/store/datasets/TEMPLATE/create.smk b/digipipe/store/datasets/TEMPLATE/create.smk new file mode 100644 index 00000000..d132bdfc --- /dev/null +++ b/digipipe/store/datasets/TEMPLATE/create.smk @@ -0,0 +1,5 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk .
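`collect_files` only pairs up source and target paths; the actual copying happens in the `copy_files` rule. A small usage sketch with a single resource, mirroring the `wind_area_stats` entry above (running it requires the digipipe package and a populated data store, so treat it as an illustration):

```python
from digipipe.store.appdata.datapackage.scripts.create import collect_files
from digipipe.store.utils import get_abs_dataset_path

# One-resource config fragment, copied from the potential_areas section above.
config = {
    "resources": {
        "potential_areas": {
            "scalars": {
                "wind_area_stats": {
                    "path": "scalars/potentialarea_wind_area_stats_muns.csv",
                    "_source_path": {
                        "dataset": "potentialarea_wind_region",
                        "file": "potentialarea_wind_area_stats_muns.csv",
                    },
                }
            }
        }
    }
}

dataset_path = get_abs_dataset_path("appdata", "datapackage", data_dir=True)
source_files, target_files = collect_files(config, dataset_path)
for src, dst in zip(source_files, target_files):
    print(src, "->", dst)
```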
+""" diff --git a/digipipe/store/datasets/TEMPLATE/data/.gitkeep b/digipipe/store/datasets/TEMPLATE/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/TEMPLATE/dataset.md b/digipipe/store/datasets/TEMPLATE/dataset.md new file mode 100644 index 00000000..15bd7af4 --- /dev/null +++ b/digipipe/store/datasets/TEMPLATE/dataset.md @@ -0,0 +1,4 @@ +# Name des Datensatzes + +Eine kurze Beschreibung des Datensatzes. +Diese hilft der Dokumentation und bei der Erstellung des Berichts. diff --git a/digipipe/store/datasets/TEMPLATE/scripts/some_script.py b/digipipe/store/datasets/TEMPLATE/scripts/some_script.py new file mode 100644 index 00000000..6108a38d --- /dev/null +++ b/digipipe/store/datasets/TEMPLATE/scripts/some_script.py @@ -0,0 +1,3 @@ +""" +Example script to create this dataset +""" diff --git a/digipipe/store/datasets/app_captions/config.yml b/digipipe/store/datasets/app_captions/config.yml new file mode 100644 index 00000000..17d3af5a --- /dev/null +++ b/digipipe/store/datasets/app_captions/config.yml @@ -0,0 +1,3 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## diff --git a/digipipe/store/datasets/app_captions/create.smk b/digipipe/store/datasets/app_captions/create.smk new file mode 100644 index 00000000..ed27536e --- /dev/null +++ b/digipipe/store/datasets/app_captions/create.smk @@ -0,0 +1,39 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" +import json + +from digipipe.scripts.data_io import load_json +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("datasets", "app_captions", data_dir=True) + +rule create_captions: + """ + Create captions for fields in app + """ + input: + [ + rules.datasets_bnetza_mastr_captions_create.output.outfile, + rules.datasets_potentialarea_wind_region_create_captions.output[0], + rules.datasets_demand_heat_region_create_captions.output[0], + ] + output: + DATASET_PATH / "captions_fields.json" + run: + captions = dict( + datasets_caption_map={}, + captions={}, + ) + + for file in input: + captions_file = load_json(file) + captions["datasets_caption_map"].update( + captions_file["datasets_caption_map"] + ) + captions["captions"].update(captions_file["captions"]) + + with open(output[0], "w", encoding="utf8") as f: + json.dump(captions, f, indent=4) diff --git a/digipipe/store/datasets/app_captions/data/.gitkeep b/digipipe/store/datasets/app_captions/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/app_captions/dataset.md b/digipipe/store/datasets/app_captions/dataset.md new file mode 100644 index 00000000..d49a5fc9 --- /dev/null +++ b/digipipe/store/datasets/app_captions/dataset.md @@ -0,0 +1,7 @@ +# Captions + +Beschriftungen für WebApp. + +Dateien: + +- Felder: `captions_fields.json` diff --git a/digipipe/store/datasets/app_settings/config.yml b/digipipe/store/datasets/app_settings/config.yml new file mode 100644 index 00000000..b6b5fde7 --- /dev/null +++ b/digipipe/store/datasets/app_settings/config.yml @@ -0,0 +1,253 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
# +############################################################################## + +# Map list for right panel +map_panel_layer_list: + boundaries: + [ + "bkg_vg250_districts_region", + "bkg_vg250_muns_region" + ] + energy: + [ + "bnetza_mastr_wind_agg_region", + "bnetza_mastr_pv_ground_agg_region", + "bnetza_mastr_pv_roof_agg_region", + "bnetza_mastr_biomass_agg_region", + "bnetza_mastr_hydro_agg_region", + "bnetza_mastr_combustion_agg_region", + "bnetza_mastr_gsgk_agg_region", + "bnetza_mastr_storage_agg_region" + ] + settlements_infrastructure: + [ + "settlement-0m_region", + "industry_region", + "road_region", + "railway_region", + "road_railway-500m_region", + "aviation_region", + "air_traffic_control_system_region", + "military_region", + "grid_region" + ] + nature_landscape: + [ + "nature_conservation_area_region", + "drinking_water_protection_area_region", + "fauna_flora_habitat_region", + "special_protection_area_region", + "biosphere_reserve_region", + "forest_region", + "water_region", + "floodplain_region", + "landscape_protection_area_region", + "soil_quality_high_region", + "soil_quality_low_region", + "less_favoured_areas_agricultural_region" + ] + +# Settings for energy panel +panel_settings_templates: + energy_settings_panel: + s_w_1: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + s_w_3: + start: none + s_w_4: + start: none + s_w_4_1: + start: none + s_w_4_2: + start: none + s_w_5: + start: none + s_w_5_1: + max: none + min: none + start: none + step: none + s_w_5_2: + max: none + min: none + start: none + step: none + s_pv_ff_1: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + s_pv_ff_3: + max: none + min: none + start: none + step: none + s_pv_ff_4: + max: none + min: none + start: none + step: none + s_pv_d_1: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + s_pv_d_3: + max: none + min: none + start: none + status_quo: none + step: none + s_pv_d_4: + max: none + min: none + start: none + step: none + status_quo: none + s_h_1: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + disable: none + s_v_1: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + s_v_3: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + s_v_4: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + s_v_5: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + s_s_g_1: + max: none + min: none + start: none + step: none + status_quo: none + + heat_settings_panel: + w_d_wp_1: + max: none + min: none + from-min: none + from-max: none + start: none + step: none + status_quo: none + future_scenario: none + w_d_wp_3: + max: none + min: none + from-min: none + from-max: none + start: none + step: none + w_d_wp_4: + max: none + min: none + from-min: none + from-max: none + start: none + step: none + w_d_wp_5: + max: none + min: none + from-min: none + from-max: none + start: none + step: none + w_z_wp_1: + max: none + min: none + from-min: none + from-max: none + start: none + step: none + status_quo: none + future_scenario: none + w_v_1: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + w_v_3: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + w_v_4: + max: none + 
min: none + start: none + step: none + status_quo: none + future_scenario: none + w_v_5: + max: none + min: none + start: none + step: none + status_quo: none + future_scenario: none + w_d_s_1: + max: none + min: none + start: none + step: none + w_z_s_1: + max: none + min: none + start: none + step: none + + traffic_settings_panel: + v_iv_1: + max: none + min: none + start: none + step: none + # status_quo: none + # future_scenario: none + v_iv_3: + max: none + min: none + start: none + step: none + # status_quo: none + # future_scenario: none diff --git a/digipipe/store/datasets/app_settings/create.smk b/digipipe/store/datasets/app_settings/create.smk new file mode 100644 index 00000000..6d9fdc83 --- /dev/null +++ b/digipipe/store/datasets/app_settings/create.smk @@ -0,0 +1,135 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" +import json +import geopandas as gpd +import pandas as pd + +from digipipe.scripts.data_io import load_json +from digipipe.store.utils import get_abs_dataset_path +from digipipe.store.datasets.app_settings.scripts.panels import ( + PanelSettings, + add_electricity_panel_settings, + add_heat_panel_settings, + add_traffic_panel_settings +) + +DATASET_PATH = get_abs_dataset_path("datasets", "app_settings", data_dir=True) + + +rule create_map_panel_layer_list: + """ + Create layer list for right map panel + """ + input: DATASET_PATH / "energy_settings_panel.json" # Any file (no input required) + output: DATASET_PATH / "map_panel_layer_list.json" + run: + print("Creating list of layers...") + with open(output[0], "w", encoding="utf8") as f: + json.dump(config["map_panel_layer_list"], f, indent=4) + +rule create_panel_settings: + """ + Create startup settings for settings panels + """ + input: + region=rules.datasets_bkg_vg250_region_create.output, + tech_data=rules.datasets_technology_data_copy_files.output, + wind_stats=rules.datasets_bnetza_mastr_wind_region_create_power_stats_muns.output, + wind_area_stats=get_abs_dataset_path( + "datasets", "potentialarea_wind_region") / "data" / + "potentialarea_wind_area_stats_muns.csv", + pv_ground_stats=rules.datasets_bnetza_mastr_pv_ground_region_create_power_stats_muns.output, + pv_ground_area_stats=rules.datasets_potentialarea_pv_ground_region_create_area_stats_muns.output, + # pv_ground_area_shares=rules.datasets_potentialarea_pv_ground_region_create_potarea_shares.output, + pv_ground_targets=rules.datasets_potentialarea_pv_ground_region_regionalize_state_targets.output, + pv_roof_stats=rules.datasets_bnetza_mastr_pv_roof_region_create_power_stats_muns.output, + pv_roof_area_stats=rules.datasets_potentialarea_pv_roof_region_create_area_stats_muns.output.wo_historic, + pv_roof_area_deploy_stats=rules.datasets_potentialarea_pv_roof_region_create_relative_deployment_stats_muns.output, + pv_roof_targets=rules.datasets_potentialarea_pv_roof_region_regionalize_state_targets.output, + hydro_stats=rules.datasets_bnetza_mastr_hydro_region_create_power_stats_muns.output, + demand_hh_power=rules.datasets_demand_electricity_region_hh_merge_demand_years.output.demand, + demand_cts_power=rules.datasets_demand_electricity_region_cts_merge_demand_years.output.demand, + demand_ind_power=rules.datasets_demand_electricity_region_ind_merge_demand_years.output.demand, + storage_large_stats=rules.datasets_bnetza_mastr_storage_region_create_power_stats_muns.output.large, + # 
storage_small_stats=rules.datasets_bnetza_mastr_storage_region_create_power_stats_muns.output.small, + storage_pv_roof=rules.datasets_bnetza_mastr_storage_region_create_storage_pv_roof_stats.output, + + heating_structure_decentral=rules.datasets_demand_heat_region_heating_structure_hh_cts.output.heating_structure_esys_dec, + heating_structure_central=rules.datasets_demand_heat_region_heating_structure_hh_cts.output.heating_structure_esys_cen, + demand_hh_heat=get_abs_dataset_path("datasets", "demand_heat_region") / "data" / "demand_hh_heat_demand.csv", + demand_cts_heat=get_abs_dataset_path("datasets", "demand_heat_region") / "data" / "demand_cts_heat_demand.csv", + demand_ind_heat=get_abs_dataset_path("datasets", "demand_heat_region") / "data" / "demand_ind_heat_demand.csv", + + output: + panel_settings_electricity=DATASET_PATH / "energy_settings_panel.json", + panel_settings_heat=DATASET_PATH / "heat_settings_panel.json", + panel_settings_traffic=DATASET_PATH / "traffic_settings_panel.json" + run: + print("Creating electricity panel settings...") + panel_settings_electricity = PanelSettings( + name="panel_settings_electricity", + **config["panel_settings_templates"]["energy_settings_panel"] + ) + panel_settings_electricity = add_electricity_panel_settings( + panel_settings_electricity, + region=gpd.read_file(input.region[0]), + tech_data=load_json(input.tech_data[0]), + wind_stats=pd.read_csv(input.wind_stats[0]), + wind_area_stats=pd.read_csv(input.wind_area_stats), + pv_ground_stats=pd.read_csv(input.pv_ground_stats[0]), + pv_ground_area_stats=pd.read_csv(input.pv_ground_area_stats[0], index_col="municipality_id"), + # pv_ground_area_shares=load_json(input.pv_ground_area_shares[0]), + pv_ground_targets=load_json(input.pv_ground_targets[0]), + pv_roof_stats=pd.read_csv(input.pv_roof_stats[0]), + pv_roof_area_stats=pd.read_csv(input.pv_roof_area_stats, index_col="municipality_id"), + pv_roof_area_deploy_stats=pd.read_csv(input.pv_roof_area_deploy_stats[0]), + pv_roof_targets=load_json(input.pv_roof_targets[0]), + hydro_stats=pd.read_csv(input.hydro_stats[0]), + demand_hh_power=pd.read_csv(input.demand_hh_power, index_col="municipality_id"), + demand_cts_power=pd.read_csv(input.demand_cts_power, index_col="municipality_id"), + demand_ind_power=pd.read_csv(input.demand_ind_power, index_col="municipality_id"), + storage_large_stats=pd.read_csv(input.storage_large_stats), + # storage_small_stats=pd.read_csv(input.storage_small_stats), + storage_pv_roof=load_json(input.storage_pv_roof[0]), + ) + + print("Creating heat panel settings...") + panel_settings_heat = PanelSettings( + name="panel_settings_heat", + **config["panel_settings_templates"]["heat_settings_panel"] + ) + panel_settings_heat = add_heat_panel_settings( + panel_settings_heat, + heating_structure_decentral=pd.read_csv(input.heating_structure_decentral, index_col="year"), + heating_structure_central=pd.read_csv(input.heating_structure_central, index_col="year"), + demand_hh_heat=pd.read_csv(input.demand_hh_heat, index_col="municipality_id"), + demand_cts_heat=pd.read_csv(input.demand_cts_heat, index_col="municipality_id"), + demand_ind_heat=pd.read_csv(input.demand_ind_heat, index_col="municipality_id"), + ) + + print("Creating traffic panel settings...") + panel_settings_traffic = PanelSettings( + name="panel_settings_traffic", + **config["panel_settings_templates"]["traffic_settings_panel"] + ) + panel_settings_traffic = add_traffic_panel_settings( + panel_settings_traffic, + ) + + # Check and dump + for panel, file in zip( + 
[panel_settings_electricity, + panel_settings_heat, + panel_settings_traffic], + [output.panel_settings_electricity, + output.panel_settings_heat, + output.panel_settings_traffic] + ): + if panel.is_complete(): + with open(file, "w", encoding="utf8") as f: + json.dump(panel.settings, f, indent=4) + else: + raise ValueError(f"{panel.name} has missing values!") diff --git a/digipipe/store/datasets/app_settings/data/.gitkeep b/digipipe/store/datasets/app_settings/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/app_settings/dataset.md b/digipipe/store/datasets/app_settings/dataset.md new file mode 100644 index 00000000..bea77bdb --- /dev/null +++ b/digipipe/store/datasets/app_settings/dataset.md @@ -0,0 +1,70 @@ +# Settings für App + +Einstellungen für die App. + +## Layerliste (rechtes Panel) + +- Konfiguration: [config.yml](https://github.com/rl-institut/digipipe/blob/main/digipipe/store/datasets/app_settings/config.yml) --> `map_panel_layer_list` +- Ergebnisfile: `map_panel_layer_list.json` +- Wird manuell in die App eingepflegt (s. + [map_config.py](https://github.com/rl-institut/digiplan/blob/main/digiplan/map/map_config.py)) + +## Settings panels + +Die im linken Panel aufgeführten Einstellelemente (Slider und Schalter) werden +hier parametriert. + +- Konfiguration des Templates: + [config.yml](https://github.com/rl-institut/digipipe/blob/main/digipipe/store/datasets/app_settings/config.yml) --> `panel_settings_templates` +- Parametrierung der Slider und Schalter: + [panels.py](https://github.com/rl-institut/digipipe/blob/main/digipipe/store/datasets/app_settings/scripts/panels.py) +- Ergebnisfiles: + - `energy_settings_panel.json` + - `heat_settings_panel.json` + - `traffic_settings_panel.json` +- Werden in die App eingelesen + +### Parametrierung der Einstellelemente + +Für die Slider werden folgende Attribute gesetzt: +Minimum, Maximum, Schrittweite, Startwert, Status-quo-Wert, Zielwert 2045. +Diese werden wie folgt bestimmt (vgl. auch (i)-Tooltips an den Elementen): + +| **Technologie** | **Element id** | **Maximum** | **Startwert** | **Status-quo-Wert** | **Zielwert 2045** | +|-------------------------|----------------|-------------------------------------|---------------------|-------------------------------------------------|------------------------------------------------| +| Windenergie | `s_w_1` | Inst. Leistung in bestehenden VR/EG | Wie Status-quo-Wert | Inst. Leistung 2022 | Aus Flächenziel Sachsen-Anhalt (2,2 % in 2032) | +| | `s_w_3` | - | Wie Status-quo-Wert | On | - | +| | `s_w_4` | - | Wie Status-quo-Wert | Off | - | +| | `s_w_4_1` | - | Wie Status-quo-Wert | On | - | +| | `s_w_4_2` | - | Wie Status-quo-Wert | Off | - | +| | `s_w_5` | - | Wie Status-quo-Wert | Off | - | +| | `s_w_5_1` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| | `s_w_5_2` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| Freiflächen-PV | `s_pv_ff_1` | | Wie Status-quo-Wert | Inst. Leistung 2022 | Aus EEG 2023 und regionalen Potenzialen | +| | `s_pv_ff_3` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| | `s_pv_ff_4` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| Aufdach-PV | `s_pv_d_1` | | Wie Status-quo-Wert | Inst. Leistung 2022 | Aus EEG 2023 und regionalen Potenzialen | +| | `s_pv_d_3` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. 
Leistung 2022 | - | +| | `s_pv_d_4` | 100 % | Wie Status-quo-Wert | Aus MaStR | - | +| Wasserkraft | `s_h_1` | Inst. Leistung 2022 | Wie Status-quo-Wert | Inst. Leistung 2022 | Inst. Leistung 2022 | +| Stromverbrauch | `s_v_1` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `s_v_3` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `s_v_4` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `s_v_5` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| Batterie-Großspeicher | `s_s_g_1` | 50 % | Wie Status-quo-Wert | Aus inst. Kapazität und Einspeisung 2022 | - | +| | | | Wie Status-quo-Wert | | | +| WP dezentral | `w_d_wp_1` | 95 % | 50 % | Inst. Leistung 2022 aus BMWK Langfristszenarien | Wert 2045 aus BMWK Langfristszenarien | +| | `w_d_wp_3` | 95 % | 50 % | - | - | +| | `w_d_wp_4` | 95 % | 50 % | - | - | +| | `w_d_wp_5` | 95 % | 50 % | - | - | +| WP zentral | `w_z_wp_1` | 95 % | 50 % | Inst. Leistung 2022 aus BMWK Langfristszenarien | Wert 2045 aus BMWK Langfristszenarien | +| Wärmeverbrauch | `w_v_1` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `w_v_3` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `w_v_4` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `w_v_5` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| Wärmespeicher dezentral | `w_d_s_1` | 200 % | 100 % | - | - | +| Wärmespeicher zentral | `w_z_s_1` | 200 % | 100 % | - | - | + +Die Maxima der Regler im Hauptpanel (`s_w_1`, `s_pv_ff_1` usw.) werden in der +App dynamisch aus den durch die UserInnen vorgenommenen Detaileinstellungen +(`s_w_3`, `s_pv_ff_1` usw.) berechnet. diff --git a/digipipe/store/datasets/app_settings/scripts/panels.py b/digipipe/store/datasets/app_settings/scripts/panels.py new file mode 100644 index 00000000..1fe9447e --- /dev/null +++ b/digipipe/store/datasets/app_settings/scripts/panels.py @@ -0,0 +1,496 @@ +import math + +import geopandas as gpd +import pandas as pd + + +class PanelSettings: + """Value store for settings panel""" + + def __init__(self, name, **settings): + """Create attributes from yaml file""" + self.name = name + self.__dict__.update(settings) + + def __str__(self): + return self.name + + @property + def settings(self): + """ + Make dictionary from value store while omitting non-data attributes + """ + return {k: v for k, v in self.__dict__.items() if k != "name"} + + def update(self, **kwargs): + """Updates control element's attributes""" + + def _keys_available(keys, keys_required): + return sorted(keys) == sorted(keys_required) + + for control, values in kwargs.items(): + if hasattr(self, control): + values_store = getattr(self, control) + if not isinstance(values, dict): + raise ValueError("Attributes are no dict.") + if _keys_available(values.keys(), values_store.keys()): + values_store.update(values) + setattr(self, control, values_store) + else: + raise ValueError( + "Attributes in given value dict do not match the " + "value store schema! Check the config.yml!" + ) + else: + raise ValueError( + f"{control} is no control element! 
Check the config.yml" + ) + + def is_complete(self): + """Returns True if all values are set (no default value "none" left)""" + for control, values_store in self.settings.items(): + for val in values_store.values(): + if val == "none": + print(f"Control {control} is missing at least one value.") + return False + return True + + +def add_electricity_panel_settings( + panel_settings: PanelSettings, + region: gpd.GeoDataFrame, + tech_data: dict, + wind_stats: pd.DataFrame, + wind_area_stats: pd.DataFrame, + pv_ground_stats: pd.DataFrame, + pv_ground_area_stats: pd.DataFrame, + # pv_ground_area_shares: dict, + pv_roof_stats: pd.DataFrame, + pv_roof_area_stats: pd.DataFrame, + pv_roof_area_deploy_stats: pd.DataFrame, + pv_ground_targets: dict, + pv_roof_targets: dict, + hydro_stats: pd.DataFrame, + demand_hh_power: pd.DataFrame, + demand_cts_power: pd.DataFrame, + demand_ind_power: pd.DataFrame, + storage_large_stats: pd.DataFrame, + # storage_small_stats: pd.DataFrame, + storage_pv_roof: dict, +) -> PanelSettings: + + # Wind energy + wind_search_area_start = round( + wind_stats.capacity_net.sum() + / ( + wind_area_stats[ + [ + "stp_2027_search_area_forest_area", + "stp_2027_search_area_open_area", + ] + ] + .sum() + .sum() + * tech_data["power_density"]["wind"] + ) + * 100 + ) + + panel_settings.update( + **dict( + s_w_1=dict( + max=round( + wind_area_stats.stp_2018_vreg.sum() + * tech_data["power_density"]["wind"] + ), + min=0, + start=round(wind_stats.capacity_net.sum()), + step=10, + status_quo=round(wind_stats.capacity_net.sum()), + future_scenario=round( + float(region.area_km2) + * 0.022 # Todo: Move to regulation datsset + * tech_data["power_density"]["wind"] + ), + ), + s_w_3=dict(start=True), + s_w_4=dict(start=False), + s_w_4_1=dict(start=True), + s_w_4_2=dict(start=False), + s_w_5=dict(start=False), + s_w_5_1=dict( + max=100, + min=0, + # Use theoretical values as start + # to meet the SQ capacity for sake of UX + start=wind_search_area_start, + step=5, + ), + s_w_5_2=dict( + max=100, + min=0, + # Use theoretical values as start + # to meet the SQ capacity for sake of UX + start=wind_search_area_start, + step=5, + ), + ) + ) + + # PV ground and roof + pv_ground_search_area_start = math.ceil( + pv_ground_stats.capacity_net.sum() + / ( + pv_ground_area_stats.sum().sum() + * tech_data["power_density"]["pv_ground"] + ) + * 100 + ) + pv_roof_used_start = ( + pv_roof_area_deploy_stats.capacity_net.sum() + / pv_roof_area_deploy_stats.installable_power.sum() + * 100 + ) + pv_roof_capacity_max = ( + pv_roof_area_stats[ + [ + f"installable_power_{orient}" + for orient in ["south", "north", "east", "west", "flat"] + ] + ] + .sum() + .sum() + ) + # Check if PV roof potential for current used share setting is smaller + # than actual installed capacity (which leads to too small startup value). + # If so, correct value by using installed capacity. + # The reason for this mismatch are differences in installed and installable + # (RPG dataset) capacities. 
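+ # Worked example with illustrative numbers: for 2000 MW installable roof + # capacity, a used-share start value of 3 % and 90 MW already installed, the + # check below gives 2000 * ceil(3) / 100 = 60 MW < 90 MW, so the start value + # is corrected to ceil(90 / 2000 * 100) = 5 %.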
+ if (pv_roof_capacity_max * math.ceil(pv_roof_used_start) / 100) < round( + pv_roof_stats.capacity_net.sum() + ): + pv_roof_used_start = math.ceil( + pv_roof_stats.capacity_net.sum() / pv_roof_capacity_max * 100 + ) + + panel_settings.update( + **dict( + s_pv_ff_1=dict( + max=math.ceil( + pv_ground_area_stats.sum().sum() + * tech_data["power_density"]["pv_ground"] + ), + min=0, + start=round(pv_ground_stats.capacity_net.sum()), + step=10, + status_quo=round(pv_ground_stats.capacity_net.sum()), + future_scenario=round(pv_ground_targets["target_power_total"]), + ), + s_pv_ff_3=dict( + max=100, + min=0, + # Use theoretical values as start + # to meet the SQ capacity for sake of UX + start=pv_ground_search_area_start, + step=5, + ), + s_pv_ff_4=dict( + max=100, + min=0, + # Use theoretical values as start + # to meet the SQ capacity for sake of UX + start=pv_ground_search_area_start, + step=5, + ), + s_pv_d_1=dict( + max=round(pv_roof_capacity_max * pv_roof_used_start / 100), + min=0, + start=round(pv_roof_stats.capacity_net.sum()), + step=10, + status_quo=round(pv_roof_stats.capacity_net.sum()), + future_scenario=round(pv_roof_targets["target_power_total"]), + ), + s_pv_d_3=dict( + max=50, + min=0, + start=round(pv_roof_used_start), + step=5, + status_quo=round(pv_roof_used_start), + ), + s_pv_d_4=dict( + max=100, + min=0, + start=round( + storage_pv_roof["pv_roof_share"]["home_storages"] * 100 + ), + step=5, + status_quo=round( + storage_pv_roof["pv_roof_share"]["home_storages"] * 100 + ), + ), + ) + ) + + # Hydro + panel_settings.update( + **dict( + s_h_1=dict( + max=round(hydro_stats.capacity_net.sum()), + min=0, + start=round(hydro_stats.capacity_net.sum()), + step=1, + status_quo=round(hydro_stats.capacity_net.sum()), + future_scenario=round(hydro_stats.capacity_net.sum()), + disable=True, + ), + ) + ) + + # Demand + total_demand = (demand_hh_power + demand_cts_power + demand_ind_power).sum() + feedin_wind_pv_daily_mean = ( + ( + pv_ground_stats.capacity_net.sum() + * tech_data["full_load_hours"]["pv_ground"]["2022"] + ) + + ( + wind_stats.capacity_net.sum() + * tech_data["full_load_hours"]["wind"]["2022"] + ) + ) / 365 # Daily in MWh + + panel_settings.update( + **dict( + s_v_1=dict( + max=200, + min=50, + start=100, + step=10, + status_quo=100, + future_scenario=round( + total_demand["2045"] / total_demand["2022"] * 100 + ), + ), + s_v_3=dict( + max=200, + min=50, + start=100, + step=10, + status_quo=100, + future_scenario=round( + demand_hh_power.sum()["2045"] + / demand_hh_power.sum()["2022"] + * 100 + ), + ), + s_v_4=dict( + max=200, + min=50, + start=100, + step=10, + status_quo=100, + future_scenario=round( + demand_cts_power.sum()["2045"] + / demand_cts_power.sum()["2022"] + * 100 + ), + ), + s_v_5=dict( + max=200, + min=50, + start=100, + step=10, + status_quo=100, + future_scenario=round( + demand_ind_power.sum()["2045"] + / demand_ind_power.sum()["2022"] + * 100 + ), + ), + s_s_g_1=dict( + max=50, + min=0, + start=math.ceil( + storage_large_stats.storage_capacity.sum() + / feedin_wind_pv_daily_mean + * 100, + ), + step=1, + status_quo=math.ceil( + storage_large_stats.storage_capacity.sum() + / feedin_wind_pv_daily_mean + * 100, + ), + ), + ) + ) + + return panel_settings + + +def add_heat_panel_settings( + panel_settings: PanelSettings, + heating_structure_decentral: pd.DataFrame, + heating_structure_central: pd.DataFrame, + demand_hh_heat: pd.DataFrame, + demand_cts_heat: pd.DataFrame, + demand_ind_heat: pd.DataFrame, +) -> PanelSettings: + + # Supply + heat_pump_share_dec = 
heating_structure_decentral.loc[ + heating_structure_decentral.carrier == "heat_pump" + ].demand_rel + heat_pump_share_cen = heating_structure_central.loc[ + heating_structure_central.carrier == "heat_pump" + ].demand_rel + + panel_settings.update( + **dict( + w_d_wp_1={ + "max": 100, + "min": 0, + "from-min": 50, + "from-max": 95, + "start": round(heat_pump_share_dec.loc[2022] * 100), + "step": 5, + "status_quo": round(heat_pump_share_dec.loc[2022] * 100), + "future_scenario": round(heat_pump_share_dec.loc[2045] * 100), + }, + w_d_wp_3={ + "max": 100, + "min": 0, + "from-min": 50, + "from-max": 95, + "start": round(heat_pump_share_dec.loc[2022] * 100), + "step": 5, + }, + w_d_wp_4={ + "max": 100, + "min": 0, + "from-min": 50, + "from-max": 95, + "start": round(heat_pump_share_dec.loc[2022] * 100), + "step": 5, + }, + w_d_wp_5={ + "max": 100, + "min": 0, + "from-min": 50, + "from-max": 95, + "start": round(heat_pump_share_dec.loc[2022] * 100), + "step": 5, + }, + w_z_wp_1={ + "max": 100, + "min": 0, + "from-min": 50, + "from-max": 95, + "start": round(heat_pump_share_cen.loc[2022] * 100), + "step": 5, + "status_quo": round(heat_pump_share_cen.loc[2022] * 100), + "future_scenario": round(heat_pump_share_cen.loc[2045] * 100), + }, + ) + ) + + # Demand + total_demand = (demand_hh_heat + demand_cts_heat + demand_ind_heat).sum() + panel_settings.update( + **dict( + w_v_1=dict( + max=200, + min=50, + start=100, + step=10, + status_quo=100, + future_scenario=round( + total_demand["2045"] / total_demand["2022"] * 100 + ), + ), + w_v_3=dict( + max=200, + min=50, + start=100, + step=10, + status_quo=100, + future_scenario=round( + demand_hh_heat.sum()["2045"] + / demand_hh_heat.sum()["2022"] + * 100 + ), + ), + w_v_4=dict( + max=200, + min=50, + start=100, + step=10, + status_quo=100, + future_scenario=round( + demand_cts_heat.sum()["2045"] + / demand_cts_heat.sum()["2022"] + * 100 + ), + ), + w_v_5=dict( + max=200, + min=50, + start=100, + step=10, + status_quo=100, + future_scenario=round( + demand_ind_heat.sum()["2045"] + / demand_ind_heat.sum()["2022"] + * 100 + ), + ), + ) + ) + + # Storages + panel_settings.update( + **dict( + w_d_s_1=dict( + max=200, + min=25, + start=100, + step=5, + ), + w_z_s_1=dict( + max=200, + min=25, + start=100, + step=5, + ), + ) + ) + + return panel_settings + + +def add_traffic_panel_settings( + panel_settings: PanelSettings, +) -> PanelSettings: + + panel_settings.update( + **dict( + v_iv_1=dict( + max=100, + min=0, + start=0, # TODO + step=5, + # status_quo=0, # TODO + # future_scenario=0, # TODO + ), + v_iv_3=dict( + max=100, + min=0, + start=0, # TODO + step=5, + # status_quo=0, # TODO + # future_scenario=0, # TODO + ), + ) + ) + + return panel_settings diff --git a/digipipe/store/datasets/bkg_vg250_districts_region/config.yml b/digipipe/store/datasets/bkg_vg250_districts_region/config.yml new file mode 100644 index 00000000..1cc6ee1b --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_districts_region/config.yml @@ -0,0 +1,12 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
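A brief usage sketch of the `PanelSettings` store defined in `panels.py` above: the attribute skeleton comes from `config["panel_settings_templates"]`, `update()` only accepts dicts whose keys match that skeleton, and `is_complete()` guards the final JSON dump. The template and values below are illustrative, not the real slider data:

```python
from digipipe.store.datasets.app_settings.scripts.panels import PanelSettings

# Single-slider template; the real templates live in this dataset's config.yml.
template = {
    "s_h_1": {
        "max": "none", "min": "none", "start": "none", "step": "none",
        "status_quo": "none", "future_scenario": "none", "disable": "none",
    }
}

panel = PanelSettings(name="panel_settings_electricity", **template)
panel.update(
    s_h_1=dict(
        max=30, min=0, start=30, step=1,
        status_quo=30, future_scenario=30, disable=True,
    )
)
assert panel.is_complete()
print(panel.settings)  # -> dumped to energy_settings_panel.json by the rule above
```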
# +############################################################################## + +# Filter settings +layer: vg250_krs +attributes: + {"NUTS": "nuts", + "GEN": "name", + "geometry": "geometry"} +attributes_filter: + {"GF": 4} diff --git a/digipipe/store/datasets/bkg_vg250_districts_region/create.smk b/digipipe/store/datasets/bkg_vg250_districts_region/create.smk new file mode 100644 index 00000000..eb5e4372 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_districts_region/create.smk @@ -0,0 +1,21 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("datasets", "bkg_vg250_districts_region") + +rule create: + """ + Extract districts of region + """ + input: rules.preprocessed_bkg_vg250_create.output + output: DATASET_PATH / "data" / "bkg_vg250_districts_region.gpkg" + params: + script=DATASET_PATH / "scripts" / "create.py", + config_path=DATASET_PATH / "config.yml" + shell: + "python {params.script} {input} {params.config_path} {output}" diff --git a/digipipe/store/datasets/bkg_vg250_districts_region/data/.gitkeep b/digipipe/store/datasets/bkg_vg250_districts_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bkg_vg250_districts_region/dataset.md b/digipipe/store/datasets/bkg_vg250_districts_region/dataset.md new file mode 100644 index 00000000..48d3e376 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_districts_region/dataset.md @@ -0,0 +1,4 @@ +# Landkreise + +Landkreise der Region aus Geodaten der Verwaltungsgebiete extrahiert und +nach Landmasse gefiltert (Geofaktor 4 = "mit Struktur Land"). diff --git a/digipipe/store/datasets/bkg_vg250_districts_region/scripts/create.py b/digipipe/store/datasets/bkg_vg250_districts_region/scripts/create.py new file mode 100644 index 00000000..1d186b74 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_districts_region/scripts/create.py @@ -0,0 +1,45 @@ +import sys + +import geopandas as gpd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.config import read_config +from digipipe.scripts.geo import ( + convert_to_multipolygon, + rename_filter_attributes, + reproject_simplify, + write_geofile, +) + + +def process(): + data = gpd.read_file(infile, layer=config["layer"]) + data = rename_filter_attributes( + gdf=data, + attrs_filter_by_values=config["attributes_filter"], + attrs_mapping=config["attributes"], + ) + data = reproject_simplify( + gdf=data, + add_id_column=True, + ) + + data = convert_to_multipolygon(data) + + data = data.assign(area_km2=data.area / 1e6) + + write_geofile( + gdf=data, + file=outfile, + layer_name=config["layer"], + ) + + +if __name__ == "__main__": + infile = sys.argv[1] + config = read_config(sys.argv[2]) + config["attributes_filter"]["NUTS"] = GLOBAL_CONFIG["global"]["geodata"][ + "nuts" + ] + outfile = sys.argv[3] + process() diff --git a/digipipe/store/datasets/bkg_vg250_federal_states/config.yml b/digipipe/store/datasets/bkg_vg250_federal_states/config.yml new file mode 100644 index 00000000..ad316c7e --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_federal_states/config.yml @@ -0,0 +1,12 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
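The heavy lifting in the district script above happens in the helpers from `digipipe.scripts.geo`. As a rough orientation only, the rename/filter step corresponds to something like the following plain-geopandas snippet, assuming `rename_filter_attributes` filters rows by the configured attribute values and keeps/renames the mapped columns (input path illustrative):

```python
import geopandas as gpd

# Keep only land mass (GF = 4, "mit Struktur Land") and the configured attributes.
data = gpd.read_file("bkg_vg250.gpkg", layer="vg250_krs")
data = data.loc[data["GF"] == 4]
data = data[["NUTS", "GEN", "geometry"]].rename(
    columns={"NUTS": "nuts", "GEN": "name"}
)
print(data.head())
```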
# +############################################################################## + +# Filter settings +layer: vg250_lan +attributes: + {"NUTS": "nuts", + "GEN": "name", + "geometry": "geometry"} +attributes_filter: + {"GF": 4} diff --git a/digipipe/store/datasets/bkg_vg250_federal_states/create.smk b/digipipe/store/datasets/bkg_vg250_federal_states/create.smk new file mode 100644 index 00000000..e07f135d --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_federal_states/create.smk @@ -0,0 +1,21 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("datasets", "bkg_vg250_federal_states") + +rule create: + """ + Extract land mass of federal states + """ + input: rules.preprocessed_bkg_vg250_create.output + output: DATASET_PATH / "data" / "bkg_vg250_federal_states.gpkg" + params: + script=DATASET_PATH / "scripts" / "create.py", + config_path=DATASET_PATH / "config.yml" + shell: + "python {params.script} {input} {params.config_path} {output}" diff --git a/digipipe/store/datasets/bkg_vg250_federal_states/data/.gitkeep b/digipipe/store/datasets/bkg_vg250_federal_states/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bkg_vg250_federal_states/dataset.md b/digipipe/store/datasets/bkg_vg250_federal_states/dataset.md new file mode 100644 index 00000000..9df7cd92 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_federal_states/dataset.md @@ -0,0 +1,4 @@ +# Bundesländer + +Bundesländergrenzen aus Geodaten der Verwaltungsgebiete extrahiert und nach +Landmasse gefiltert (Geofaktor 4 = "mit Struktur Land"). diff --git a/digipipe/store/datasets/bkg_vg250_federal_states/scripts/create.py b/digipipe/store/datasets/bkg_vg250_federal_states/scripts/create.py new file mode 100644 index 00000000..35032ca0 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_federal_states/scripts/create.py @@ -0,0 +1,41 @@ +import sys + +import geopandas as gpd + +from digipipe.scripts.config import read_config +from digipipe.scripts.geo import ( + convert_to_multipolygon, + rename_filter_attributes, + reproject_simplify, + write_geofile, +) + + +def process(): + data = gpd.read_file(infile, layer=config["layer"]) + data = rename_filter_attributes( + gdf=data, + attrs_filter_by_values=config["attributes_filter"], + attrs_mapping=config["attributes"], + ) + data = reproject_simplify( + gdf=data, + add_id_column=True, + ) + + data = convert_to_multipolygon(data) + + data = data.assign(area_km2=data.area / 1e6) + + write_geofile( + gdf=data, + file=outfile, + layer_name=config["layer"], + ) + + +if __name__ == "__main__": + infile = sys.argv[1] + config = read_config(sys.argv[2]) + outfile = sys.argv[3] + process() diff --git a/digipipe/store/datasets/bkg_vg250_muns_region/config.yml b/digipipe/store/datasets/bkg_vg250_muns_region/config.yml new file mode 100644 index 00000000..0f838533 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_muns_region/config.yml @@ -0,0 +1,12 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
# +############################################################################## + +# Filter settings +layer: vg250_gem +attributes: + {"AGS": "ags", + "GEN": "name", + "geometry": "geometry"} +attributes_filter: + {"GF": 4} diff --git a/digipipe/store/datasets/bkg_vg250_muns_region/create.smk b/digipipe/store/datasets/bkg_vg250_muns_region/create.smk new file mode 100644 index 00000000..48e756f9 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_muns_region/create.smk @@ -0,0 +1,19 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import get_abs_dataset_path, PATH_TO_REGION_DISTRICTS_GPKG + +DATASET_PATH = get_abs_dataset_path("datasets", "bkg_vg250_muns_region") + +rule create: + """ + Extract municipalities of region + """ + input: + muns=rules.preprocessed_bkg_vg250_create.output, + districts=PATH_TO_REGION_DISTRICTS_GPKG + output: DATASET_PATH / "data" / "bkg_vg250_muns_region.gpkg" + script: DATASET_PATH / "scripts" / "create.py" diff --git a/digipipe/store/datasets/bkg_vg250_muns_region/data/.gitkeep b/digipipe/store/datasets/bkg_vg250_muns_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bkg_vg250_muns_region/dataset.md b/digipipe/store/datasets/bkg_vg250_muns_region/dataset.md new file mode 100644 index 00000000..f0b4e7ad --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_muns_region/dataset.md @@ -0,0 +1,4 @@ +# Gemeinden + +Gemeinden der Region aus Geodaten der Verwaltungsgebiete extrahiert und +nach Landmasse gefiltert (Geofaktor 4 = "mit Struktur Land"). diff --git a/digipipe/store/datasets/bkg_vg250_muns_region/scripts/create.py b/digipipe/store/datasets/bkg_vg250_muns_region/scripts/create.py new file mode 100644 index 00000000..6a9ff782 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_muns_region/scripts/create.py @@ -0,0 +1,48 @@ +import geopandas as gpd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.geo import ( + convert_to_multipolygon, + overlay, + rename_filter_attributes, + reproject_simplify, + write_geofile, +) + + +def process(): + muns = gpd.read_file(snakemake.input.muns[0], layer=config["layer"]) + muns = rename_filter_attributes( + gdf=muns, + attrs_filter_by_values=config["attributes_filter"], + attrs_mapping=config["attributes"], + ) + muns = reproject_simplify( + gdf=muns, + add_id_column=True, + ) + + muns = overlay( + gdf=muns.rename(columns={"id": "municipality_id"}), + gdf_overlay=gpd.read_file(snakemake.input.districts), + retain_rename_overlay_columns={"id": "district_id"}, + gdf_use_centroid=True, + ).rename(columns={"municipality_id": "id"}) + + muns = muns.assign(area_km2=muns.area / 1e6) + + muns = convert_to_multipolygon(muns) + + write_geofile( + gdf=muns, + file=snakemake.output[0], + layer_name=config["layer"], + ) + + +if __name__ == "__main__": + config = snakemake.config + config["attributes_filter"]["NUTS"] = GLOBAL_CONFIG["global"]["geodata"][ + "nuts" + ] + process() diff --git a/digipipe/store/datasets/bkg_vg250_region/config.yml b/digipipe/store/datasets/bkg_vg250_region/config.yml new file mode 100644 index 00000000..17d3af5a --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_region/config.yml @@ -0,0 +1,3 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
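In the municipalities script above, `overlay(..., gdf_use_centroid=True)` attaches a `district_id` to every municipality. Presumably this boils down to a point-in-polygon join on the municipality centroids, roughly like the sketch below (an assumption about the helper's behaviour; file paths illustrative):

```python
import geopandas as gpd

districts = gpd.read_file("bkg_vg250_districts_region.gpkg")[["id", "geometry"]]
districts = districts.rename(columns={"id": "district_id"})

muns = gpd.read_file("bkg_vg250_muns_region.gpkg")
points = muns.copy()
points["geometry"] = points.centroid  # join on centroids, not full polygons

joined = gpd.sjoin(points, districts, how="left", predicate="within")
muns["district_id"] = joined["district_id"]
print(muns[["name", "district_id"]].head())
```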
# +############################################################################## diff --git a/digipipe/store/datasets/bkg_vg250_region/create.smk b/digipipe/store/datasets/bkg_vg250_region/create.smk new file mode 100644 index 00000000..268a9e63 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_region/create.smk @@ -0,0 +1,21 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("datasets", "bkg_vg250_region") + +rule create: + """ + Create region polygon from districts + """ + input: rules.datasets_bkg_vg250_districts_region_create.output + output: DATASET_PATH / "data" / "bkg_vg250_region.gpkg" + params: + script=DATASET_PATH / "scripts" / "create.py", + config_path=DATASET_PATH / "config.yml" + shell: + "python {params.script} {input} {params.config_path} {output}" diff --git a/digipipe/store/datasets/bkg_vg250_region/data/.gitkeep b/digipipe/store/datasets/bkg_vg250_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bkg_vg250_region/dataset.md b/digipipe/store/datasets/bkg_vg250_region/dataset.md new file mode 100644 index 00000000..b6e6928f --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_region/dataset.md @@ -0,0 +1,3 @@ +# Region + +Region aus Geodaten der Landkreise zusammengeführt. diff --git a/digipipe/store/datasets/bkg_vg250_region/scripts/create.py b/digipipe/store/datasets/bkg_vg250_region/scripts/create.py new file mode 100644 index 00000000..1f12968c --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_region/scripts/create.py @@ -0,0 +1,33 @@ +import os +import sys + +import geopandas as gpd + +from digipipe.scripts.geo import ( + convert_to_multipolygon, + reproject_simplify, + write_geofile, +) + + +def process(): + data = gpd.read_file(infile) + data = gpd.GeoDataFrame( + crs=data.crs.srs, geometry=[data.buffer(0.1).unary_union] + ) + data = reproject_simplify(gdf=data, add_id_column=True) + data = convert_to_multipolygon(data) + + data = data.assign(area_km2=data.area / 1e6) + + write_geofile( + gdf=data, + file=outfile, + layer_name=os.path.basename(outfile).split(".")[0], + ) + + +if __name__ == "__main__": + infile = sys.argv[1] + outfile = sys.argv[3] + process() diff --git a/digipipe/store/datasets/bkg_vg250_state/config.yml b/digipipe/store/datasets/bkg_vg250_state/config.yml new file mode 100644 index 00000000..e5fb2ad5 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_state/config.yml @@ -0,0 +1,11 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Filter settings +layer: vg250_sta +attributes: + {"GEN": "name", + "geometry": "geometry"} +attributes_filter: + {"GF": 4} diff --git a/digipipe/store/datasets/bkg_vg250_state/create.smk b/digipipe/store/datasets/bkg_vg250_state/create.smk new file mode 100644 index 00000000..35b48129 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_state/create.smk @@ -0,0 +1,21 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("datasets", "bkg_vg250_state") + +rule create: + """ + Extract land mass of Germany + """ + input: rules.preprocessed_bkg_vg250_create.output + output: DATASET_PATH / "data" / "bkg_vg250_state.gpkg" + params: + script=DATASET_PATH / "scripts" / "create.py", + config_path=DATASET_PATH / "config.yml" + shell: + "python {params.script} {input} {params.config_path} {output}" diff --git a/digipipe/store/datasets/bkg_vg250_state/data/.gitkeep b/digipipe/store/datasets/bkg_vg250_state/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bkg_vg250_state/dataset.md b/digipipe/store/datasets/bkg_vg250_state/dataset.md new file mode 100644 index 00000000..973fb5fa --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_state/dataset.md @@ -0,0 +1,4 @@ +# Staat + +Staatsgrenze aus Geodaten der Verwaltungsgebiete extrahiert und nach Landmasse +gefiltert (Geofaktor 4 = "mit Struktur Land"). diff --git a/digipipe/store/datasets/bkg_vg250_state/scripts/create.py b/digipipe/store/datasets/bkg_vg250_state/scripts/create.py new file mode 100644 index 00000000..35032ca0 --- /dev/null +++ b/digipipe/store/datasets/bkg_vg250_state/scripts/create.py @@ -0,0 +1,41 @@ +import sys + +import geopandas as gpd + +from digipipe.scripts.config import read_config +from digipipe.scripts.geo import ( + convert_to_multipolygon, + rename_filter_attributes, + reproject_simplify, + write_geofile, +) + + +def process(): + data = gpd.read_file(infile, layer=config["layer"]) + data = rename_filter_attributes( + gdf=data, + attrs_filter_by_values=config["attributes_filter"], + attrs_mapping=config["attributes"], + ) + data = reproject_simplify( + gdf=data, + add_id_column=True, + ) + + data = convert_to_multipolygon(data) + + data = data.assign(area_km2=data.area / 1e6) + + write_geofile( + gdf=data, + file=outfile, + layer_name=config["layer"], + ) + + +if __name__ == "__main__": + infile = sys.argv[1] + config = read_config(sys.argv[2]) + outfile = sys.argv[3] + process() diff --git a/digipipe/store/datasets/bnetza_mastr_biomass_region/config.yml b/digipipe/store/datasets/bnetza_mastr_biomass_region/config.yml new file mode 100644 index 00000000..789341b8 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_biomass_region/config.yml @@ -0,0 +1,34 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
# +############################################################################## + +# Filter settings +attributes: + {"EinheitMastrNummer": "mastr_id", + "KwkMastrNummer": "kwk_mastr_id", + "EinheitBetriebsstatus": "status", + "LokationMastrNummer": "mastr_location_id", + "Postleitzahl": "zip_code", + "Ort": "city", + "Laengengrad": "lon", + "Breitengrad": "lat", + "Inbetriebnahmedatum": "commissioning_date", + "GeplantesInbetriebnahmedatum": "commissioning_date_planned", + "DatumEndgueltigeStilllegung": "decommissioning_date", + "NameStromerzeugungseinheit": "name", + "Nettonennleistung": "capacity_net", + "Bruttoleistung": "capacity_gross", + "ThermischeNutzleistung": "th_capacity", + "Einspeisungsart": "feedin_type", + "Technologie": "technology", + "Hauptbrennstoff": "fuel", + "Biomasseart": "fuel_type", + "AusschliesslicheVerwendungBiomasse": "biomass_only", + "BiogasInanspruchnahmeFlexiPraemie": "flexibility_bonus", + "NetzbetreiberpruefungStatus": "validated_by_system_operator"} +attributes_filter: + {"Land": "Deutschland", + "EinheitBetriebsstatus": ["In Betrieb", "In Planung"]} + +# Output settings +layer: bnetza_mastr_biomass diff --git a/digipipe/store/datasets/bnetza_mastr_biomass_region/create.smk b/digipipe/store/datasets/bnetza_mastr_biomass_region/create.smk new file mode 100644 index 00000000..8418f946 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_biomass_region/create.smk @@ -0,0 +1,60 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +import geopandas as gpd + +from digipipe.scripts.datasets.mastr import create_stats_per_municipality +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG, +) + +DATASET_PATH = get_abs_dataset_path("datasets", "bnetza_mastr_biomass_region") +SOURCE_DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bnetza_mastr", data_dir=True +) + + +rule create: + """ + Extract biomass units for region + """ + input: + units=SOURCE_DATASET_PATH / "bnetza_mastr_biomass_raw.csv", + locations=SOURCE_DATASET_PATH + / "bnetza_mastr_locations_extended_raw.csv", + gridconn=SOURCE_DATASET_PATH / "bnetza_mastr_grid_connections_raw.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG, + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_biomass_region.gpkg", + outfile_agg=DATASET_PATH + / "data" + / "bnetza_mastr_biomass_agg_region.gpkg", + params: + config_file=DATASET_PATH / "config.yml", + script: + DATASET_PATH / "scripts" / "create.py" + + +rule create_power_stats_muns: + """ + Create stats on installed count of units and power per mun + """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_biomass_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + DATASET_PATH / "data" / "bnetza_mastr_biomass_stats_muns.csv", + run: + units = create_stats_per_municipality( + units_df=gpd.read_file(input.units), + muns=gpd.read_file(input.region_muns), + column="capacity_net", + ) + units["capacity_net"] = units["capacity_net"].div(1e3) # kW to MW + units.to_csv(output[0]) diff --git a/digipipe/store/datasets/bnetza_mastr_biomass_region/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_biomass_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_biomass_region/dataset.md b/digipipe/store/datasets/bnetza_mastr_biomass_region/dataset.md new file 
mode 100644 index 00000000..ff6a69c7 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_biomass_region/dataset.md @@ -0,0 +1,22 @@ +# Biomasse-/Biogasanlagen + +Biomasse-/Biogasanlagen in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_biomass_stats_muns.csv`. diff --git a/digipipe/store/datasets/bnetza_mastr_biomass_region/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_biomass_region/scripts/create.py new file mode 100644 index 00000000..ca48097d --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_biomass_region/scripts/create.py @@ -0,0 +1,117 @@ +import geopandas as gpd +import pandas as pd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.datasets import mastr +from digipipe.scripts.geo import ( + overlay, + rename_filter_attributes, + write_geofile, +) +from digipipe.store.utils import ( + PATH_TO_REGION_DISTRICTS_GPKG, + get_names_from_nuts, +) + + +def process() -> None: + attrs = snakemake.config["attributes"] + attrs_filter = snakemake.config["attributes_filter"] + attrs_filter["Landkreis"] = get_names_from_nuts( + PATH_TO_REGION_DISTRICTS_GPKG, + GLOBAL_CONFIG["global"]["geodata"]["nuts"], + ) + + units = pd.read_csv( + snakemake.input.units, + usecols=set(attrs.keys()) | set(attrs_filter.keys()), + dtype={"Postleitzahl": str}, + ) + + units = rename_filter_attributes( + gdf=units, + attrs_filter_by_values=attrs_filter, + attrs_mapping=attrs, + ).set_index("mastr_id") + + units = mastr.add_voltage_level( + units_df=units, + locations_path=snakemake.input.locations, + gridconn_path=snakemake.input.gridconn, + ) + + # Add geometry and drop units without coords and + # add column to indicate that location from original data was used + units_with_geom = mastr.add_geometry(units) + units_with_geom = units_with_geom.assign( + geometry_approximated=0, + ) + + units_without_geom = units.loc[(units.lon.isna() | units.lat.isna())].drop( + columns=["lon", "lat"] + ) + + # Add geometry for all units without coords (<=30 kW) and + # add column to indicate that location was inferred by geocoding + if len(units_without_geom) > 0: + ( + units_with_inferred_geom_gdf, + units_with_inferred_geom_agg_gdf, + ) = mastr.geocode_units_wo_geometry( + units_without_geom, + columns_agg_functions={ + "capacity_net": ("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum"), + "th_capacity": ("th_capacity", "sum"), + }, + ) + + # Merge both GDFs + units = pd.concat([units_with_geom, units_with_inferred_geom_gdf]) + + units_agg = pd.concat( + [ + 
units_with_geom.assign(unit_count=1), + units_with_inferred_geom_agg_gdf, + ] + ) + else: + units = units_with_geom + units_agg = units_with_geom.assign(unit_count=1) + + # Clip to region and add mun and district ids + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + + write_geofile( + gdf=units, + file=snakemake.output.outfile, + layer_name=snakemake.config["layer"], + ) + write_geofile( + gdf=units_agg, + file=snakemake.output.outfile_agg, + layer_name=snakemake.config["layer"], + ) + + +process() diff --git a/digipipe/store/datasets/bnetza_mastr_captions/config.yml b/digipipe/store/datasets/bnetza_mastr_captions/config.yml new file mode 100644 index 00000000..923c3eee --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_captions/config.yml @@ -0,0 +1,8 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +additional_captions: + {"voltage_level": "Spannungsebene", + "geometry_approximated": "Standort genähert", + "unit_count": "Anzahl Anlagen"} diff --git a/digipipe/store/datasets/bnetza_mastr_captions/create.smk b/digipipe/store/datasets/bnetza_mastr_captions/create.smk new file mode 100644 index 00000000..010af6b9 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_captions/create.smk @@ -0,0 +1,34 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.scripts.config import load_dataset_configs +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("datasets", "bnetza_mastr_captions") + +def get_mastr_configs() -> dict: + return { + k: v for k, v in load_dataset_configs().get("store")["datasets"].items() + if k.startswith("bnetza_mastr") + } + + +rule create: + """ + Create name mapping file for MaStR dataset + """ + input: + mastr_configs=expand( + get_abs_dataset_path("datasets", "{name}",) / "config.yml", + name=get_mastr_configs().keys() + ) + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_attribute_captions.json", + params: + mastr_configs=get_mastr_configs(), + additional_captions=config["additional_captions"] + script: + DATASET_PATH / "scripts" / "create.py" diff --git a/digipipe/store/datasets/bnetza_mastr_captions/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_captions/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_captions/dataset.md b/digipipe/store/datasets/bnetza_mastr_captions/dataset.md new file mode 100644 index 00000000..ca7c9313 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_captions/dataset.md @@ -0,0 +1,5 @@ +# Bezeichner und Namen aus MaStR + +Bezeichner und Namen aus MaStR als Mapping -> + wobei CamelCase aus in Leerzeichen konvertiert +werden. 
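
Editor's note: the caption mapping sketched in `bnetza_mastr_captions/dataset.md` above (MaStR attribute names mapped to readable captions, with CamelCase split into space-separated words and umlauts restored) is generated by the `create.py` in the next hunk. A minimal illustrative sketch of that conversion, not part of the patch and using example attribute names only:

```python
import re

# Same substitutions as the script below: restore umlauts and the MaStR spelling
REPLACEMENTS = {"ae": "ä", "oe": "ö", "ue": "ü", "Mastr": "MaStR"}

def to_caption(attr_name: str) -> str:
    """Split a CamelCase MaStR attribute name into words and fix umlauts."""
    caption = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", attr_name)
    for old, new in REPLACEMENTS.items():
        caption = caption.replace(old, new)
    return caption

print(to_caption("EinheitMastrNummer"))  # -> "Einheit MaStR Nummer"
print(to_caption("Laengengrad"))         # -> "Längengrad"
```
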
diff --git a/digipipe/store/datasets/bnetza_mastr_captions/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_captions/scripts/create.py new file mode 100644 index 00000000..f4ada156 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_captions/scripts/create.py @@ -0,0 +1,34 @@ +import json +import re + + +def process() -> None: + def replace_umlaut(s: str) -> str: + replace_dict = {"ae": "ä", "oe": "ö", "ue": "ü", "Mastr": "MaStR"} + for k, v in replace_dict.items(): + s = s.replace(k, v) + return s + + # Build mapping dict + naming_dict = dict() + for mastr_config in snakemake.params.mastr_configs.values(): + for k, v in mastr_config.get("attributes", {}).items(): + if naming_dict.get(k, None) is None: + naming_dict[v] = replace_umlaut( + re.sub("([a-z])([A-Z])", "\g<1> \g<2>", k) + ) + naming_dict.update(snakemake.params.additional_captions) + + naming_dict = { + "datasets_caption_map": { + _: "mastr" for _ in snakemake.params.mastr_configs.keys() + }, + "captions": {"mastr": naming_dict}, + } + + # Dump as JSON while preserving umlaute + with open(snakemake.output.outfile, "w", encoding="utf8") as f: + json.dump(naming_dict, f, ensure_ascii=False, indent=4) + + +process() diff --git a/digipipe/store/datasets/bnetza_mastr_combustion_region/config.yml b/digipipe/store/datasets/bnetza_mastr_combustion_region/config.yml new file mode 100644 index 00000000..ce7ed7be --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_combustion_region/config.yml @@ -0,0 +1,36 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Filter settings +attributes: + {"EinheitMastrNummer": "mastr_id", + "KwkMastrNummer": "kwk_mastr_id", + "Kraftwerksnummer": "bnetza_id", + "EinheitBetriebsstatus": "status", + "LokationMastrNummer": "mastr_location_id", + "Postleitzahl": "zip_code", + "Ort": "city", + "Laengengrad": "lon", + "Breitengrad": "lat", + "Inbetriebnahmedatum": "commissioning_date", + "GeplantesInbetriebnahmedatum": "commissioning_date_planned", + "DatumEndgueltigeStilllegung": "decommissioning_date", + "NameKraftwerk": "name", + "NameKraftwerksblock": "block_name", + "Einsatzort": "usage_sector", + "Nettonennleistung": "capacity_net", + "Bruttoleistung": "capacity_gross", + "ThermischeNutzleistung": "th_capacity", + "Einspeisungsart": "feedin_type", + "Technologie": "technology", + "Hauptbrennstoff": "fuel_primary", + "WeitererHauptbrennstoff": "fuel_secondary", + "WeitereBrennstoffe": "fuel_other", + "NetzbetreiberpruefungStatus": "validated_by_system_operator"} +attributes_filter: + {"Land": "Deutschland", + "EinheitBetriebsstatus": ["In Betrieb", "In Planung"]} + +# Output settings +layer: bnetza_mastr_combustion diff --git a/digipipe/store/datasets/bnetza_mastr_combustion_region/create.smk b/digipipe/store/datasets/bnetza_mastr_combustion_region/create.smk new file mode 100644 index 00000000..50bd51c3 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_combustion_region/create.smk @@ -0,0 +1,62 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +import geopandas as gpd + +from digipipe.scripts.datasets.mastr import create_stats_per_municipality +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG, +) + +DATASET_PATH = get_abs_dataset_path( + "datasets", "bnetza_mastr_combustion_region" +) +SOURCE_DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bnetza_mastr", data_dir=True +) + + +rule create: + """ + Extract combustion units for region + """ + input: + units=SOURCE_DATASET_PATH / "bnetza_mastr_combustion_raw.csv", + locations=SOURCE_DATASET_PATH + / "bnetza_mastr_locations_extended_raw.csv", + gridconn=SOURCE_DATASET_PATH / "bnetza_mastr_grid_connections_raw.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG, + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_combustion_region.gpkg", + outfile_agg=DATASET_PATH + / "data" + / "bnetza_mastr_combustion_agg_region.gpkg", + params: + config_file=DATASET_PATH / "config.yml", + script: + DATASET_PATH / "scripts" / "create.py" + + +rule create_power_stats_muns: + """ + Create stats on installed count of units and power per mun + """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_combustion_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + DATASET_PATH / "data" / "bnetza_mastr_combustion_stats_muns.csv", + run: + units = create_stats_per_municipality( + units_df=gpd.read_file(input.units), + muns=gpd.read_file(input.region_muns), + column="capacity_net", + ) + units["capacity_net"] = units["capacity_net"].div(1e3) # kW to MW + units.to_csv(output[0]) diff --git a/digipipe/store/datasets/bnetza_mastr_combustion_region/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_combustion_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_combustion_region/dataset.md b/digipipe/store/datasets/bnetza_mastr_combustion_region/dataset.md new file mode 100644 index 00000000..4e7bf5d9 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_combustion_region/dataset.md @@ -0,0 +1,22 @@ +# Verbrennungskraftwerke + +Verbrennungskraftwerke in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_combustion_stats_muns.csv`. 
diff --git a/digipipe/store/datasets/bnetza_mastr_combustion_region/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_combustion_region/scripts/create.py new file mode 100644 index 00000000..03445983 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_combustion_region/scripts/create.py @@ -0,0 +1,142 @@ +import geopandas as gpd +import pandas as pd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.datasets import mastr +from digipipe.scripts.geo import ( + overlay, + rename_filter_attributes, + write_geofile, +) +from digipipe.store.utils import ( + PATH_TO_REGION_DISTRICTS_GPKG, + df_merge_string_columns, + get_names_from_nuts, +) + + +def process() -> None: + attrs = snakemake.config["attributes"] + attrs_filter = snakemake.config["attributes_filter"] + attrs_filter["Landkreis"] = get_names_from_nuts( + PATH_TO_REGION_DISTRICTS_GPKG, + GLOBAL_CONFIG["global"]["geodata"]["nuts"], + ) + units = pd.read_csv( + snakemake.input.units, + usecols=set(attrs.keys()) | set(attrs_filter.keys()), + dtype={"Postleitzahl": str}, + ) + + units = rename_filter_attributes( + gdf=units, + attrs_filter_by_values=attrs_filter, + attrs_mapping=attrs, + ).set_index("mastr_id") + + units = mastr.add_voltage_level( + units_df=units, + locations_path=snakemake.input.locations, + gridconn_path=snakemake.input.gridconn, + ) + + # Add geometry and drop units without coords and + # add column to indicate that location from original data was used + units_with_geom = mastr.add_geometry(units) + units_with_geom = units_with_geom.assign( + geometry_approximated=0, + ) + + units_without_geom = units.loc[(units.lon.isna() | units.lat.isna())].drop( + columns=["lon", "lat"] + ) + + # Add geometry for all units without coords (<=30 kW) and + # add column to indicate that location was inferred by geocoding + if len(units_without_geom) > 0: + units_without_geom["fuel_primary"].fillna("", inplace=True) + units_without_geom["fuel_secondary"].fillna("", inplace=True) + + ( + units_with_inferred_geom_gdf, + units_with_inferred_geom_agg_gdf, + ) = mastr.geocode_units_wo_geometry( + units_without_geom, + columns_agg_functions={ + "capacity_net": ("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum"), + "th_capacity": ("th_capacity", "sum"), + "fuel_primary": ("fuel_primary", ";".join), + "fuel_secondary": ("fuel_secondary", ";".join), + }, + ) + + # Merge fuel types into one and make unique + units_with_inferred_geom_agg_gdf["fuels"] = df_merge_string_columns( + units_with_inferred_geom_agg_gdf[["fuel_primary", "fuel_secondary"]] + ) + + # Merge both GDFs + units = pd.concat([units_with_geom, units_with_inferred_geom_gdf]).drop( + columns=["fuel_primary", "fuel_secondary"] + ) + units_agg = pd.concat( + [ + units_with_geom.assign( + unit_count=1, + fuels=( + units_with_geom["fuel_primary"].fillna("") + + "; " + + units_with_geom["fuel_secondary"].fillna("") + ), + ), + units_with_inferred_geom_agg_gdf, + ] + ).drop(columns=["fuel_primary", "fuel_secondary"]) + else: + units = units_with_geom.drop(columns=["fuel_primary", "fuel_secondary"]) + units_agg = units_with_geom.assign( + unit_count=1, + fuels=( + units_with_geom["fuel_primary"].fillna("") + + "; " + + units_with_geom["fuel_secondary"].fillna("") + ), + ).drop(columns=["fuel_primary", "fuel_secondary"]) + + # Clip to region and add mun and district ids + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": 
"municipality_id"}, + ) + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + + write_geofile( + gdf=units, + file=snakemake.output.outfile, + layer_name=snakemake.config["layer"], + ) + write_geofile( + gdf=units_agg, + file=snakemake.output.outfile_agg, + layer_name=snakemake.config["layer"], + ) + + +process() diff --git a/digipipe/store/datasets/bnetza_mastr_gsgk_region/config.yml b/digipipe/store/datasets/bnetza_mastr_gsgk_region/config.yml new file mode 100644 index 00000000..51a0fe1d --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_gsgk_region/config.yml @@ -0,0 +1,31 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Filter settings +attributes: + {"EinheitMastrNummer": "mastr_id", + "KwkMastrNummer": "kwk_mastr_id", + "EinheitBetriebsstatus": "status", + "LokationMastrNummer": "mastr_location_id", + "Postleitzahl": "zip_code", + "Ort": "city", + "Laengengrad": "lon", + "Breitengrad": "lat", + "Inbetriebnahmedatum": "commissioning_date", + "GeplantesInbetriebnahmedatum": "commissioning_date_planned", + "DatumEndgueltigeStilllegung": "decommissioning_date", + "NameStromerzeugungseinheit": "name", + "Nettonennleistung": "capacity_net", + "Bruttoleistung": "capacity_gross", + "ThermischeNutzleistung": "th_capacity", + "Einheittyp": "type", + "Einspeisungsart": "feedin_type", + "Technologie": "technology", + "NetzbetreiberpruefungStatus": "validated_by_system_operator"} +attributes_filter: + {"Land": "Deutschland", + "EinheitBetriebsstatus": ["In Betrieb", "In Planung"]} + +# Output settings +layer: bnetza_mastr_gsgk diff --git a/digipipe/store/datasets/bnetza_mastr_gsgk_region/create.smk b/digipipe/store/datasets/bnetza_mastr_gsgk_region/create.smk new file mode 100644 index 00000000..e72ac087 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_gsgk_region/create.smk @@ -0,0 +1,58 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +import geopandas as gpd + +from digipipe.scripts.datasets.mastr import create_stats_per_municipality +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG, +) + +DATASET_PATH = get_abs_dataset_path("datasets", "bnetza_mastr_gsgk_region") +SOURCE_DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bnetza_mastr", data_dir=True +) + + +rule create: + """ + Extract gsgk units for region + """ + input: + units=SOURCE_DATASET_PATH / "bnetza_mastr_gsgk_raw.csv", + locations=SOURCE_DATASET_PATH + / "bnetza_mastr_locations_extended_raw.csv", + gridconn=SOURCE_DATASET_PATH / "bnetza_mastr_grid_connections_raw.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG, + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_gsgk_region.gpkg", + outfile_agg=DATASET_PATH / "data" / "bnetza_mastr_gsgk_agg_region.gpkg", + params: + config_file=DATASET_PATH / "config.yml", + script: + DATASET_PATH / "scripts" / "create.py" + + +rule create_power_stats_muns: + """ + Create stats on installed count of units and power per mun + """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_gsgk_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + DATASET_PATH / "data" / "bnetza_mastr_gsgk_stats_muns.csv", + run: + units = create_stats_per_municipality( + units_df=gpd.read_file(input.units), + muns=gpd.read_file(input.region_muns), + column="capacity_net", + ) + units["capacity_net"] = units["capacity_net"].div(1e3) # kW to MW + units.to_csv(output[0]) diff --git a/digipipe/store/datasets/bnetza_mastr_gsgk_region/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_gsgk_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_gsgk_region/dataset.md b/digipipe/store/datasets/bnetza_mastr_gsgk_region/dataset.md new file mode 100644 index 00000000..27308310 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_gsgk_region/dataset.md @@ -0,0 +1,23 @@ +# Geo- oder Solarthermie-, Grubengas- und Klärschlamm-Anlagen + +Anlagen der Geo- oder Solarthermie, Grubengas und Klärschlamm in der Region +aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_gsgk_stats_muns.csv`. 
diff --git a/digipipe/store/datasets/bnetza_mastr_gsgk_region/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_gsgk_region/scripts/create.py new file mode 100644 index 00000000..ca48097d --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_gsgk_region/scripts/create.py @@ -0,0 +1,117 @@ +import geopandas as gpd +import pandas as pd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.datasets import mastr +from digipipe.scripts.geo import ( + overlay, + rename_filter_attributes, + write_geofile, +) +from digipipe.store.utils import ( + PATH_TO_REGION_DISTRICTS_GPKG, + get_names_from_nuts, +) + + +def process() -> None: + attrs = snakemake.config["attributes"] + attrs_filter = snakemake.config["attributes_filter"] + attrs_filter["Landkreis"] = get_names_from_nuts( + PATH_TO_REGION_DISTRICTS_GPKG, + GLOBAL_CONFIG["global"]["geodata"]["nuts"], + ) + + units = pd.read_csv( + snakemake.input.units, + usecols=set(attrs.keys()) | set(attrs_filter.keys()), + dtype={"Postleitzahl": str}, + ) + + units = rename_filter_attributes( + gdf=units, + attrs_filter_by_values=attrs_filter, + attrs_mapping=attrs, + ).set_index("mastr_id") + + units = mastr.add_voltage_level( + units_df=units, + locations_path=snakemake.input.locations, + gridconn_path=snakemake.input.gridconn, + ) + + # Add geometry and drop units without coords and + # add column to indicate that location from original data was used + units_with_geom = mastr.add_geometry(units) + units_with_geom = units_with_geom.assign( + geometry_approximated=0, + ) + + units_without_geom = units.loc[(units.lon.isna() | units.lat.isna())].drop( + columns=["lon", "lat"] + ) + + # Add geometry for all units without coords (<=30 kW) and + # add column to indicate that location was inferred by geocoding + if len(units_without_geom) > 0: + ( + units_with_inferred_geom_gdf, + units_with_inferred_geom_agg_gdf, + ) = mastr.geocode_units_wo_geometry( + units_without_geom, + columns_agg_functions={ + "capacity_net": ("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum"), + "th_capacity": ("th_capacity", "sum"), + }, + ) + + # Merge both GDFs + units = pd.concat([units_with_geom, units_with_inferred_geom_gdf]) + + units_agg = pd.concat( + [ + units_with_geom.assign(unit_count=1), + units_with_inferred_geom_agg_gdf, + ] + ) + else: + units = units_with_geom + units_agg = units_with_geom.assign(unit_count=1) + + # Clip to region and add mun and district ids + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + + write_geofile( + gdf=units, + file=snakemake.output.outfile, + layer_name=snakemake.config["layer"], + ) + write_geofile( + gdf=units_agg, + file=snakemake.output.outfile_agg, + layer_name=snakemake.config["layer"], + ) + + +process() diff --git a/digipipe/store/datasets/bnetza_mastr_hydro_region/config.yml b/digipipe/store/datasets/bnetza_mastr_hydro_region/config.yml new file mode 100644 index 
00000000..0cfdd71f --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_hydro_region/config.yml @@ -0,0 +1,30 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Filter settings +attributes: + {"EinheitMastrNummer": "mastr_id", + "KwkMastrNummer": "kwk_mastr_id", + "EinheitBetriebsstatus": "status", + "LokationMastrNummer": "mastr_location_id", + "Postleitzahl": "zip_code", + "Ort": "city", + "Laengengrad": "lon", + "Breitengrad": "lat", + "Inbetriebnahmedatum": "commissioning_date", + "GeplantesInbetriebnahmedatum": "commissioning_date_planned", + "DatumEndgueltigeStilllegung": "decommissioning_date", + "NameStromerzeugungseinheit": "name", + "Nettonennleistung": "capacity_net", + "Bruttoleistung": "capacity_gross", + "ArtDerWasserkraftanlage": "plant_type", + "ArtDesZuflusses": "water_origin", + "Einspeisungsart": "feedin_type", + "NetzbetreiberpruefungStatus": "validated_by_system_operator"} +attributes_filter: + {"Land": "Deutschland", + "EinheitBetriebsstatus": ["In Betrieb", "In Planung"]} + +# Output settings +layer: bnetza_mastr_hydro diff --git a/digipipe/store/datasets/bnetza_mastr_hydro_region/create.smk b/digipipe/store/datasets/bnetza_mastr_hydro_region/create.smk new file mode 100644 index 00000000..8429d6a5 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_hydro_region/create.smk @@ -0,0 +1,58 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +import geopandas as gpd + +from digipipe.scripts.datasets.mastr import create_stats_per_municipality +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG, +) + +DATASET_PATH = get_abs_dataset_path("datasets", "bnetza_mastr_hydro_region") +SOURCE_DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bnetza_mastr", data_dir=True +) + + +rule create: + """ + Extract hydro units for region + """ + input: + units=SOURCE_DATASET_PATH / "bnetza_mastr_hydro_raw.csv", + locations=SOURCE_DATASET_PATH + / "bnetza_mastr_locations_extended_raw.csv", + gridconn=SOURCE_DATASET_PATH / "bnetza_mastr_grid_connections_raw.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG, + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_hydro_region.gpkg", + outfile_agg=DATASET_PATH / "data" / "bnetza_mastr_hydro_agg_region.gpkg", + params: + config_file=DATASET_PATH / "config.yml", + script: + DATASET_PATH / "scripts" / "create.py" + + +rule create_power_stats_muns: + """ + Create stats on installed count of units and power per mun + """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_hydro_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + DATASET_PATH / "data" / "bnetza_mastr_hydro_stats_muns.csv", + run: + units = create_stats_per_municipality( + units_df=gpd.read_file(input.units), + muns=gpd.read_file(input.region_muns), + column="capacity_net", + ) + units["capacity_net"] = units["capacity_net"].div(1e3) # kW to MW + units.to_csv(output[0]) diff --git a/digipipe/store/datasets/bnetza_mastr_hydro_region/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_hydro_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_hydro_region/dataset.md 
b/digipipe/store/datasets/bnetza_mastr_hydro_region/dataset.md new file mode 100644 index 00000000..bca9d68e --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_hydro_region/dataset.md @@ -0,0 +1,22 @@ +# Wasserkraftanlagen + +Wasserkraftanlagen in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_hydro_stats_muns.csv`. diff --git a/digipipe/store/datasets/bnetza_mastr_hydro_region/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_hydro_region/scripts/create.py new file mode 100644 index 00000000..c2314120 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_hydro_region/scripts/create.py @@ -0,0 +1,116 @@ +import geopandas as gpd +import pandas as pd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.datasets import mastr +from digipipe.scripts.geo import ( + overlay, + rename_filter_attributes, + write_geofile, +) +from digipipe.store.utils import ( + PATH_TO_REGION_DISTRICTS_GPKG, + get_names_from_nuts, +) + + +def process() -> None: + attrs = snakemake.config["attributes"] + attrs_filter = snakemake.config["attributes_filter"] + attrs_filter["Landkreis"] = get_names_from_nuts( + PATH_TO_REGION_DISTRICTS_GPKG, + GLOBAL_CONFIG["global"]["geodata"]["nuts"], + ) + + units = pd.read_csv( + snakemake.input.units, + usecols=set(attrs.keys()) | set(attrs_filter.keys()), + dtype={"Postleitzahl": str}, + ) + + units = rename_filter_attributes( + gdf=units, + attrs_filter_by_values=attrs_filter, + attrs_mapping=attrs, + ).set_index("mastr_id") + + units = mastr.add_voltage_level( + units_df=units, + locations_path=snakemake.input.locations, + gridconn_path=snakemake.input.gridconn, + ) + + # Add geometry and drop units without coords and + # add column to indicate that location from original data was used + units_with_geom = mastr.add_geometry(units) + units_with_geom = units_with_geom.assign( + geometry_approximated=0, + ) + + units_without_geom = units.loc[(units.lon.isna() | units.lat.isna())].drop( + columns=["lon", "lat"] + ) + + # Add geometry for all units without coords (<=30 kW) and + # add column to indicate that location was inferred by geocoding + if len(units_without_geom) > 0: + ( + units_with_inferred_geom_gdf, + units_with_inferred_geom_agg_gdf, + ) = mastr.geocode_units_wo_geometry( + units_without_geom, + columns_agg_functions={ + "capacity_net": ("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum"), + }, + ) + + # Merge both GDFs + units = pd.concat([units_with_geom, units_with_inferred_geom_gdf]) + + units_agg = pd.concat( + [ + 
units_with_geom.assign(unit_count=1), + units_with_inferred_geom_agg_gdf, + ] + ) + else: + units = units_with_geom + units_agg = units_with_geom.assign(unit_count=1) + + # Clip to region and add mun and district ids + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + + write_geofile( + gdf=units, + file=snakemake.output.outfile, + layer_name=snakemake.config["layer"], + ) + write_geofile( + gdf=units_agg, + file=snakemake.output.outfile_agg, + layer_name=snakemake.config["layer"], + ) + + +process() diff --git a/digipipe/store/datasets/bnetza_mastr_pv_ground_region/config.yml b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/config.yml new file mode 100644 index 00000000..016a39f3 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/config.yml @@ -0,0 +1,42 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Filter settings +attributes: + {"EinheitMastrNummer": "mastr_id", + "EinheitBetriebsstatus": "status", + "LokationMastrNummer": "mastr_location_id", + "Postleitzahl": "zip_code", + "Ort": "city", + "Laengengrad": "lon", + "Breitengrad": "lat", + "Inbetriebnahmedatum": "commissioning_date", + "GeplantesInbetriebnahmedatum": "commissioning_date_planned", + "DatumEndgueltigeStilllegung": "decommissioning_date", + "NameStromerzeugungseinheit": "name", + "Bruttoleistung": "capacity_gross", + "Nettonennleistung": "capacity_net", + "Lage": "site_type", + "Einspeisungsart": "feedin_type", + "AnzahlModule": "module_count", + "Nutzungsbereich": "usage_sector", + "Hauptausrichtung": "orientation_primary", + "HauptausrichtungNeigungswinkel": "orientation_primary_angle", + "Nebenausrichtung": "orientation_secondary", + "NebenausrichtungNeigungswinkel": "orientation_secondary_angle", + "EinheitlicheAusrichtungUndNeigungswinkel": "orientation_uniform", + "Leistungsbegrenzung": "power_limitation", + "ArtDerFlaeche": "area_type", + "InAnspruchGenommeneFlaeche": "area_occupied", + "InAnspruchGenommeneAckerflaeche": "agricultural_area_occupied", + "Buergerenergie": "citizens_unit", + "MieterstromZugeordnet": "landlord_to_tenant_electricity", + "NetzbetreiberpruefungStatus": "validated_by_system_operator"} +attributes_filter: + {"Land": "Deutschland", + "EinheitBetriebsstatus": ["In Betrieb", "In Planung"], + "Lage": "Freifläche"} + +# Output settings +layer: bnetza_mastr_pv_ground diff --git a/digipipe/store/datasets/bnetza_mastr_pv_ground_region/create.smk b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/create.smk new file mode 100644 index 00000000..8322c96d --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/create.smk @@ -0,0 +1,107 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +import geopandas as gpd +import pandas as pd + +from digipipe.scripts.datasets.mastr import create_stats_per_municipality +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG, +) + +DATASET_PATH = get_abs_dataset_path("datasets", "bnetza_mastr_pv_ground_region") +SOURCE_DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bnetza_mastr", data_dir=True +) + + +rule create: + """ + Extract ground-mounted PV plants for region + """ + input: + units=SOURCE_DATASET_PATH / "bnetza_mastr_solar_raw.csv", + locations=SOURCE_DATASET_PATH + / "bnetza_mastr_locations_extended_raw.csv", + gridconn=SOURCE_DATASET_PATH / "bnetza_mastr_grid_connections_raw.csv", + unit_correction=get_abs_dataset_path( + "raw", "bnetza_mastr_correction_region" + ) + / "data" + / "bnetza_mastr_pv_ground_region_correction.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG, + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_pv_ground_region.gpkg", + outfile_agg=DATASET_PATH + / "data" + / "bnetza_mastr_pv_ground_agg_region.gpkg", + params: + config_file=DATASET_PATH / "config.yml", + script: + DATASET_PATH / "scripts" / "create.py" + + +rule create_power_stats_muns: + """ + Create stats on installed count of units and power per mun + """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_pv_ground_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + DATASET_PATH / "data" / "bnetza_mastr_pv_ground_stats_muns.csv", + run: + units = create_stats_per_municipality( + units_df=gpd.read_file(input.units), + muns=gpd.read_file(input.region_muns), + column="capacity_net", + ) + units["capacity_net"] = units["capacity_net"].div(1e3) # kW to MW + units.to_csv(output[0]) + + +rule create_development_over_time: + """ + Create stats on development (per year) of cumulative total installed + capacity and cumulative number of operating units + """ + input: + agg_region=DATASET_PATH / "data" / "bnetza_mastr_pv_ground_region.gpkg", + output: + DATASET_PATH + / "data" + / "bnetza_mastr_pv_ground_development_over_time.csv", + run: + df = gpd.read_file(input.agg_region) + df = df.loc[df["status"] == "In Betrieb"] + df["commissioning_date"] = pd.to_datetime(df["commissioning_date"]) + + df["year"] = df["commissioning_date"].dt.year + + df_capacity_over_time = ( + df.groupby("year")["capacity_net"] + .sum() + .cumsum() # Apply cumulative sum + .reset_index() + ) + + df_units_cumulative = ( + df.groupby("year") + .agg(unit_count=("mastr_id", "count")) + .cumsum() + .reset_index() + ) + df_combined = df_capacity_over_time.merge( + df_units_cumulative, on="year" + ) + df_combined["capacity_net"] = ( + df_combined["capacity_net"].div(1e3).round(1) + ) + df_combined["year"] = df_combined["year"].astype(int) + df_combined.to_csv(output[0], index=False) diff --git a/digipipe/store/datasets/bnetza_mastr_pv_ground_region/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_pv_ground_region/dataset.md b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/dataset.md new file mode 100644 index 00000000..03025825 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/dataset.md @@ -0,0 +1,30 @@ +# Photovoltaik-Freiflächenanlagen + +Photovoltaik-Freiflächenanlagen in der Region aus MaStR-Registerdaten als +Geopackage. 
+Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_pv_ground_stats_muns.csv`. + +## Datenkorrektur + +Einige Anlagen sind hinsichtlich Ihrer geografischen Lage oder Typs fehlerhaft. +Anhand des Datensatzes +[bnetza_mastr_correction_region](../../raw/bnetza_mastr_correction_region/dataset.md) +wird für diese Anlagen eine Datenkorrektur vorgenommen. diff --git a/digipipe/store/datasets/bnetza_mastr_pv_ground_region/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/scripts/create.py new file mode 100644 index 00000000..81b093e3 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_pv_ground_region/scripts/create.py @@ -0,0 +1,128 @@ +import geopandas as gpd +import pandas as pd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.datasets import mastr +from digipipe.scripts.geo import ( + overlay, + rename_filter_attributes, + write_geofile, +) +from digipipe.store.utils import ( + PATH_TO_REGION_DISTRICTS_GPKG, + get_names_from_nuts, +) + + +def process() -> None: + attrs = snakemake.config["attributes"] + attrs_filter = snakemake.config["attributes_filter"] + attrs_filter["Landkreis"] = get_names_from_nuts( + PATH_TO_REGION_DISTRICTS_GPKG, + GLOBAL_CONFIG["global"]["geodata"]["nuts"], + ) + + units = pd.read_csv( + snakemake.input.units, + usecols=set(attrs.keys()) | set(attrs_filter.keys()), + dtype={"Postleitzahl": str}, + index_col="EinheitMastrNummer", + ) + + # Apply corrections + unit_correction = pd.read_csv( + snakemake.input.unit_correction, + delimiter=";", + index_col="mastr_id", + ) + units = mastr.apply_manual_corrections( + units_df=units, + units_correction_df=unit_correction, + ) + + units = rename_filter_attributes( + gdf=units, + attrs_filter_by_values=attrs_filter, + attrs_mapping=attrs, + ).set_index("mastr_id") + + units = mastr.add_voltage_level( + units_df=units, + locations_path=snakemake.input.locations, + gridconn_path=snakemake.input.gridconn, + ) + + # Add geometry and drop units without coords and + # add column to indicate that location from original data was used + units_with_geom = mastr.add_geometry(units) + units_with_geom = units_with_geom.assign( + geometry_approximated=0, + ) + + units_without_geom = units.loc[(units.lon.isna() | units.lat.isna())].drop( + columns=["lon", "lat"] + ) + + # Add geometry for all units without coords (<=30 kW) and + # add column to indicate that location was inferred by geocoding + if len(units_without_geom) > 0: + ( + units_with_inferred_geom_gdf, + units_with_inferred_geom_agg_gdf, + ) = mastr.geocode_units_wo_geometry( + units_without_geom, + columns_agg_functions={ + "capacity_net": 
("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum"), + }, + ) + + # Merge both GDFs + units = pd.concat([units_with_geom, units_with_inferred_geom_gdf]) + + units_agg = pd.concat( + [ + units_with_geom.assign(unit_count=1), + units_with_inferred_geom_agg_gdf, + ] + ) + else: + units = units_with_geom + units_agg = units_with_geom.assign(unit_count=1) + + # Clip to region and add mun and district ids + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + + write_geofile( + gdf=units, + file=snakemake.output.outfile, + layer_name=snakemake.config["layer"], + ) + write_geofile( + gdf=units_agg, + file=snakemake.output.outfile_agg, + layer_name=snakemake.config["layer"], + ) + + +process() diff --git a/digipipe/store/datasets/bnetza_mastr_pv_roof_region/config.yml b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/config.yml new file mode 100644 index 00000000..c10615d4 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/config.yml @@ -0,0 +1,46 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Filter settings +attributes: + {"EinheitMastrNummer": "mastr_id", + "EinheitBetriebsstatus": "status", + "LokationMastrNummer": "mastr_location_id", + "Postleitzahl": "zip_code", + "Ort": "city", + "Laengengrad": "lon", + "Breitengrad": "lat", + "Inbetriebnahmedatum": "commissioning_date", + "GeplantesInbetriebnahmedatum": "commissioning_date_planned", + "DatumEndgueltigeStilllegung": "decommissioning_date", + "NameStromerzeugungseinheit": "name", + "Bruttoleistung": "capacity_gross", + "Nettonennleistung": "capacity_net", + "Lage": "site_type", + "Einspeisungsart": "feedin_type", + "AnzahlModule": "module_count", + "Nutzungsbereich": "usage_sector", + "Hauptausrichtung": "orientation_primary", + "HauptausrichtungNeigungswinkel": "orientation_primary_angle", + "Nebenausrichtung": "orientation_secondary", + "NebenausrichtungNeigungswinkel": "orientation_secondary_angle", + "EinheitlicheAusrichtungUndNeigungswinkel": "orientation_uniform", + "Leistungsbegrenzung": "power_limitation", + "ArtDerFlaeche": "area_type", + "InAnspruchGenommeneFlaeche": "area_occupied", + "InAnspruchGenommeneAckerflaeche": "agricultural_area_occupied", + "Buergerenergie": "citizens_unit", + "MieterstromZugeordnet": "landlord_to_tenant_electricity", + "NetzbetreiberpruefungStatus": "validated_by_system_operator"} +attributes_filter: + {"Land": "Deutschland", + "EinheitBetriebsstatus": ["In Betrieb", "In Planung"], + "Lage": [ + "Bauliche Anlagen (Hausdach, Gebäude und Fassade)", + "Bauliche Anlagen (Sonstige)", + "Steckerfertige Erzeugungsanlage (sog. 
Plug-In- oder Balkon-PV-Anlage)" + ]} + +# Output settings +layer: bnetza_mastr_pv_roof diff --git a/digipipe/store/datasets/bnetza_mastr_pv_roof_region/create.smk b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/create.smk new file mode 100644 index 00000000..49551482 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/create.smk @@ -0,0 +1,105 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +import geopandas as gpd +import pandas as pd + +from digipipe.scripts.datasets.mastr import create_stats_per_municipality +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG, +) + +DATASET_PATH = get_abs_dataset_path("datasets", "bnetza_mastr_pv_roof_region") +SOURCE_DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bnetza_mastr", data_dir=True +) + + +rule create: + """ + Extract roof-mounted PV plants for region + """ + input: + units=SOURCE_DATASET_PATH / "bnetza_mastr_solar_raw.csv", + locations=SOURCE_DATASET_PATH + / "bnetza_mastr_locations_extended_raw.csv", + gridconn=SOURCE_DATASET_PATH / "bnetza_mastr_grid_connections_raw.csv", + unit_correction=get_abs_dataset_path( + "raw", "bnetza_mastr_correction_region" + ) + / "data" + / "bnetza_mastr_pv_roof_region_correction.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG, + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_pv_roof_region.gpkg", + outfile_agg=DATASET_PATH + / "data" + / "bnetza_mastr_pv_roof_agg_region.gpkg", + params: + config_file=DATASET_PATH / "config.yml", + script: + DATASET_PATH / "scripts" / "create.py" + + +rule create_power_stats_muns: + """ + Create stats on installed count of units and power per mun + """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_pv_roof_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + DATASET_PATH / "data" / "bnetza_mastr_pv_roof_stats_muns.csv", + run: + units = create_stats_per_municipality( + units_df=gpd.read_file(input.units), + muns=gpd.read_file(input.region_muns), + column="capacity_net", + ) + units["capacity_net"] = units["capacity_net"].div(1e3) # kW to MW + units.to_csv(output[0]) + + +rule create_development_over_time: + """ + Create stats on development (per year) of cumulative total installed + capacity and cumulative number of operating units + """ + input: + agg_region=DATASET_PATH / "data" / "bnetza_mastr_pv_roof_region.gpkg", + output: + DATASET_PATH / "data" / "bnetza_mastr_pv_roof_development_over_time.csv", + run: + df = gpd.read_file(input.agg_region) + df = df.loc[df["status"] == "In Betrieb"] + df["commissioning_date"] = pd.to_datetime(df["commissioning_date"]) + + df["year"] = df["commissioning_date"].dt.year + + df_capacity_over_time = ( + df.groupby("year")["capacity_net"] + .sum() + .cumsum() # Apply cumulative sum + .reset_index() + ) + + df_units_cumulative = ( + df.groupby("year") + .agg(unit_count=("mastr_id", "count")) + .cumsum() + .reset_index() + ) + df_combined = df_capacity_over_time.merge( + df_units_cumulative, on="year" + ) + df_combined["capacity_net"] = ( + df_combined["capacity_net"].div(1e3).round(1) + ) + df_combined["year"] = df_combined["year"].astype(int) + df_combined.to_csv(output[0], index=False) diff --git a/digipipe/store/datasets/bnetza_mastr_pv_roof_region/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/data/.gitkeep new file 
mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_pv_roof_region/dataset.md b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/dataset.md new file mode 100644 index 00000000..504210f3 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/dataset.md @@ -0,0 +1,30 @@ +# Photovoltaik-Aufdachanlagen + +Photovoltaik-Aufdachanlagen in der Region aus MaStR-Registerdaten als +Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_pv_roof_stats_muns.csv`. + +## Datenkorrektur + +Einige Anlagen sind hinsichtlich Ihrer geografischen Lage oder Typs fehlerhaft. +Anhand des Datensatzes +[bnetza_mastr_correction_region](../../raw/bnetza_mastr_correction_region/dataset.md) +wird für diese Anlagen eine Datenkorrektur vorgenommen. diff --git a/digipipe/store/datasets/bnetza_mastr_pv_roof_region/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/scripts/create.py new file mode 100644 index 00000000..b395aaa3 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_pv_roof_region/scripts/create.py @@ -0,0 +1,129 @@ +import geopandas as gpd +import pandas as pd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.datasets import mastr +from digipipe.scripts.geo import ( + overlay, + rename_filter_attributes, + write_geofile, +) +from digipipe.store.utils import ( + PATH_TO_REGION_DISTRICTS_GPKG, + get_names_from_nuts, +) + + +def process() -> None: + attrs = snakemake.config["attributes"] + attrs_filter = snakemake.config["attributes_filter"] + attrs_filter["Landkreis"] = get_names_from_nuts( + PATH_TO_REGION_DISTRICTS_GPKG, + GLOBAL_CONFIG["global"]["geodata"]["nuts"], + ) + + units = pd.read_csv( + snakemake.input.units, + usecols=set(attrs.keys()) | set(attrs_filter.keys()), + dtype={"Postleitzahl": str}, + index_col="EinheitMastrNummer", + ) + + # Apply corrections + unit_correction = pd.read_csv( + snakemake.input.unit_correction, + delimiter=";", + index_col="mastr_id", + ) + units = mastr.apply_manual_corrections( + units_df=units, + units_correction_df=unit_correction, + ) + + units = rename_filter_attributes( + gdf=units, + attrs_filter_by_values=attrs_filter, + attrs_mapping=attrs, + ).set_index("mastr_id") + + units = mastr.add_voltage_level( + units_df=units, + locations_path=snakemake.input.locations, + gridconn_path=snakemake.input.gridconn, + drop_location_id=False, + ) + + # Add geometry and drop units without coords and + # add column to indicate that location from original data was used + units_with_geom = mastr.add_geometry(units) + units_with_geom = units_with_geom.assign( + 
geometry_approximated=0, + ) + + units_without_geom = units.loc[(units.lon.isna() | units.lat.isna())].drop( + columns=["lon", "lat"] + ) + + # Add geometry for all units without coords (<=30 kW) and + # add column to indicate that location was inferred by geocoding + if len(units_without_geom) > 0: + ( + units_with_inferred_geom_gdf, + units_with_inferred_geom_agg_gdf, + ) = mastr.geocode_units_wo_geometry( + units_without_geom, + columns_agg_functions={ + "capacity_net": ("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum"), + }, + ) + + # Merge both GDFs + units = pd.concat([units_with_geom, units_with_inferred_geom_gdf]) + + units_agg = pd.concat( + [ + units_with_geom.assign(unit_count=1), + units_with_inferred_geom_agg_gdf, + ] + ) + else: + units = units_with_geom + units_agg = units_with_geom.assign(unit_count=1) + + # Clip to region and add mun and district ids + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + + write_geofile( + gdf=units, + file=snakemake.output.outfile, + layer_name=snakemake.config["layer"], + ) + write_geofile( + gdf=units_agg, + file=snakemake.output.outfile_agg, + layer_name=snakemake.config["layer"], + ) + + +process() diff --git a/digipipe/store/datasets/bnetza_mastr_storage_region/config.yml b/digipipe/store/datasets/bnetza_mastr_storage_region/config.yml new file mode 100644 index 00000000..c85268eb --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_storage_region/config.yml @@ -0,0 +1,49 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
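For orientation, the correction step used in the create.py scripts above (apply_manual_corrections together with the bnetza_mastr_correction_region CSV) can be pictured roughly as an index-aligned overwrite of selected attributes. The sketch below uses made-up unit IDs and columns and is not the pipeline's actual implementation, which lives in digipipe.scripts.datasets.mastr and may handle cases such as dropped units differently.

```python
# Illustrative sketch only: applying a per-unit correction table.
# The real logic is mastr.apply_manual_corrections and may differ.
import pandas as pd

units = pd.DataFrame(
    {"Laengengrad": [12.1, 99.9], "Breitengrad": [51.8, 51.9]},
    index=pd.Index(["SEE900000001", "SEE900000002"], name="EinheitMastrNummer"),
)
corrections = pd.DataFrame(
    {"Laengengrad": [12.3]},  # corrected longitude for one unit
    index=pd.Index(["SEE900000002"], name="EinheitMastrNummer"),
)

# Overwrite only the attributes present in the correction table
units.update(corrections)
print(units)
```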
# +############################################################################## + +# Threshold to split between large and small battery storages in MWh +battery_size_threshold: 0.1 + +# Filter settings +unit_attributes: + {"EinheitMastrNummer": "mastr_id", + "EinheitBetriebsstatus": "status", + "LokationMastrNummer": "mastr_location_id", + "Postleitzahl": "zip_code", + "Ort": "city", + "Laengengrad": "lon", + "Breitengrad": "lat", + "Inbetriebnahmedatum": "commissioning_date", + "GeplantesInbetriebnahmedatum": "commissioning_date_planned", + "DatumEndgueltigeStilllegung": "decommissioning_date", + "NameStromerzeugungseinheit": "name", + "Bruttoleistung": "capacity_gross", + "Nettonennleistung": "capacity_net", + "Einspeisungsart": "feedin_type", + "Einheittyp": "type", + "Batterietechnologie": "battery_technology", + "AcDcKoppelung": "ac_dc_coupling", + "Pumpspeichertechnologie": "pumped_hydro_technology", + "Notstromaggregat": "emergency_generator", + "NetzbetreiberpruefungStatus": "validated_by_system_operator"} +unit_attributes_filter: + {"Land": "Deutschland", + "EinheitBetriebsstatus": ["In Betrieb", "In Planung"]} + +plant_attributes: + {"VerknuepfteEinheit": "unit_mastr_id", + "MastrNummer": "plant_mastr_id", + "NutzbareSpeicherkapazitaet": "storage_capacity"} + +# Output settings +layer: bnetza_mastr_storage + +# Filter for PV home storages +home_storages: + only_single_storages: true + storage_capacity_thres_max: 30 + storage_power_thres_max: 30 + only_single_pv_roof_units: true + pv_roof_capacity_thres_min: 2 + pv_roof_capacity_thres_max: 30 diff --git a/digipipe/store/datasets/bnetza_mastr_storage_region/create.smk b/digipipe/store/datasets/bnetza_mastr_storage_region/create.smk new file mode 100644 index 00000000..0756a2ff --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_storage_region/create.smk @@ -0,0 +1,224 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
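The battery_size_threshold defined in the config above (0.1 MWh) separates large from small battery storages. A minimal sketch of such a split on toy data follows; the real split is performed later in the create_power_stats_muns rule after converting the capacities to MWh.

```python
# Minimal sketch: splitting storage units at battery_size_threshold (MWh).
# Toy data; the pipeline applies the same idea to the regional GeoPackage.
import pandas as pd

battery_size_threshold = 0.1  # MWh, as in config.yml

units = pd.DataFrame({"storage_capacity": [0.008, 0.05, 0.4, 2.5]})  # MWh
large = units.loc[units.storage_capacity >= battery_size_threshold]
small = units.loc[units.storage_capacity < battery_size_threshold]

print(f"{len(large)} large, {len(small)} small storages")  # 2 large, 2 small
```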
+""" + +import json +import geopandas as gpd + +from digipipe.scripts.datasets.mastr import create_stats_per_municipality +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG, +) + +DATASET_PATH = get_abs_dataset_path("datasets", "bnetza_mastr_storage_region") +SOURCE_DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bnetza_mastr", data_dir=True +) + + +rule create: + """ + Extract storages for region + """ + input: + units=SOURCE_DATASET_PATH / "bnetza_mastr_storage_raw.csv", + units_capacity=SOURCE_DATASET_PATH / "bnetza_mastr_storage_unit_raw.csv", + locations=SOURCE_DATASET_PATH + / "bnetza_mastr_locations_extended_raw.csv", + gridconn=SOURCE_DATASET_PATH / "bnetza_mastr_grid_connections_raw.csv", + pv_roof_units=rules.datasets_bnetza_mastr_pv_roof_region_create.output.outfile, + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG, + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_storage_region.gpkg", + outfile_agg=DATASET_PATH + / "data" + / "bnetza_mastr_storage_agg_region.gpkg", + params: + config_file=DATASET_PATH / "config.yml", + script: + DATASET_PATH / "scripts" / "create.py" + + +rule create_power_stats_muns: + """ + Create stats on installed count of units and capacity per mun + """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_storage_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + total=DATASET_PATH / "data" / "bnetza_mastr_storage_stats_muns.csv", + large=DATASET_PATH + / "data" + / "bnetza_mastr_storage_large_stats_muns.csv", + small=DATASET_PATH + / "data" + / "bnetza_mastr_storage_small_stats_muns.csv", + run: + units = gpd.read_file(input.units) + units["storage_capacity"] = units["storage_capacity"].div( + 1e3 + ) # kW to MW + muns = gpd.read_file(input.region_muns) + print("Battery storages:") + + # All storage units + units_total = create_stats_per_municipality( + units_df=units, muns=muns, column="storage_capacity" + ) + print( + f" Storage capacity (total): " + f"{units_total.storage_capacity.sum().round(3)} MWh" + ) + units_total.to_csv(output.total) + + # Large storage units + units_large = create_stats_per_municipality( + units_df=units.loc[ + units.storage_capacity >= config.get("battery_size_threshold") + ], + muns=muns, + column="storage_capacity", + ) + print( + f" Storage capacity (large): " + f"{units_large.storage_capacity.sum().round(3)} MWh" + ) + units_large.to_csv(output.large) + + # Small storage units + units_small = create_stats_per_municipality( + units_df=units.loc[ + units.storage_capacity < config.get("battery_size_threshold") + ], + muns=muns, + column="storage_capacity", + ) + print( + f" Storage capacity (small): " + f"{units_small.storage_capacity.sum().round(3)} MWh" + ) + units_small.to_csv(output.small) + + +rule create_storage_pv_roof_stats: + """ + Calculate share of PV with storages, specific capacity and specific power of + storages relative to PV power. 
+ """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_storage_region.gpkg", + pv_roof_units=rules.datasets_bnetza_mastr_pv_roof_region_create.output.outfile, + output: + DATASET_PATH / "data" / "bnetza_mastr_storage_pv_roof.json", + run: + # PV home units: guess PV home systems + pv_roof_units = gpd.read_file(input.pv_roof_units)[ + ["mastr_id", "capacity_net"] + ] + hs_cfg = config.get("home_storages") + mask = ( + pv_roof_units.capacity_net + >= hs_cfg.get("pv_roof_capacity_thres_min") + ) & ( + pv_roof_units.capacity_net + <= hs_cfg.get("pv_roof_capacity_thres_max") + ) + pv_roof_units_small = pv_roof_units.loc[mask] + + # Storage units: calc specific values + units = gpd.read_file(input.units)[ + [ + "capacity_net", + "storage_capacity", + "mastr_location_id", + "pv_roof_unit_count", + "pv_roof_unit_capacity_sum", + ] + ] + units_unique_loc = units.groupby("mastr_location_id").agg( + storage_count=("storage_capacity", "count"), + storage_capacity=("storage_capacity", "sum"), + capacity_net=("capacity_net", "mean"), + pv_roof_unit_count=("pv_roof_unit_count", "first"), + pv_roof_unit_capacity_sum=("pv_roof_unit_capacity_sum", "first"), + ) + + # Filter storages + mask = ( + ( + units_unique_loc.storage_count == 1 + if hs_cfg.get("only_single_storages") + else units_unique_loc.storage_count > 0 + ) + & ( + units_unique_loc.storage_capacity + <= hs_cfg.get("storage_capacity_thres_max") + ) + & ( + units_unique_loc.capacity_net + <= hs_cfg.get("storage_power_thres_max") + ) + & ( + units_unique_loc.pv_roof_unit_count == 1 + if hs_cfg.get("only_single_pv_roof_units") + else units_unique_loc.pv_roof_unit_count > 0 + ) + & ( + units_unique_loc.pv_roof_unit_capacity_sum + >= hs_cfg.get("pv_roof_capacity_thres_min") + ) + & ( + units_unique_loc.pv_roof_unit_capacity_sum + <= hs_cfg.get("pv_roof_capacity_thres_max") + ) + ) + units_unique_loc_small = units_unique_loc.loc[mask] + + with open(output[0], "w", encoding="utf8") as f: + json.dump( + { + "pv_roof_share": { + "all_storages": round( + len(units_unique_loc) / len(pv_roof_units), + 3, + ), + "home_storages": round( + len(units_unique_loc_small) + / len(pv_roof_units_small), + 3, + ), + }, + "specific_capacity": { + "all_storages": round( + units_unique_loc.storage_capacity.sum() + / units_unique_loc.pv_roof_unit_capacity_sum.sum(), + 2, + ), + "home_storages": round( + units_unique_loc_small.storage_capacity.sum() + / units_unique_loc_small.pv_roof_unit_capacity_sum.sum(), + 2, + ), + }, + "specific_power": { + "all_storages": round( + units_unique_loc.capacity_net.sum() + / units_unique_loc.pv_roof_unit_capacity_sum.sum(), + 2, + ), + "home_storages": round( + units_unique_loc_small.capacity_net.sum() + / units_unique_loc_small.pv_roof_unit_capacity_sum.sum(), + 2, + ), + }, + }, + f, + indent=4, + ) diff --git a/digipipe/store/datasets/bnetza_mastr_storage_region/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_storage_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_storage_region/dataset.md b/digipipe/store/datasets/bnetza_mastr_storage_region/dataset.md new file mode 100644 index 00000000..06469720 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_storage_region/dataset.md @@ -0,0 +1,38 @@ +# Speicheranlagen + +Speicheranlagen in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. 
Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Es wird weiterhin geprüft, ob dem Speicher eine oder mehrere PV-Aufdachanlagen +zugeordnet sind, es wird die Anzahl und Summe der Nettonennleistung berechnet. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Weiterhin erfolgt eine Auswertung der installierten Gesamtleistung je Gemeinde: + +- Alle Speicher: `bnetza_mastr_storage_stats_muns.csv` +- Großspeicher (>=100 kWh): `bnetza_mastr_storage_large_stats_muns.csv` +- Kleinspeicher (<100 kWh): `bnetza_mastr_storage_small_stats_muns.csv` + +`bnetza_mastr_storage_pv_roof.json` enthält die spezifische Speicherkapazität +sowie spezifische Nennleistung der Speicher (bezogen auf die installierte +Leistung von PV-Aufdachanlagen), aggregiert für gesamte Region, für folgende +Randbedingungen: + +- Alle PV-Anlagen: `all_storages` +- PV-Anlagen mit 2..20 kWp sowie Batteriespeicher <20 kWh und <20 kW (kann in + [config.yml](config.yml) unter `home_storages` konfiguriert werden): + `home_storages` diff --git a/digipipe/store/datasets/bnetza_mastr_storage_region/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_storage_region/scripts/create.py new file mode 100644 index 00000000..ba70600b --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_storage_region/scripts/create.py @@ -0,0 +1,166 @@ +import geopandas as gpd +import pandas as pd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.datasets import mastr +from digipipe.scripts.geo import ( + overlay, + rename_filter_attributes, + write_geofile, +) +from digipipe.store.utils import ( + PATH_TO_REGION_DISTRICTS_GPKG, + get_names_from_nuts, +) + + +def process() -> None: + unit_attrs = snakemake.config["unit_attributes"] + unit_attrs_filter = snakemake.config["unit_attributes_filter"] + unit_attrs_filter["Landkreis"] = get_names_from_nuts( + PATH_TO_REGION_DISTRICTS_GPKG, + GLOBAL_CONFIG["global"]["geodata"]["nuts"], + ) + # Read units + units = pd.read_csv( + snakemake.input.units, + usecols=(set(unit_attrs.keys()) | set(unit_attrs_filter.keys())), + dtype={"Postleitzahl": str}, + ) + units = rename_filter_attributes( + gdf=units, + attrs_filter_by_values=unit_attrs_filter, + attrs_mapping=unit_attrs, + ).set_index("mastr_id") + + # Read plants (for storage capacity) + plants = pd.read_csv( + snakemake.input.units_capacity, + usecols=snakemake.config["plant_attributes"].keys(), + ) + plants = rename_filter_attributes( + gdf=plants, + attrs_mapping=snakemake.config["plant_attributes"], + ) + + # Merge storage capacity + units = units.merge( + plants, + left_on="mastr_id", + right_on="unit_mastr_id", + how="left", + ) + units_count_wo_capacity = len(units.loc[units.plant_mastr_id.isna()]) + if units_count_wo_capacity > 0: + print( + f"{units_count_wo_capacity} storages have no plant associated and " + f"hence no storage capacity assigned." 
+ ) + units.drop(columns=["unit_mastr_id", "plant_mastr_id"], inplace=True) + + units = mastr.add_voltage_level( + units_df=units, + locations_path=snakemake.input.locations, + gridconn_path=snakemake.input.gridconn, + drop_location_id=False, + ) + + # Add geometry and drop units without coords and + # add column to indicate that location from original data was used + units_with_geom = mastr.add_geometry(units) + units_with_geom = units_with_geom.assign( + geometry_approximated=0, + ) + + units_without_geom = units.loc[(units.lon.isna() | units.lat.isna())].drop( + columns=["lon", "lat"] + ) + + # Add geometry for all units without coords (<=30 kW) and + # add column to indicate that location was inferred by geocoding + if len(units_without_geom) > 0: + ( + units_with_inferred_geom_gdf, + units_with_inferred_geom_agg_gdf, + ) = mastr.geocode_units_wo_geometry( + units_without_geom, + columns_agg_functions={ + "capacity_net": ("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum"), + "storage_capacity": ("storage_capacity", "sum"), + }, + ) + + # Merge both GDFs + units = pd.concat([units_with_geom, units_with_inferred_geom_gdf]) + + units_agg = pd.concat( + [ + units_with_geom.assign(unit_count=1), + units_with_inferred_geom_agg_gdf, + ] + ) + else: + units = units_with_geom + units_agg = units_with_geom.assign(unit_count=1) + + # Clip to region and add mun and district ids + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + + # Search for associated PV roof units and save count and sum of nom. power + pv_roof_units = gpd.read_file(snakemake.input.pv_roof_units)[ + ["mastr_location_id", "capacity_net"] + ] + pv_roof_units = ( + pv_roof_units[["mastr_location_id", "capacity_net"]] + .groupby("mastr_location_id") + .agg( + pv_roof_unit_count=("capacity_net", "count"), + pv_roof_unit_capacity_sum=("capacity_net", "sum"), + ) + .reset_index() + ) + units = units.merge( + pv_roof_units, + left_on="mastr_location_id", + right_on="mastr_location_id", + how="left", + ) + units = units.assign( + pv_roof_unit_count=units.pv_roof_unit_count.fillna(0), + pv_roof_unit_capacity_sum=units.pv_roof_unit_capacity_sum.fillna(0), + ) + + write_geofile( + gdf=units, + file=snakemake.output.outfile, + layer_name=snakemake.config["layer"], + ) + write_geofile( + gdf=units_agg, + file=snakemake.output.outfile_agg, + layer_name=snakemake.config["layer"], + ) + + +process() diff --git a/digipipe/store/datasets/bnetza_mastr_wind_region/config.yml b/digipipe/store/datasets/bnetza_mastr_wind_region/config.yml new file mode 100644 index 00000000..60fbb8de --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_wind_region/config.yml @@ -0,0 +1,40 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
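The dataset descriptions and the geocode_units_wo_geometry calls refer to a zip-code/city geocoding fallback for units without coordinates. The snippet below is only a rough sketch of such a lookup using geopy/Nominatim; this choice of geocoder is an assumption, the pipeline's helper may use a different service, caching and rate limiting, and the call needs network access.

```python
# Rough sketch of a zip-code/city geocoding fallback (assumption: geopy with
# Nominatim). Units resolved this way get geometry_approximated=1.
from geopy.geocoders import Nominatim

geocoder = Nominatim(user_agent="digipipe-docs-sketch")
location = geocoder.geocode("06366 Köthen, Deutschland")
if location is not None:
    print(location.latitude, location.longitude)
```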
# +############################################################################## + +# Filter settings +attributes: + {"EinheitMastrNummer": "mastr_id", + "EinheitBetriebsstatus": "status", + "LokationMastrNummer": "mastr_location_id", + "Postleitzahl": "zip_code", + "Ort": "city", + "Laengengrad": "lon", + "Breitengrad": "lat", + "Inbetriebnahmedatum": "commissioning_date", + "GeplantesInbetriebnahmedatum": "commissioning_date_planned", + "DatumEndgueltigeStilllegung": "decommissioning_date", + "NameStromerzeugungseinheit": "name", + "NameWindpark": "name_park", + "Bruttoleistung": "capacity_gross", + "Nettonennleistung": "capacity_net", + "Lage": "site_type", + "Hersteller": "manufacturer_name", + "Typenbezeichnung": "type_name", + "Nabenhoehe": "hub_height", + "Rotordurchmesser": "rotor_diameter", + "AuflageAbschaltungLeistungsbegrenzung": "constraint_deactivation_sound_emission", + "AuflagenAbschaltungSchallimmissionsschutzNachts": "constraint_deactivation_sound_emission_night", + "AuflagenAbschaltungSchallimmissionsschutzTagsueber": "constraint_deactivation_sound_emission_day", + "AuflagenAbschaltungSchattenwurf": "constraint_deactivation_shadowing", + "AuflagenAbschaltungTierschutz": "constraint_deactivation_animals", + "AuflagenAbschaltungEiswurf": "constraint_deactivation_ice", + "Buergerenergie": "citizens_unit", + "NetzbetreiberpruefungStatus": "validated_by_system_operator"} +attributes_filter: + {"Land": "Deutschland", + "EinheitBetriebsstatus": ["In Betrieb", "In Planung"], + "Lage": "Windkraft an Land"} + +# Output settings +layer: bnetza_mastr_wind diff --git a/digipipe/store/datasets/bnetza_mastr_wind_region/create.smk b/digipipe/store/datasets/bnetza_mastr_wind_region/create.smk new file mode 100644 index 00000000..de9b6e21 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_wind_region/create.smk @@ -0,0 +1,98 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
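The attributes and attributes_filter mappings above drive a filter-and-rename step (rename_filter_attributes). A plain-pandas sketch of what that amounts to is shown below on toy data; the actual helper in digipipe.scripts.geo may behave differently in detail.

```python
# Sketch of filtering rows by attributes_filter and renaming columns via
# attributes; the real implementation is digipipe.scripts.geo.rename_filter_attributes.
import pandas as pd

attrs = {"EinheitMastrNummer": "mastr_id", "Nettonennleistung": "capacity_net"}
attrs_filter = {"EinheitBetriebsstatus": ["In Betrieb", "In Planung"]}

units = pd.DataFrame(
    {
        "EinheitMastrNummer": ["SEE1", "SEE2", "SEE3"],
        "Nettonennleistung": [3000.0, 4200.0, 2000.0],
        "EinheitBetriebsstatus": ["In Betrieb", "In Planung", "Endgültig stillgelegt"],
    }
)

for col, allowed in attrs_filter.items():
    units = units.loc[units[col].isin(allowed)]

units = units[list(attrs)].rename(columns=attrs)
print(units)  # two remaining units with English column names
```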
+""" + +import geopandas as gpd +import pandas as pd + +from digipipe.scripts.datasets.mastr import create_stats_per_municipality +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG, +) + +DATASET_PATH = get_abs_dataset_path("datasets", "bnetza_mastr_wind_region") +SOURCE_DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bnetza_mastr", data_dir=True +) + + +rule create: + """ + Extract wind turbines for region + """ + input: + units=SOURCE_DATASET_PATH / "bnetza_mastr_wind_raw.csv", + locations=SOURCE_DATASET_PATH + / "bnetza_mastr_locations_extended_raw.csv", + gridconn=SOURCE_DATASET_PATH / "bnetza_mastr_grid_connections_raw.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG, + output: + outfile=DATASET_PATH / "data" / "bnetza_mastr_wind_region.gpkg", + outfile_agg=DATASET_PATH / "data" / "bnetza_mastr_wind_agg_region.gpkg", + params: + config_file=DATASET_PATH / "config.yml", + script: + DATASET_PATH / "scripts" / "create.py" + + +rule create_power_stats_muns: + """ + Create stats on installed count of units and power per mun + """ + input: + units=DATASET_PATH / "data" / "bnetza_mastr_wind_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + DATASET_PATH / "data" / "bnetza_mastr_wind_stats_muns.csv", + run: + units = create_stats_per_municipality( + units_df=gpd.read_file(input.units), + muns=gpd.read_file(input.region_muns), + column="capacity_net", + ) + units["capacity_net"] = units["capacity_net"].div(1e3) # kW to MW + units.to_csv(output[0]) + + +rule create_development_over_time: + """ + Create stats on development (per year) of cumulative total installed + capacity and cumulative number of operating units + """ + input: + agg_region=DATASET_PATH / "data" / "bnetza_mastr_wind_region.gpkg", + output: + DATASET_PATH / "data" / "bnetza_mastr_wind_development_over_time.csv", + run: + df = gpd.read_file(input.agg_region) + df = df.loc[df["status"] == "In Betrieb"] + df["commissioning_date"] = pd.to_datetime(df["commissioning_date"]) + + df["year"] = df["commissioning_date"].dt.year + + df_capacity_over_time = ( + df.groupby("year")["capacity_net"] + .sum() + .cumsum() # Apply cumulative sum + .reset_index() + ) + + df_units_cumulative = ( + df.groupby("year") + .agg(unit_count=("mastr_id", "count")) + .cumsum() + .reset_index() + ) + df_combined = df_capacity_over_time.merge( + df_units_cumulative, on="year" + ) + df_combined["capacity_net"] = ( + df_combined["capacity_net"].div(1e3).round(1) + ) + df_combined["year"] = df_combined["year"].astype(int) + df_combined.to_csv(output[0], index=False) diff --git a/digipipe/store/datasets/bnetza_mastr_wind_region/data/.gitkeep b/digipipe/store/datasets/bnetza_mastr_wind_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/bnetza_mastr_wind_region/dataset.md b/digipipe/store/datasets/bnetza_mastr_wind_region/dataset.md new file mode 100644 index 00000000..732e6900 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_wind_region/dataset.md @@ -0,0 +1,22 @@ +# Windenergieanlagen + +Windenergieanlagen in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. 
Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_wind_stats_muns.csv`. diff --git a/digipipe/store/datasets/bnetza_mastr_wind_region/scripts/create.py b/digipipe/store/datasets/bnetza_mastr_wind_region/scripts/create.py new file mode 100644 index 00000000..c2314120 --- /dev/null +++ b/digipipe/store/datasets/bnetza_mastr_wind_region/scripts/create.py @@ -0,0 +1,116 @@ +import geopandas as gpd +import pandas as pd + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.datasets import mastr +from digipipe.scripts.geo import ( + overlay, + rename_filter_attributes, + write_geofile, +) +from digipipe.store.utils import ( + PATH_TO_REGION_DISTRICTS_GPKG, + get_names_from_nuts, +) + + +def process() -> None: + attrs = snakemake.config["attributes"] + attrs_filter = snakemake.config["attributes_filter"] + attrs_filter["Landkreis"] = get_names_from_nuts( + PATH_TO_REGION_DISTRICTS_GPKG, + GLOBAL_CONFIG["global"]["geodata"]["nuts"], + ) + + units = pd.read_csv( + snakemake.input.units, + usecols=set(attrs.keys()) | set(attrs_filter.keys()), + dtype={"Postleitzahl": str}, + ) + + units = rename_filter_attributes( + gdf=units, + attrs_filter_by_values=attrs_filter, + attrs_mapping=attrs, + ).set_index("mastr_id") + + units = mastr.add_voltage_level( + units_df=units, + locations_path=snakemake.input.locations, + gridconn_path=snakemake.input.gridconn, + ) + + # Add geometry and drop units without coords and + # add column to indicate that location from original data was used + units_with_geom = mastr.add_geometry(units) + units_with_geom = units_with_geom.assign( + geometry_approximated=0, + ) + + units_without_geom = units.loc[(units.lon.isna() | units.lat.isna())].drop( + columns=["lon", "lat"] + ) + + # Add geometry for all units without coords (<=30 kW) and + # add column to indicate that location was inferred by geocoding + if len(units_without_geom) > 0: + ( + units_with_inferred_geom_gdf, + units_with_inferred_geom_agg_gdf, + ) = mastr.geocode_units_wo_geometry( + units_without_geom, + columns_agg_functions={ + "capacity_net": ("capacity_net", "sum"), + "unit_count": ("capacity_net", "count"), + "capacity_gross": ("capacity_gross", "sum"), + }, + ) + + # Merge both GDFs + units = pd.concat([units_with_geom, units_with_inferred_geom_gdf]) + + units_agg = pd.concat( + [ + units_with_geom.assign(unit_count=1), + units_with_inferred_geom_agg_gdf, + ] + ) + else: + units = units_with_geom + units_agg = units_with_geom.assign(unit_count=1) + + # Clip to region and add mun and district ids + units = overlay( + gdf=units, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units = overlay( + gdf=units, + 
gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + units_agg = overlay( + gdf=units_agg, + gdf_overlay=gpd.read_file(snakemake.input.region_districts), + retain_rename_overlay_columns={"id": "district_id"}, + ) + + write_geofile( + gdf=units, + file=snakemake.output.outfile, + layer_name=snakemake.config["layer"], + ) + write_geofile( + gdf=units_agg, + file=snakemake.output.outfile_agg, + layer_name=snakemake.config["layer"], + ) + + +process() diff --git a/digipipe/store/datasets/demand_electricity_region/config.yml b/digipipe/store/datasets/demand_electricity_region/config.yml new file mode 100644 index 00000000..c8934a8c --- /dev/null +++ b/digipipe/store/datasets/demand_electricity_region/config.yml @@ -0,0 +1,34 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Household demand: years to create demand prognoses for +hh_electricity_demand: + years: + [ + 2022, + #2035, + 2045 + ] + +# CTS demand: years to create demand prognoses for +cts_electricity_demand: + years: + [ + 2022, + #2035, + 2045 + ] + +# Industry demand: years to create demand prognoses for +ind_electricity_demand: + years: + [ + 2022, + #2035, + 2045 + ] + +# Industry demand: dataset to be used for today's industrial power consumption, +# must be one of ["demandregio", "stala"] +ind_electricity_demand_source: "stala" diff --git a/digipipe/store/datasets/demand_electricity_region/create.smk b/digipipe/store/datasets/demand_electricity_region/create.smk new file mode 100644 index 00000000..bfb8083f --- /dev/null +++ b/digipipe/store/datasets/demand_electricity_region/create.smk @@ -0,0 +1,302 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
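The overlay calls above attach municipality and district IDs to the unit geometries. Assuming this boils down to a spatial join of unit points with the region's polygons, a sketch looks roughly as follows; the real digipipe.scripts.geo.overlay helper may work differently.

```python
# Sketch of assigning a municipality_id to point geometries via a spatial
# join; assumed to be equivalent in spirit to digipipe.scripts.geo.overlay.
import geopandas as gpd
from shapely.geometry import Point, box

units = gpd.GeoDataFrame(
    {"capacity_net": [3000.0]}, geometry=[Point(0.5, 0.5)], crs="EPSG:4326"
)
muns = gpd.GeoDataFrame(
    {"id": [7], "name": ["Example mun"]}, geometry=[box(0, 0, 1, 1)], crs="EPSG:4326"
)

units = gpd.sjoin(units, muns[["id", "geometry"]], how="inner", predicate="within")
units = units.rename(columns={"id": "municipality_id"}).drop(columns="index_right")
print(units[["capacity_net", "municipality_id"]])
```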
+""" +import difflib +import geopandas as gpd +import pandas as pd + +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG +) +from digipipe.scripts.datasets.demand import ( + demand_prognosis, + disaggregate_demand_to_municipality, + merge_demand_multiple_years, + normalize_filter_timeseries +) + +DATASET_PATH = get_abs_dataset_path("datasets", "demand_electricity_region") + +rule hh_normalize_timeseries: + """ + Extract household electricity demand timeseries for districts, merge and + normalize them + """ + input: + timeseries=get_abs_dataset_path("preprocessed", "demandregio") / + "data" / "dr_hh_power_timeseries_2022.csv", + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + timeseries=DATASET_PATH / "data" / "demand_hh_power_timeseries.csv" + run: + normalize_filter_timeseries( + infile=input.timeseries, + outfile=output.timeseries, + region_nuts=gpd.read_file(input.region_districts).nuts.to_list(), + ) + +rule hh_disaggregate_demand: + """ + Disaggregate household electricity demand from districts to + municipalities for one year and create prognosis + """ + input: + demand_today_region=get_abs_dataset_path("preprocessed", "demandregio") / + "data" / "dr_hh_power_demand_2022.csv", + demand_future_TN=get_abs_dataset_path( + "preprocessed","bmwk_long_term_scenarios" + ) / "data" / "TN-Strom_hh_demand_reformatted.csv", + demand_future_T45=get_abs_dataset_path( + "preprocessed","bmwk_long_term_scenarios" + ) / "data" / "T45-Strom_hh_demand_reformatted.csv", + population=get_abs_dataset_path("datasets", "population_region") / + "data" / "population.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + demand=DATASET_PATH / "data" / "demand_hh_power_demand_{year}.csv" + run: + population = pd.read_csv(input.population, header=[0, 1], index_col=0) + population.columns = population.columns.droplevel(1) + # Today's demand + demand_districts = pd.read_csv( + input.demand_today_region + ).set_index("nuts3").sum(axis=1).to_frame(name="demand_districts") * 1e3 + demand = disaggregate_demand_to_municipality( + demand_districts=demand_districts, + muns=gpd.read_file(input.region_muns), + districts=gpd.read_file(input.region_districts), + disagg_data=population, + disagg_data_col=str(wildcards.year), + ) + # Future demand + if int(wildcards.year) > 2022: + demand = demand_prognosis( + demand_future_T45=input.demand_future_T45, + demand_future_TN=input.demand_future_TN, + demand_region=demand, + year_base=2022, + year_target=int(wildcards.year), + scale_by="carrier", + carrier="electricity", + ) + demand.to_csv(output.demand) + +rule hh_merge_demand_years: + """ + Merge the electricity demands from different years into one + """ + input: + demand=expand( + DATASET_PATH / "data" / "demand_hh_power_demand_{year}.csv", + year=config["hh_electricity_demand"]["years"] + ) + output: + demand=DATASET_PATH / "data" / "demand_hh_power_demand.csv" + run: + merge_demand_multiple_years( + infiles=input.demand, + outfile=output.demand, + ) + +rule cts_normalize_timeseries: + """ + Extract CTS electricity demand timeseries for districts, merge and + normalize them + """ + input: + timeseries=get_abs_dataset_path("preprocessed", "demandregio") / + "data" / "dr_cts_power_timeseries_2022.csv", + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + timeseries=DATASET_PATH / "data" / "demand_cts_power_timeseries.csv" + run: + normalize_filter_timeseries( + 
infile=input.timeseries, + outfile=output.timeseries, + region_nuts=gpd.read_file(input.region_districts).nuts.to_list(), + ) + +rule cts_disaggregate_demand: + """ + Disaggregate CTS electricity demand from districts to municipalities + for one year + """ + input: + demand_today_region=get_abs_dataset_path( + "preprocessed", "demandregio") / "data" / + "dr_cts_power_demand_2022.csv", + demand_future_TN=get_abs_dataset_path( + "preprocessed", "bmwk_long_term_scenarios" + ) / "data" / "TN-Strom_cts_demand_reformatted.csv", + demand_future_T45=get_abs_dataset_path( + "preprocessed", "bmwk_long_term_scenarios" + ) / "data" / "T45-Strom_cts_demand_reformatted.csv", + employment=get_abs_dataset_path("datasets", "employment_region") / + "data" / "employment.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + demand=DATASET_PATH / "data" / "demand_cts_power_demand_{year}.csv" + run: + # Today's demand + demand_districts = pd.read_csv( + input.demand_today_region, + index_col=0 + ).sum(axis=0).T.to_frame(name="demand_districts") + demand = disaggregate_demand_to_municipality( + demand_districts=demand_districts, + muns=gpd.read_file(input.region_muns), + districts=gpd.read_file(input.region_districts), + disagg_data=pd.read_csv( + input.employment, + index_col=0, + ), + disagg_data_col="employees_total" + ) + # Future demand + if int(wildcards.year) > 2022: + demand = demand_prognosis( + demand_future_T45=input.demand_future_T45, + demand_future_TN=input.demand_future_TN, + demand_region=demand, + year_base=2022, + year_target=int(wildcards.year), + scale_by="carrier", + carrier="electricity", + ) + demand.rename(columns={"employees_total": wildcards.year}).to_csv(output.demand) + +rule cts_merge_demand_years: + """ + Merge the electricity demands from different years into one + """ + input: + demand=expand( + DATASET_PATH / "data" / "demand_cts_power_demand_{year}.csv", + year=config["cts_electricity_demand"]["years"] + ) + output: + demand=DATASET_PATH / "data" / "demand_cts_power_demand.csv" + run: + merge_demand_multiple_years( + infiles=input.demand, + outfile=output.demand, + ) + +rule ind_normalize_timeseries: + """ + Extract industry electricity demand timeseries for districts, merge and + normalize them + """ + input: + timeseries=get_abs_dataset_path("preprocessed", "demandregio") / + "data" / "dr_ind_power_timeseries_2022.csv", + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + timeseries=DATASET_PATH / "data" / "demand_ind_power_timeseries.csv" + run: + normalize_filter_timeseries( + infile=input.timeseries, + outfile=output.timeseries, + region_nuts=gpd.read_file(input.region_districts).nuts.to_list(), + ) + +rule ind_disaggregate_demand: + """ + Disaggregate industry electricity demand from districts to + municipalities for one year + """ + input: + demand_today_region_dr=get_abs_dataset_path( + "preprocessed", "demandregio") / + "data" / "dr_ind_power_demand_2022.csv", + demand_today_region_stala=get_abs_dataset_path( + "preprocessed", "stala_st_energy") / + "data" / "power_demand_industry_st_districts.csv", + demand_future_TN=get_abs_dataset_path( + "preprocessed","bmwk_long_term_scenarios" + ) / "data" / "TN-Strom_ind_demand_reformatted.csv", + demand_future_T45=get_abs_dataset_path( + "preprocessed","bmwk_long_term_scenarios" + ) / "data" / "T45-Strom_ind_demand_reformatted.csv", + employment=get_abs_dataset_path("datasets", "employment_region") / + "data" / "employment.csv", + 
region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + demand=DATASET_PATH / "data" / "demand_ind_power_demand_{year}.csv" + run: + districts = gpd.read_file(input.region_districts) + # Today's demand: use dataset defined in config + if config["ind_electricity_demand_source"] == "demandregio": + demand_districts = pd.read_csv( + input.demand_today_region_dr, + index_col=0 + ).sum(axis=0).T.to_frame(name="demand_districts") + elif config["ind_electricity_demand_source"] == "stala": + demand_districts = pd.read_csv( + input.demand_today_region_stala, + usecols=["name", "2021"], + index_col="name" + ) + districts["name"] = districts["name"].map( + lambda _: difflib.get_close_matches( + _, demand_districts.index)[0] + ) + demand_districts = demand_districts.merge(districts,on="name")[ + ["nuts", "2021"]].rename(columns={ + "2021": "demand_districts"}).set_index("nuts") + else: + raise ValueError( + "ind_electricity_demand_source must be one of " + "['demandregio', 'stala']" + ) + + # Disaggregate + demand = disaggregate_demand_to_municipality( + demand_districts=demand_districts, + muns=gpd.read_file(input.region_muns), + districts=districts, + disagg_data=pd.read_csv( + input.employment, + index_col=0, + ), + disagg_data_col="employees_ind" + ) + + # Future demand + if int(wildcards.year) > 2022: + demand = demand_prognosis( + demand_future_T45=input.demand_future_T45, + demand_future_TN=input.demand_future_TN, + demand_region=demand, + year_base=2022, + year_target=int(wildcards.year), + scale_by="total", + carrier="electricity", + ) + demand.rename( + columns={"employees_ind": wildcards.year} + ).to_csv(output.demand) + +rule ind_merge_demand_years: + """ + Merge the electricity demands from different years into one + """ + input: + demand=expand( + DATASET_PATH / "data" / "demand_ind_power_demand_{year}.csv", + year=config["ind_electricity_demand"]["years"] + ) + output: + demand=DATASET_PATH / "data" / "demand_ind_power_demand.csv" + run: + merge_demand_multiple_years( + infiles=input.demand, + outfile=output.demand, + ) diff --git a/digipipe/store/datasets/demand_electricity_region/data/.gitkeep b/digipipe/store/datasets/demand_electricity_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/demand_electricity_region/dataset.md b/digipipe/store/datasets/demand_electricity_region/dataset.md new file mode 100644 index 00000000..de221aa3 --- /dev/null +++ b/digipipe/store/datasets/demand_electricity_region/dataset.md @@ -0,0 +1,76 @@ +# Strombedarf + +Nettostrombedarfe und -zeitreihen für Haushalte, GHD und Industrie je Gemeinde. + +Die Berechnung der regionalen Prognosewerte je Verbrauchssektor erfolgt anhand +landesweiter Prognosen aus den +[BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md). + +## Haushalte + +- Jährlicher Strombedarf je Gemeinde in MWh aus + [DemandRegio](../../preprocessed/demandregio/dataset.md), von Landkreis- auf + Gemeindeebene disaggregiert anhand von Bevölkerungsprognosen + ([STALA ST](../../preprocessed/stala_st_pop_prog/dataset.md)). +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion des + Strombedarfs (ohne Wärmegewinnung) aus + [BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Hierbei wird das Szenario "TN-Strom" als Grundlage für den Status + quo verwendet und Werte für 2022 interpoliert. Die Zielwerte werden dem + Szenario "T45-Strom" entnommen. 
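The district-to-municipality disaggregation described here distributes a district's demand proportionally to a weighting column (population for households, employees for CTS and industry). A minimal, self-contained sketch of that idea follows; the pipeline uses disaggregate_demand_to_municipality, whose details may differ.

```python
# Minimal sketch of population-weighted disaggregation of a district demand
# to its municipalities; toy numbers only.
import pandas as pd

demand_district_mwh = 1000.0        # annual demand of one district
population = pd.Series(
    {1: 5000, 2: 15000, 3: 30000},  # municipality_id -> inhabitants
    name="population",
)

weights = population / population.sum()
demand_muns = (weights * demand_district_mwh).rename("demand_mwh")
print(demand_muns)  # 100, 300 and 600 MWh
```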
+- Gemittelte, normierte Strombedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../preprocessed/demandregio/dataset.md)-Daten von 2022, die + für alle Zielszenarien und Aggregationsebenen verwendet wird, da die Basis + SLP-Profile sind und Differenzen zwischen verschiedenen Jahren nur aufgrund + der Lage von Wochenenden und Feiertagen bestehen. Diese werden daher + vernachlässigt. + +## GHD + +- Jährlicher Strombedarf je Gemeinde in MWh aus + [DemandRegio](../../preprocessed/demandregio/dataset.md), von Landkreis- auf + Gemeindeebene disaggregiert anhand von sozialversicherungspflichtig + Beschäftigten im Jahr 2022 + ([BA für Arbeit](../../preprocessed/ba_employment/dataset.md)). +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion des + Strombedarfs (ohne Wärmegewinnung) aus + [BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Hierbei wird das Szenario "TN-Strom" als Grundlage für den Status + quo verwendet und Werte für 2022 interpoliert. Die Zielwerte werden dem + Szenario "T45-Strom" entnommen. +- Gemittelte, normierte Strombedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../preprocessed/demandregio/dataset.md)-Daten von 2022, die + für alle Zielszenarien und Aggregationsebenen verwendet wird. Basis bilden + sowohl SLP- als auch branchenspezifische Profile. Aufgrund der geringen + Differenzen zwischen den Landkreisen werden diese gemittelt. Differenzen + zwischen verschiedenen Jahren bestehen nur aufgrund der Lage von Wochenenden + und Feiertagen und werden daher vernachlässigt. + +## Industrie + +- Jährlicher Strombedarf je Gemeinde in MWh. Hierfür stehen 2 Datensätze zur + Verfügung - welcher verwendet wird, kann in der [Konfiguration](config.yml) + via `ind_electricity_demand_source` eingestellt werden: + - [DemandRegio](../../preprocessed/demandregio/dataset.md): Werte für alle + Landkreise in Deutschland. + - [STALA ST](../../preprocessed/stala_st_energy/dataset.md) (Standard): + Genauere Werte, jedoch nur für Sachsen-Anhalt verfügbar. +- Die Desaggregation von Landkreis- auf Gemeindeebene erfolgt anhand der + Beschäftigten im verarbeitenden Gewerbe im Jahr 2022 + ([Regionalstatistik](../../preprocessed/regiostat/dataset.md)). +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion des + industriellen Gesamtenergiebedarfs aus + [BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Im Unterschied zu Haushalten und GHD liegen die Daten für den + Wärme- und Stromanteil nicht getrennt vor, sodass auf den + Gesamtenergiebedarf zurückgegriffen wird. + Es wird das Szenario "TN-Strom" als Grundlage für den Status quo verwendet und + Werte für 2022 interpoliert. Die Zielwerte werden dem Szenario "T45-Strom" + entnommen. +- Gemittelte, normierte Strombedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../preprocessed/demandregio/dataset.md)-Daten von 2022, die + für alle Zielszenarien und Aggregationsebenen verwendet wird. Basis bilden + sowohl SLP- als auch branchenspezifische Profile. Aufgrund der geringen + Differenzen zwischen den Landkreisen werden diese gemittelt. Differenzen + zwischen verschiedenen Jahren bestehen nur aufgrund der Lage von Wochenenden + und Feiertagen und werden daher vernachlässigt. 
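The normalisation to 1 MWh mentioned in the bullets above can be pictured as dividing the profile by its annual sum, so that multiplying by any annual demand yields a consistent timeseries. A toy sketch; the pipeline's normalize_filter_timeseries may differ in details such as averaging over districts first.

```python
# Sketch: normalise a demand profile to 1 MWh, then scale by annual demand.
import pandas as pd

profile_mw = pd.Series([10.0, 20.0, 30.0, 40.0])  # toy hourly values
profile_normed = profile_mw / profile_mw.sum()    # sums to 1 (MWh)

annual_demand_mwh = 2500.0
timeseries = profile_normed * annual_demand_mwh
print(profile_normed.sum(), timeseries.sum())  # 1.0, 2500.0
```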
diff --git a/digipipe/store/datasets/demand_electricity_region/scripts/create.py b/digipipe/store/datasets/demand_electricity_region/scripts/create.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/demand_heat_region/__init__.py b/digipipe/store/datasets/demand_heat_region/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/demand_heat_region/config.yml b/digipipe/store/datasets/demand_heat_region/config.yml new file mode 100644 index 00000000..8a8502da --- /dev/null +++ b/digipipe/store/datasets/demand_heat_region/config.yml @@ -0,0 +1,103 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Share of heat demand in district heating networks of total heat demand +# Format: municipality_id: share +district_heating_share: + # Households + hh: + {0: 0.36, # Dessau-Roßlau + 1: 0, + 2: 0.11, # Bitterfeld-Wolfen + 3: 0.07, # Köthen + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + 10: 0, + 11: 0, + 12: 0, + 13: 0, + 14: 0, + 15: 0, + 16: 0, + 17: 0, + 18: 0.15, # Wittenberg + 19: 0} + # CTS + cts: + {0: 0.36, # Dessau-Roßlau + 1: 0, + 2: 0.11, # Bitterfeld-Wolfen + 3: 0.07, # Köthen + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + 10: 0, + 11: 0, + 12: 0, + 13: 0, + 14: 0, + 15: 0, + 16: 0, + 17: 0, + 18: 0.15, # Wittenberg + 19: 0} + # industry + ind: + {0: 0.19, # Dessau-Roßlau + 1: 0, + 2: 0.21, # Bitterfeld-Wolfen + 3: 0.21, # Köthen + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + 10: 0, + 11: 0, + 12: 0, + 13: 0, + 14: 0, + 15: 0, + 16: 0, + 17: 0, + 18: 0.01, # Wittenberg + 19: 0} + +# Heating structure +heating_structure: + years: + [2022, 2045] + +# Aggregation of carriers used in technologies generating central heat +# in 2022 and 2045 +carrier_to_agg: + 2022: + other_fossil_fuel_chp: "methane" + waste_chp: "biomass" + biomass_chp: "biomass" + electricity_direct_heating: "electricity_direct_heating" + gas_boiler: "methane" + methane_chp: "methane" + heat_pump: "heat_pump" + solar_thermal: "solar_thermal" + hydrogen_chp: "hydrogen" + hydrogen_boiler: "hydrogen" + 2045: + waste_chp: "biomass" + biomass_chp: "biomass" + electricity_direct_heating: "electricity_direct_heating" + gas_boiler: "methane" + methane_chp: "biomass" + heat_pump: "heat_pump" + solar_thermal: "solar_thermal" + hydrogen_chp: "biomass" + hydrogen_boiler: "hydrogen" diff --git a/digipipe/store/datasets/demand_heat_region/create.smk b/digipipe/store/datasets/demand_heat_region/create.smk new file mode 100644 index 00000000..915d58b0 --- /dev/null +++ b/digipipe/store/datasets/demand_heat_region/create.smk @@ -0,0 +1,695 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
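The district_heating_share values in the config above determine how much of a municipality's heat demand is served by district heating. A small example of the resulting central/decentral split; the district_heating rule further below applies the same idea to the full per-municipality demand tables.

```python
# Illustration of splitting municipal heat demand into central (district
# heating) and decentral parts using shares like those in config.yml.
import pandas as pd

heat_demand = pd.Series({0: 50000.0, 2: 30000.0, 4: 10000.0})  # MWh per mun
dh_share = pd.Series({0: 0.36, 2: 0.11, 4: 0.0})               # from config.yml

demand_central = heat_demand * dh_share
demand_decentral = heat_demand * (1 - dh_share)
print(demand_central.sum(), demand_decentral.sum())  # 21300.0, 68700.0
```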
+""" + +import json +import geopandas as gpd +import numpy as np +import pandas as pd +from pathlib import Path +from digipipe.scripts.data_io import load_json +from digipipe.scripts.geo import clip_raster, raster_zonal_stats +from digipipe.store.utils import ( + get_abs_dataset_path, + get_abs_store_root_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG +) +from digipipe.scripts.datasets.demand import ( + demand_prognosis, + disaggregate_demand_to_municipality, + normalize_filter_timeseries +) + +DATASET_PATH = get_abs_dataset_path( + "datasets", "demand_heat_region", data_dir=True +) + +rule raster_clip: + """ + Clip raster file to boundary + """ + input: + heat_demand=get_abs_dataset_path( + "preprocessed", "seenergies_peta5") / "data" / "HD_2015_{sector}_Peta5_0_1_GJ.tif", + boundary=get_abs_store_root_path() / "datasets" / "{boundary_dataset}" / "data" / "{boundary_dataset}.gpkg" + output: + heat_demand = DATASET_PATH / "HD_2015_{sector}_Peta5_0_1_GJ_{boundary_dataset}.tif" + run: + clip_raster( + raster_file_in=input.heat_demand, + clip_file=input.boundary, + raster_file_out=output.heat_demand, + ) + +rule raster_convert_to_mwh: + """ + Convert TIFF raster to geopackage and change unit from GJ to MWh + Note: Unit conversion takes place after clipping as it takes too long to + apply to entire dataset for Europe. + """ + input: DATASET_PATH / "HD_2015_{sector}_Peta5_0_1_GJ_{boundary_dataset}.tif" + output: DATASET_PATH / "HD_2015_{sector}_Peta5_0_1_MWh_{boundary_dataset}.tif" + shell: + """ + gdal_calc.py -A {input} --A_band=1 --outfile={output} --calc="A/3.6" + rm {input} + """ + +rule create_raster_zonal_stats: + """ + Create zonal heat statistics (heat demand per geographical unit) + """ + input: + heat_demand=DATASET_PATH / "HD_2015_{sector}_Peta5_0_1_MWh_{boundary_dataset}.tif", + clip_file=get_abs_store_root_path() / "datasets" / "{boundary_dataset}" / "data" / "{boundary_dataset}.gpkg" + output: + heat_demand=DATASET_PATH / "demand_heat_zonal_stats-{sector}-{boundary_dataset}.gpkg" + run: + raster_zonal_stats( + raster_file_in=input.heat_demand, + clip_file=input.clip_file, + zonal_file_out=output.heat_demand, + var_name="heat_demand", + stats="sum" + ) + +SECTOR_MAPPING = {"hh": "res", "cts": "ser"} + +rule heat_demand_shares: + """ + Calculate buildings' heat demand shares of federal state, region and + municipalities by merging zonal statistics (for HH and CTS) + """ + input: + state=lambda wildcards: DATASET_PATH / f"demand_heat_zonal_stats-{SECTOR_MAPPING[wildcards.sector]}-bkg_vg250_state.gpkg", + federal_states=lambda wildcards: DATASET_PATH / f"demand_heat_zonal_stats-{SECTOR_MAPPING[wildcards.sector]}-bkg_vg250_federal_states.gpkg", + region_muns=lambda wildcards: DATASET_PATH / f"demand_heat_zonal_stats-{SECTOR_MAPPING[wildcards.sector]}-bkg_vg250_muns_region.gpkg" + output: + #demand_shares=lambda wildcards: SECTOR_MAPPING[wildcards.sector] + demand_shares=DATASET_PATH / "demand_heat_shares_{sector}.json" + run: + # Read demands + demand_state = gpd.read_file(input.state) + demand_federal_state = gpd.read_file(input.federal_states) + demand_region_muns = gpd.read_file(input.region_muns) + + demand_state = float(demand_state.heat_demand) + demand_federal_state = float( + demand_federal_state.loc[ + demand_federal_state.nuts == "DEE"].heat_demand) + demand_region_muns = demand_region_muns.heat_demand + + # Calculate shares + demand_shares = { + "federal_state_of_state": demand_federal_state / demand_state, + "region_of_federal_state": ( + 
demand_region_muns.sum() / demand_federal_state + ), + "muns_of_region": ( + demand_region_muns / demand_region_muns.sum() + ).to_dict() + } + + # Dump + with open(output.demand_shares, "w", encoding="utf8") as f: + json.dump(demand_shares, f, indent=4) + +rule heat_demand_hh_cts: + """ + Calculate absolute heat demands for municipalities for HH and CTS + """ + input: + demand_germany=get_abs_dataset_path( + "preprocessed", "ageb_energy_balance") / "data" / + "ageb_energy_balance_germany_{sector}_twh_2021.csv", + demand_shares=DATASET_PATH / "demand_heat_shares_{sector}.json", + demand_future_TN=get_abs_dataset_path( + "preprocessed", "bmwk_long_term_scenarios") / "data" / + "TN-Strom_buildings_heating_demand_by_carrier_reformatted.csv", + demand_future_T45=get_abs_dataset_path( + "preprocessed", "bmwk_long_term_scenarios") / "data" / + "T45-Strom_buildings_heating_demand_by_carrier_reformatted.csv", + output: + DATASET_PATH / "demand_{sector}_heat_demand.csv" + run: + ### Demand 2021 ### + demand_germany=pd.read_csv(input.demand_germany, index_col="carrier") + with open(input.demand_shares, "r") as f: + demand_shares=json.load(f) + + # Calc demand share for each municipality + demand_muns = ( + pd.DataFrame.from_dict( + demand_shares.get("muns_of_region"), + orient="index" + ) + * demand_shares.get("federal_state_of_state") + * demand_shares.get("region_of_federal_state") + * demand_germany.drop("Strom", axis=0)[ + ["space_heating", "hot_water", "process_heat"] + ].sum().sum() + * 1e6 # TWh to MWh + ) + demand_muns.index.name = "municipality_id" + demand_muns.rename(columns={0: 2022}, inplace=True) + + ### Demand 2045: Use reduction factor ### + demand_muns = demand_muns.join( + demand_prognosis( + demand_future_T45=input.demand_future_T45, + demand_future_TN=input.demand_future_TN, + demand_region=demand_muns, + year_base=2022, + year_target=2045, + scale_by="total", + ).rename(columns={2022: 2045}) + ) + print( + f"Total heat demand for sector {wildcards.sector} in TWh:\n", + demand_muns.sum() / 1e6 + ) + + # Dump as CSV + demand_muns.to_csv(output[0]) + +rule heat_demand_ind: + """ + Calculate absolute heat demands for municipalities for Industry + """ + input: + demand_heat_ind_germany=get_abs_dataset_path( + "preprocessed", "ageb_energy_balance") / "data" / + "ageb_energy_balance_germany_ind_twh_2021.csv", + demand_ind_states=rules.preprocessed_regiostat_extract_demand_ind.output.demand_states, + demand_ind_districts=rules.preprocessed_regiostat_extract_demand_ind.output.demand_districts, + lau_codes=rules.preprocessed_eurostat_lau_create.output, + employment=get_abs_dataset_path("datasets","employment_region") / + "data" / "employment.csv", + demand_future_TN=get_abs_dataset_path( + "preprocessed","bmwk_long_term_scenarios" + ) / "data" / "TN-Strom_ind_demand_reformatted.csv", + demand_future_T45=get_abs_dataset_path( + "preprocessed","bmwk_long_term_scenarios" + ) / "data" / "T45-Strom_ind_demand_reformatted.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + DATASET_PATH / "demand_ind_heat_demand.csv" + run: + # Industrial heat demand Germany + demand_heat_ind_germany = pd.read_csv( + input.demand_heat_ind_germany, index_col="carrier" + ) + demand_heat_ind_germany = demand_heat_ind_germany.drop("Strom", axis=0)[ + ["space_heating", "hot_water", "process_heat"] + ].sum().sum() * 1e6 # TWh to MWh + + # Industrial energy demand federal states and districts + demand_ind_germany = pd.read_csv( + input.demand_ind_states, 
index_col="lau_code", dtype={"lau_code": str} + ).total.sum() + demand_ind_districts = pd.read_csv( + input.demand_ind_districts, index_col="lau_code", dtype={"lau_code": str} + ) + + # Get region's NUTS codes + districts = gpd.read_file(input.region_districts) + # Get region's LAU codes + lau_codes_region = pd.read_csv( + input.lau_codes[0], dtype={"lau_code": str}, usecols=["lau_code", "nuts_code"] + ) + lau_codes_region = lau_codes_region.loc[ + lau_codes_region.nuts_code.isin(districts.nuts.to_list())] + lau_codes_region["lau_code"] = lau_codes_region.lau_code.apply(lambda _: _[:5]) + lau_codes_region = lau_codes_region.groupby("lau_code").first() + + # Calculate industrial demand using LAU codes of region + demand_ind_districts = demand_ind_districts.loc[ + lau_codes_region.index.to_list()].join(lau_codes_region) + demand_ind_districts = demand_ind_districts[["nuts_code", "total"]].set_index("nuts_code") + demand_ind_districts = demand_ind_districts.assign( + demand_heat_share_germany=demand_ind_districts.total.div(demand_ind_germany) + ) + demand_ind_districts = demand_ind_districts.assign( + demand_heat=demand_ind_districts.demand_heat_share_germany.mul(demand_heat_ind_germany) + ) + demand_ind_districts = demand_ind_districts.rename( + columns={"demand_heat": "demand_districts"})[["demand_districts"]] + + # Disggregate using employees in industry sector + demand_muns = disaggregate_demand_to_municipality( + demand_districts=demand_ind_districts, + muns=gpd.read_file(input.region_muns), + districts=districts, + disagg_data=pd.read_csv(input.employment, index_col=0), + disagg_data_col="employees_ind" + ).rename(columns={"employees_ind": 2022}) + + demand_muns.index.name = "municipality_id" + demand_muns.rename(columns={0: 2022}, inplace=True) + + ### Demand 2045: Use reduction factor ### + demand_muns = demand_muns.join( + demand_prognosis( + demand_future_T45=input.demand_future_T45, + demand_future_TN=input.demand_future_TN, + demand_region=demand_muns, + year_base=2022, + year_target=2045, + scale_by="total", + ).rename(columns={2022: 2045}) + ) + print( + f"Total heat demand for sector industry in TWh:\n", + demand_muns.sum() / 1e6 + ) + + # Dump as CSV + demand_muns.to_csv(output[0]) + +rule normalize_timeseries: + """ + Extract heat demand timeseries for districts, merge and normalize them + """ + input: + timeseries=get_abs_dataset_path("preprocessed", "demandregio") / + "data" / "dr_{sector}_gas_timeseries_2011.csv", + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + timeseries=DATASET_PATH / "demand_{sector}_heat_timeseries.csv" + run: + normalize_filter_timeseries( + infile=input.timeseries, + outfile=output.timeseries, + region_nuts=gpd.read_file(input.region_districts).nuts.to_list(), + ) + +rule district_heating: + """ + Calculate heat demands using district heating shares + """ + input: + heat_demand=DATASET_PATH / "demand_{sector}_heat_demand.csv" + output: + heat_demand_cen=DATASET_PATH / "demand_{sector}_heat_demand_cen.csv", + heat_demand_dec=DATASET_PATH/ "demand_{sector}_heat_demand_dec.csv" + run: + print( + f"Split heat demand into central and decentral heat demand for " + f"sector: {wildcards.sector}" + ) + heat_demand = pd.read_csv( + input.heat_demand, + index_col="municipality_id", + ) + ds_shares = pd.DataFrame.from_dict( + config["district_heating_share"].get(wildcards.sector), + orient="index", + columns=["district_heating_share"] + ) + + # Check municipality IDs + if not all(mun_id in heat_demand.index for mun_id in ds_shares.index): + raise 
ValueError( + "One or more municipality IDs from district_heating_share are " + "not found in the heat demand data." + ) + + # Calculate district heating and decentral heating demand + heat_demand_cen = heat_demand.mul( + ds_shares.district_heating_share, axis=0) + heat_demand_dec = heat_demand.mul( + (1 - ds_shares.district_heating_share), axis=0) + + # Dump + heat_demand_cen.to_csv(output.heat_demand_cen) + heat_demand_dec.to_csv(output.heat_demand_dec) + +rule heating_structure_hh_cts: + """ + Create heating structure for households and CTS: demand per technology + """ + input: + demand_future_TN=get_abs_dataset_path( + "preprocessed", "bmwk_long_term_scenarios") / "data" / + "TN-Strom_buildings_heating_demand_by_carrier_reformatted.csv", + demand_future_T45=get_abs_dataset_path( + "preprocessed","bmwk_long_term_scenarios") / "data" / + "T45-Strom_buildings_heating_demand_by_carrier_reformatted.csv", + rel_capacities_biomass_dec=get_abs_dataset_path( + "preprocessed","dbfz_biomass_capacity_rel") / "data" / + "dbfz_biomass_capacity_rel_decentral.csv", + generation_cen_TN45=get_abs_dataset_path( + "preprocessed","bmwk_long_term_scenarios") / "data" / + "T45-Strom_Generation_Heatgrids_Germany_reformatted.csv", + rel_capacities_biomass_cen=get_abs_dataset_path( + "preprocessed","dbfz_biomass_capacity_rel") / "data" / + "dbfz_biomass_capacity_rel_central.csv" + output: + heating_structure_cen=( + #DATASET_PATH / "demand_{sector}_heat_structure_cen.csv" + DATASET_PATH / "demand_heat_structure_cen.csv" + ), + heating_structure_esys_cen=( + DATASET_PATH/ "demand_heat_structure_esys_cen.csv" + ), + heating_structure_dec=( + #DATASET_PATH / "demand_{sector}_heat_structure_dec.csv" + DATASET_PATH / "demand_heat_structure_dec.csv" + ), + heating_structure_esys_dec=( + DATASET_PATH/ "demand_heat_structure_esys_dec.csv" + ) + run: + def get_demand_dec(): + # Get data from future scenarios + demand_future_T45 = pd.read_csv( + input.demand_future_T45 + ) # .set_index(["year", "carrier"]) + demand_future_TN = pd.read_csv( + input.demand_future_TN + ) # .set_index(["year", "carrier"]) + + # Interpolate for base year + demand_future = pd.concat( + [ + demand_future_TN.loc[demand_future_TN.year == 2020], + demand_future_T45, + ], + axis=0, + ) + demand_future = demand_future.set_index(["year", "carrier"]).append( + pd.DataFrame( + index=pd.MultiIndex.from_product( + [[2022], demand_future.carrier.unique(), [np.nan]], + names=demand_future.columns, + ) + ) + ) + demand_future.sort_index(inplace=True) + demand_future = demand_future.unstack(level=1).interpolate().stack() + + # Calculate heating structure + demand_future = demand_future.loc[ + config["heating_structure"].get("years") + ].reset_index() + + demand_total = ( + demand_future[["year", "demand"]].groupby("year").sum() + ) + demand_dec = demand_future.loc[ + demand_future.carrier != "district_heating" + ] + # Drop auxiliary power + demand_dec = demand_dec.loc[ + demand_dec.carrier != "electricity_auxiliary" + ] + demand_total_dec = ( + demand_dec[["year", "demand"]].groupby("year").sum() + ) + demand_dec = demand_dec.set_index("year").assign( + demand_rel=demand_dec.set_index("year").demand.div( + demand_total_dec.demand + ) + ) + demand_dec.drop(columns=["demand"], inplace=True) + + # demand_total_cen = demand_total - demand_total_dec + + return demand_dec + + def get_demand_dec_esys(demand_dec): + # Aggregate heat pump demand + demand_dec.carrier = demand_dec.carrier.replace( + { + "ambient_heat_heat_pump": "heat_pump", + "electricity_heat_pump": 
"heat_pump", + } + ) + demand_dec = ( + demand_dec.reset_index().groupby(["year", "carrier"]).sum() + ) + + # Reset index + demand_dec.reset_index(inplace=True) + + # Read relative capacities + rel_capacities_biomass_dec = pd.read_csv( + input.rel_capacities_biomass_dec, index_col="year" + ) + rel_capacities_biomass_dec.reset_index(inplace=True) + + # Get years from config + years = config["heating_structure"].get("years") + + # Get relative demand per conversion technology + # In the following, it is assumed for simplification that the + # distribution over the capacity corresponds to that of the energy + # amount. For this to be the case, the full load hours of all + # conversion plants would have to be the same, but in reality they + # are not. + for year in years: + ( + rel_capacities_biomass_dec, + demand_dec, + ) = get_rel_demand_conv_tech_biomass( + rel_capacities_biomass_dec, demand_dec, year + ) + + demand_dec_esys = update_rel_demand_with_conv_tech_biomass( + rel_capacities_biomass_dec, demand_dec + ) + + return demand_dec_esys + + def get_demand_cen(): + # Read central heating generation per technology + heat_gen_cen_T45 = pd.read_csv(input.generation_cen_TN45) + + # Read relative capacities + rel_capacities_biomass_cen = pd.read_csv( + input.rel_capacities_biomass_cen, + index_col="year", + ) + + # Change year 2021 to 2022 neglecting changes in generation + # structure during that period + heat_gen_cen_T45.loc[ + heat_gen_cen_T45["year"] == 2021, "year" + ] = 2022 + + rel_demand_cen = pd.DataFrame() + rel_demand_cen_esys = pd.DataFrame() + + for year in config["heating_structure"].get("years"): + # Filter data by year + heat_gen_cen_T45_year = heat_gen_cen_T45[ + heat_gen_cen_T45["year"] == year + ] + + # Get all carriers used in technology to aggregate + carrier_to_agg = config["carrier_to_agg"][year] + + # Get all unique carriers provided in the values of + # carrier_to_agg dict + unique_carrier = list(set(carrier_to_agg.values())) + + # Rename values in technology col according to aggregation + # convention in carrier_to_agg + for index, row in heat_gen_cen_T45_year.iterrows(): + if row["technology"] in carrier_to_agg: + heat_gen_cen_T45_year.loc[ + index, "technology" + ] = carrier_to_agg[row["technology"]] + + # Aggregate by carrier / technology according to carrier_to_agg + heat_gen_cen_T45_year = heat_gen_cen_T45_year.groupby( + "technology" + ).agg( + { + "generation": "sum", + "year": "first", + } + ) + + # Reset the index + heat_gen_cen_T45_year.reset_index(inplace=True) + + # Rename column 'technology' to 'carrier' and 'generation' to + # 'demand_rel' + heat_gen_cen_T45_year.rename( + {"generation": "demand_rel", "technology": "carrier"}, + axis=1, + inplace=True, + ) + + # Get relative demand for all carriers + rel_demand_cen_year = get_relative_demand(heat_gen_cen_T45_year) + rel_demand_cen_year = order_cols_df(rel_demand_cen_year) + rel_demand_cen = pd.concat( + [rel_demand_cen, rel_demand_cen_year], ignore_index=True + ) + + # Get all unique carriers listed in raw data + carriers = list(heat_gen_cen_T45_year["carrier"].unique()) + + # Get unknown carriers if raw data contains carriers that are + # not in the energy system + unknown_carriers = list(set(carriers) - set(unique_carrier)) + + # Drop all unknown carriers + for unknown_carrier in unknown_carriers: + heat_gen_cen_T45_year.drop( + heat_gen_cen_T45_year[ + heat_gen_cen_T45_year["carrier"] == unknown_carrier + ].index, + inplace=True, + ) + + rel_demand_cen_esys_year = get_relative_demand( + 
heat_gen_cen_T45_year + ) + rel_demand_cen_esys_year = order_cols_df( + rel_demand_cen_esys_year + ) + rel_demand_cen_esys = pd.concat( + [rel_demand_cen_esys, rel_demand_cen_esys_year], + ignore_index=True, + ) + + ( + rel_capacities_biomass_cen, + rel_demand_cen_esys, + ) = get_rel_demand_conv_tech_biomass( + rel_capacities_biomass_cen, rel_demand_cen_esys, year + ) + + rel_demand_cen_esys = update_rel_demand_with_conv_tech_biomass( + rel_capacities_biomass_cen, rel_demand_cen_esys + ) + + return rel_demand_cen, rel_demand_cen_esys + + def get_relative_demand(_df): + grouped_df = _df.groupby(by=["carrier", "year"]).sum( + numeric_only=True + ) + + # Calculate relative demand from absolute values + rel_demand_df = grouped_df.apply( + lambda rel_demand_df: rel_demand_df / rel_demand_df.sum() + ) + + return rel_demand_df + + def order_cols_df(_df): + # Drop the index + _df.reset_index(inplace=True) + + # Set index on the year to move the column to the front + _df.set_index("year", inplace=True) + _df.reset_index(inplace=True) + + return _df + + def get_rel_demand_conv_tech_biomass( + rel_capacities_biomass, rel_demand, year + ): + if rel_capacities_biomass.index.name == "year": + rel_capacities_biomass.reset_index(inplace=True) + + # Get relative demand of biomass in year + rel_demand_biomass = rel_demand.loc[ + rel_demand["carrier"].eq("biomass") + & rel_demand["year"].eq(year), + "demand_rel", + ] + + # Calculate relative capacities of biomass conversion technologies + # with the relative demand of biomass + rel_capacities_biomass.loc[ + rel_capacities_biomass["year"] == year, "capacity_rel" + ] *= rel_demand_biomass.values[0] + + return rel_capacities_biomass, rel_demand + + def update_rel_demand_with_conv_tech_biomass( + rel_capacities_biomass, rel_demand + ): + # Rename column "capacity_rel" to "demand_rel" + rel_capacities_biomass.rename( + {"capacity_rel": "demand_rel"}, axis=1, inplace=True + ) + + # Merge Dataframe with relative capacities with the one with + # relative demands + rel_demand_updated = pd.concat( + [rel_demand, rel_capacities_biomass], ignore_index=True + ) + + # Drop redundant entry for biomass + rel_demand_updated.drop( + rel_demand_updated[ + rel_demand_updated["carrier"] == "biomass" + ].index, + inplace=True, + ) + + # Sort by year + rel_demand_updated.sort_values( + by=["year"], inplace=True, ignore_index=True + ) + + return rel_demand_updated + + # Get relative demand of decentral heat + demand_dec = get_demand_dec() + + # Dump heating structure (for info) + demand_dec.to_csv(output.heating_structure_dec) + + # Get relative demand of decentral heat of esys + demand_dec_esys = get_demand_dec_esys(demand_dec) + + # Dump heating structure (for esys) + demand_dec_esys.to_csv(output.heating_structure_esys_dec, index=False) + + # Get relative demand of central heat + rel_demand_cen, rel_demand_cen_esys = get_demand_cen() + + # Dump heating structure (for info) + rel_demand_cen.to_csv( + output.heating_structure_cen, + index=False, + ) + + # Dump heating structure (for esys) + rel_demand_cen_esys.to_csv( + output.heating_structure_esys_cen, + index=False, + ) + +rule create_captions: + """ + Create attribute captions for app + """ + input: + demand=rules.datasets_demand_heat_region_heating_structure_hh_cts.output, + bmwk_lts=rules.preprocessed_bmwk_long_term_scenarios_create_captions.output[0] + output: DATASET_PATH / "demand_heat_region_attribute_captions.json" + run: + bmwk_lts = load_json(input.bmwk_lts) + captions = { + "datasets_caption_map": { + Path(f).stem: 
"demand_heat" for f in input.demand + }, + "captions": { + "demand_heat": + bmwk_lts["captions"]["bmwk_long_term_scenarios"] + } + } + with open(output[0], "w", encoding="utf8") as f: + json.dump(captions, f, indent=4) diff --git a/digipipe/store/datasets/demand_heat_region/data/.gitkeep b/digipipe/store/datasets/demand_heat_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/demand_heat_region/dataset.md b/digipipe/store/datasets/demand_heat_region/dataset.md new file mode 100644 index 00000000..694607e2 --- /dev/null +++ b/digipipe/store/datasets/demand_heat_region/dataset.md @@ -0,0 +1,148 @@ +# Wärmebedarf + +Wärmebedarfe (Endenergie) Fernwärme und dezentrale Wärme sowie Wärmezeitreihen +für Haushalte, GHD und Industrie je Gemeinde. + +## Gesamtwärmebedarf + +Die Berechnung der regionalen Prognosewerte je Verbrauchssektor erfolgt anhand +landesweiter Prognosen aus den +[BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md). + +### Haushalte + +- Jährlicher Wärmebedarf je Gemeinde in MWh: Bundeswert aus + [AG Energiebilanzen](../../preprocessed/ageb_energy_balance/dataset.md) + 2021 für Raumwärme, Warmwasser und Prozesswärme, desaggregiert auf Gemeinden + mittels Wärmebedarfs-Rasterdaten aus 2015 (Wärmebedarfsdichte 1ha) aus + [Peta5](../../raw/seenergies_peta5/dataset.md). + Anm.: Die Desaggregation könnte alternativ über Zensus "Gebäude mit Wohnraum + nach Heizungsart" (31231-02-01-5, s. + [regiostat](../../raw/regiostat/dataset.md) erfolgen) +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion der + Gebäudewärmebedarfe aus + [BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Hierbei wird das Szenario "TN-Strom" als Grundlage für den Status + quo verwendet und Werte für 2022 interpoliert. Die Zielwerte werden dem + Szenario "T45-Strom" entnommen. +- Gemittelte, normierte Gasbedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../preprocessed/demandregio/dataset.md)-Daten von 2022 die + für alle Zielszenarien und Aggregationsebenen verwendet wird, da die Basis + SLP-Profile sind und Differenzen zwischen verschiedenen Jahren nur aufgrund + der Lage von Wochenenden und Feiertagen bestehen. Diese werden daher + vernachlässigt. + +### GHD + +- Jährlicher Wärmebedarf je Gemeinde in MWh: Bundeswert aus + [AG Energiebilanzen](../../preprocessed/ageb_energy_balance/dataset.md) + 2021 für Raumwärme, Warmwasser und Prozesswärme, desaggregiert auf Gemeinden + mittels Wärmebedarfs-Rasterdaten aus 2015 (Wärmebedarfsdichte 1ha) aus + [Peta5](../../raw/seenergies_peta5/dataset.md) +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion der + Gebäudewärmebedarfe aus + [BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Hierbei wird das Szenario "TN-Strom" als Grundlage für den Status + quo verwendet und Werte für 2022 interpoliert. Die Zielwerte werden dem + Szenario "T45-Strom" entnommen. +- Gemittelte, normierte Gasbedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../preprocessed/demandregio/dataset.md)-Daten von 2022 die + für alle Zielszenarien und Aggregationsebenen verwendet wird, da die Basis + SLP-Profile sind und Differenzen zwischen verschiedenen Jahren nur aufgrund + der Lage von Wochenenden und Feiertagen bestehen. Diese werden daher + vernachlässigt. 
+ +### Industrie + +- Jährlicher Wärmebedarf je Gemeinde in MWh: Bundeswert aus + [AG Energiebilanzen](../../preprocessed/ageb_energy_balance/dataset.md) + 2021 für Raumwärme, Warmwasser und Prozesswärme. Die Desaggregation auf + Landkreisebene erfolgt anhand des Gesamtenergiebedarfs im verarbeitenden + Gewerbe aus [Regionalstatistik](../../preprocessed/regiostat/dataset.md). + Die anschließende Desaggregation auf Gemeindeebene wird mittels + Beschäftigtenzahlen im verarbeitenden Gewerbe in 2022 aus + [Regionalstatistik](../../preprocessed/regiostat/dataset.md) vorgenommen. +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion des + industriellen Gesamtenergiebedarfs aus + [BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Im Unterschied zu Haushalten und GHD liegen die Daten für den + Wärme- und Stromanteil nicht getrennt vor, sodass auf den + Gesamtenergiebedarf zurückgegriffen wird. + Es wird das Szenario "TN-Strom" als Grundlage für den Status quo verwendet und + Werte für 2022 interpoliert. Die Zielwerte werden dem Szenario "T45-Strom" + entnommen. +- Gemittelte, normierte Gasbedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../preprocessed/demandregio/dataset.md)-Daten von 2022 die + für alle Zielszenarien und Aggregationsebenen verwendet wird, da die Basis + SLP-Profile sind und Differenzen zwischen verschiedenen Jahren nur aufgrund + der Lage von Wochenenden und Feiertagen bestehen. Diese werden daher + vernachlässigt. +- Es erfolgt keine Aufteilung des Wärmebedarfs auf unterschiedliche + Temperaturniveaus. + +## Dezentrale Wärme und Fernwärme + +Der Gesamtwärmebedarf wird auf dezentrale Heizsysteme und Fernwärme aufgeteilt. +Fernwärmenetze existieren in Dessau-Roßlau, Bitterfeld-Wolfen, Köthen und +Wittenberg. + +Da keine Daten zum tatsächlichen Fernwärmebedarf vorliegen, werden Annahmen auf +Basis folgender Quellen getroffen: + +- [Zensus 2011: Gebäude nach Heizungsart](https://www.regionalstatistik.de/genesis//online?operation=table&code=31211-04-01-5-B) +- [BMWK Langfristszenarien: Wärmenachfrage in Wärmenetzen (HH&GHD) (2025)](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/54022/62a2667df6f8c176ff129f7ede944837) +- [STALA ST: Wärmebilanz der Industriebetriebe (2021)](https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/energie-und-wasserversorgung/tabellen-energieverwendung#c256237) +- [STALA ST: Energie- und Wasserversorgung](https://statistik.sachsen-anhalt.de/fileadmin/Bibliothek/Landesaemter/StaLa/startseite/Themen/Energie/Berichte/6E403_2020-A.pdf) +- [WindNODE](https://windnode-abw.readthedocs.io/en/latest/energy_system_model.html#district-heating) +- [Peta5: D5 1 District Heating Areas (2020)](https://s-eenergies-open-data-euf.hub.arcgis.com/datasets/b62b8ad79f0e4ae38f032ad6aadb91a0_0/) + +Annahmen zu Fernwärmeanteilen (Anteil der Endenergie aus Fernwärme an gesamter +Wärme-Endenergie) je Bedarfssektor: + +| Fernwärmenetz | Haushalte | GHD | Industrie | +|-------------------|----------:|-----:|----------:| +| Dessau-Roßlau | 0,36 | 0,36 | 0,19 | +| Bitterfeld-Wolfen | 0,11 | 0,11 | 0,21 | +| Köthen | 0,07 | 0,07 | 0,21 | +| Wittenberg | 0,15 | 0,15 | 0,01 | + +Die Fernwärmeanteile können in der [config.yml](config.yml) im Abschnitt +`district_heating_share` für jeden Sektor separat angepasst werden. Es wird +vereinfachend angenommen, dass der Anteil an Fernwärme für alle +Szenarien/Zieljahre gleich bleibt. 
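+
+Die Aufteilung erfolgt je Sektor durch Multiplikation mit dem jeweiligen
+Fernwärmeanteil (vereinfachte Skizze analog zur Regel `district_heating`;
+Gemeinde-IDs und Bedarfswerte sind frei erfunden):
+
+```python
+import pandas as pd
+
+# Hypothetischer Gesamtwärmebedarf je Gemeinde in MWh (Sektor Haushalte)
+heat_demand = pd.DataFrame({"2022": [120_000, 80_000]}, index=[1, 2])
+heat_demand.index.name = "municipality_id"
+
+# Fernwärmeanteile aus der config.yml, Abschnitt "district_heating_share"
+ds_shares = pd.Series({1: 0.36, 2: 0.11}, name="district_heating_share")
+
+heat_demand_cen = heat_demand.mul(ds_shares, axis=0)      # Fernwärme
+heat_demand_dec = heat_demand.mul(1 - ds_shares, axis=0)  # dezentrale Wärme
+```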
+ +## Beheizungsstruktur + +Die Beheizungsstruktur für 2020 und 2045 wird den +[BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) +entnommen (Gebäude: Haushalte und GHD Energiebedarf) und für 2022 interpoliert. +Hierbei wird nach Technologien für dezentrale sowie Fernwärme unterschieden. +Für die Biomasse wird der relative Energiebedarf mit Hilfe von Anteilen der +installierten Leistung von spezifischen Biomasse-Konversionsanlagen +[dbfz_biomasss_capacity_rel](../../preprocessed/dbfz_biomass_capacity_rel/dataset.md) +je Technologie aufgelöst. Der Vereinfachung halber wird angenommen, dass die +relative installierte Leistung der relativen Energiemenge entspricht. + +## Ergebnisdaten + +- Haushalte: Wärmebedarf gesamt: `demand_hh_heat_demand.csv` +- Haushalte: Wärmebedarf Fernwärme: `demand_hh_heat_demand_cen.csv` +- Haushalte: Wärmebedarf dezentrale Wärme: `demand_hh_heat_demand_dec.csv` +- Haushalte: Zeitreihen: `demand_hh_heat_timeseries.csv` + +- GHD: Wärmebedarf gesamt: `demand_cts_heat_demand.csv` +- GHD: Wärmebedarf Fernwärme: `demand_cts_heat_demand_cen.csv` +- GHD: Wärmebedarf dezentrale Wärme: `demand_cts_heat_demand_dec.csv` +- GHD: Zeitreihen: `demand_cts_heat_timeseries.csv` + +- Industrie: Wärmebedarf gesamt: `demand_ind_heat_demand.csv` +- Industrie: Wärmebedarf Fernwärme: `demand_ind_heat_demand_cen.csv` +- Industrie: Wärmebedarf dezentrale Wärme: `demand_ind_heat_demand_dec.csv` +- GHD: Zeitreihen: `demand_ind_heat_timeseries.csv` + +- Beheizungsstruktur dezentral (informativ): `demand_heat_structure_dec.csv` +- Beheizungsstruktur zentral (informativ): `demand_heat_structure_cen.csv` +- Beheizungsstruktur dezentral für Weiterverwendung im Energiesystem: + `demand_heat_structure_esys_dec.csv` +- Beheizungsstruktur Fernwärme für Weiterverwendung im Energiesystem: + `demand_heat_structure_esys_cen.csv` diff --git a/digipipe/store/datasets/demand_heat_region/scripts/create.py b/digipipe/store/datasets/demand_heat_region/scripts/create.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/emissions_region/config.yml b/digipipe/store/datasets/emissions_region/config.yml new file mode 100644 index 00000000..17d3af5a --- /dev/null +++ b/digipipe/store/datasets/emissions_region/config.yml @@ -0,0 +1,3 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## diff --git a/digipipe/store/datasets/emissions_region/create.smk b/digipipe/store/datasets/emissions_region/create.smk new file mode 100644 index 00000000..00666d45 --- /dev/null +++ b/digipipe/store/datasets/emissions_region/create.smk @@ -0,0 +1,80 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" +import json +import pandas as pd +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path( + "datasets", "emissions_region", data_dir=True) + +rule create_chart_data: + """ + Extract and aggregate emissions for charts + """ + input: + get_abs_dataset_path("raw", "emissions") / "data" / "emissions.csv" + output: + DATASET_PATH / "emissions_chart_overview.json" + run: + emissions = pd.read_csv( + input[0], + header=[0, 1], + index_col=[0, 1, 2, 3], + ) + + emissions_dict = dict( + # Sector: Energy industry (CRF 1.A.1 + 1.B) + **emissions.loc[[ + (1, "A", "1", "energy_industry"), + (1, "B", "1", "diffuse_emissions_solid_fuels"), + (1, "B", "2", "diffuse_emissions_fuel_oil_natural_gas"), + ]].sum().to_frame( + name="energy_industry").round(1).to_dict(orient="list"), + + # Sector: Industry (CRF 1.A.2 + 2) + **emissions.loc[[ + (1, "A", "2", "industry"), + (2, "*", "*", "process_emissions"), + ]].sum().to_frame( + name="industry").round(1).to_dict(orient="list"), + + # Sector: Traffic (CRF 1.A.3) + **emissions.loc[ + (1, "A", "3", "traffic"), + ].to_frame( + name="traffic").round(1).to_dict(orient="list"), + + # Sector: Buildings (CRF 1.A.4 + 1.A.5) + **emissions.loc[ + (1, "A", "4-5", "buildings_firing"), + ].to_frame( + name="buildings_firing").round(1).to_dict(orient="list"), + + # Sector: Agriculture (CRF 3) + **emissions.groupby( + "sector" + ).sum().loc[3].to_frame( + name="agricultural").round(1).to_dict(orient="list"), + + # Sector: Waste and waste water (CRF 5) + **emissions.loc[ + (5, "*", "*", "waste_waste_water"), + ].to_frame( + name="waste_waste_water").round(1).to_dict(orient="list"), + ) + + with open(output[0], "w", encoding="utf8") as f: + json.dump(emissions_dict, f, indent=4) + +rule copy_emissions: + """ + Copy raw emissions file + """ + input: + get_abs_dataset_path("raw", "emissions") / "data" / "emissions.csv" + output: + DATASET_PATH / "emissions.csv" + shell: "cp -p {input} {output}" diff --git a/digipipe/store/datasets/emissions_region/data/.gitkeep b/digipipe/store/datasets/emissions_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/emissions_region/dataset.md b/digipipe/store/datasets/emissions_region/dataset.md new file mode 100644 index 00000000..952e1bc4 --- /dev/null +++ b/digipipe/store/datasets/emissions_region/dataset.md @@ -0,0 +1,8 @@ +# Emissionen + +Emissionen für Sachsen-Anhalt und die Region, aggregiert nach Sektoren der +CRF-Nomenklatur. + +Datei `emissions.json` enthält Chartdaten. + +Raw dataset: [emissions](../../raw/emissions/dataset.md) diff --git a/digipipe/store/datasets/employment_region/config.yml b/digipipe/store/datasets/employment_region/config.yml new file mode 100644 index 00000000..17d3af5a --- /dev/null +++ b/digipipe/store/datasets/employment_region/config.yml @@ -0,0 +1,3 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## diff --git a/digipipe/store/datasets/employment_region/create.smk b/digipipe/store/datasets/employment_region/create.smk new file mode 100644 index 00000000..f585d5e6 --- /dev/null +++ b/digipipe/store/datasets/employment_region/create.smk @@ -0,0 +1,25 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG +) + +DATASET_PATH = get_abs_dataset_path("datasets", "employment_region") + +rule create: + """ + Create employment dataset for region + """ + input: + employment_total=get_abs_dataset_path("preprocessed", "ba_employment") / "data" / "employment_muns.csv", + employment_ind=get_abs_dataset_path("preprocessed", "regiostat") / "data" / "employment_industry_muns.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG + output: + DATASET_PATH / "data" / "employment.csv" + script: + DATASET_PATH / "scripts" / "create.py" diff --git a/digipipe/store/datasets/employment_region/data/.gitkeep b/digipipe/store/datasets/employment_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/employment_region/dataset.md b/digipipe/store/datasets/employment_region/dataset.md new file mode 100644 index 00000000..8b913f2e --- /dev/null +++ b/digipipe/store/datasets/employment_region/dataset.md @@ -0,0 +1,8 @@ +# Sozialversicherungspflichtig Beschäftigte und Betriebe + +Gesamtanzahl sozialversicherungspflichtig Beschäftigte und Betriebsstätten +je Gemeinde für die Region. + +Raw datasets: +[ba_employment](../../raw/ba_employment/dataset.md), +[regiostat](../../raw/regiostat/dataset.md) diff --git a/digipipe/store/datasets/employment_region/scripts/create.py b/digipipe/store/datasets/employment_region/scripts/create.py new file mode 100644 index 00000000..268e0f07 --- /dev/null +++ b/digipipe/store/datasets/employment_region/scripts/create.py @@ -0,0 +1,54 @@ +import geopandas as gpd +import pandas as pd + + +def process() -> None: + # Get muns + muns = gpd.read_file(snakemake.input.region_muns) + + # Get total employment data + employment_total = pd.read_csv( + snakemake.input.employment_total, index_col=0, dtype={"ags": str} + ).loc[muns.ags.to_list()] + + # Get industrial employment data + employment_ind = pd.read_csv( + snakemake.input.employment_ind, index_col=0, dtype={"ags": str} + ).loc[muns.ags.to_list()][["employees_ind", "companies_ind"]] + + # Fill missing data by average employees per company + employees_per_company = employment_ind.employees_ind.div( + employment_ind.companies_ind + ) + if employees_per_company.isna().sum() > 0: + print( + f"WARNING: Number of employees or companies missing in " + f"{employees_per_company.isna().sum()} of {len(muns)} " + f"municipalities! Using average for employees per company " + f"({employees_per_company.mean().round(1)}) for those..." 
+ ) + employment_ind = employment_ind.assign( + employees_ind=employment_ind.employees_ind.fillna( + employment_ind.companies_ind * employees_per_company.mean() + ) + ) + employment_ind = employment_ind.assign( + employees_ind=employment_ind.employees_ind.round().astype(int) + ) + + # Join total data with industry data + employment = employment_total.join(employment_ind) + + # Join employment data with municipality ids + employment = ( + muns.rename(columns={"id": "municipality_id"}) + .set_index("ags") + .merge(employment, left_index=True, right_index=True) + .set_index("municipality_id") + )[["employees_total", "companies_total", "employees_ind", "companies_ind"]] + + employment.to_csv(snakemake.output[0]) + + +if __name__ == "__main__": + process() diff --git a/digipipe/store/datasets/esys_raw/data/.gitkeep b/digipipe/store/datasets/esys_raw/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/heatpump_cop/config.yml b/digipipe/store/datasets/heatpump_cop/config.yml new file mode 100644 index 00000000..0ab63384 --- /dev/null +++ b/digipipe/store/datasets/heatpump_cop/config.yml @@ -0,0 +1,19 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Heatpump parameters +heatpumps: + technology: + # Market shares + share_ASHP: 0.8 + share_GSHP: 0.2 + params: + heating_temp: 40 + quality_grade_ASHP: 0.4 + quality_grade_GSHP: 0.4 + icing_factor: 0.8 + icing_temp: 2 + # Seasonal performance factors (efficiency gain until 2045) + spf_ASHP: 1.25 + spf_GSHP: 1.25 diff --git a/digipipe/store/datasets/heatpump_cop/create.smk b/digipipe/store/datasets/heatpump_cop/create.smk new file mode 100644 index 00000000..d32691cb --- /dev/null +++ b/digipipe/store/datasets/heatpump_cop/create.smk @@ -0,0 +1,43 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" +import pandas as pd +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("datasets", "heatpump_cop") + +rule create: + """ + Calculate COPs for air- and ground source heatpumps + """ + input: + temperature=rules.preprocessed_dwd_temperature_create.output + output: + cop_ashp=DATASET_PATH / "data" / "heatpump_cop_ashp_timeseries.csv", + cop_gshp=DATASET_PATH / "data" / "heatpump_cop_gshp_timeseries.csv" + script: + DATASET_PATH / "scripts" / "create.py" + +rule merge: + """ + Create one heatpump COP timeseries by weighting both COP timeseries + """ + input: rules.datasets_heatpump_cop_create.output + output: DATASET_PATH / "data" / "heatpump_cop_timeseries.csv" + run: + tech = config["heatpumps"].get("technology") + cop = pd.concat( + [ + pd.read_csv(f, index_col=0) + for f in input + ], + axis=1, + ) + cop = pd.Series( + cop.cop_ashp * tech.get("share_ASHP") + + cop.cop_gshp * tech.get("share_GSHP"), + name="cop" + ).round(3) + cop.to_csv(output[0]) diff --git a/digipipe/store/datasets/heatpump_cop/data/.gitkeep b/digipipe/store/datasets/heatpump_cop/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/heatpump_cop/dataset.md b/digipipe/store/datasets/heatpump_cop/dataset.md new file mode 100644 index 00000000..92edf018 --- /dev/null +++ b/digipipe/store/datasets/heatpump_cop/dataset.md @@ -0,0 +1,35 @@ +# Wärmepumpen COP + +Zeitreihe für die Leistungszahl / Coefficient of performance (COP) für +Wärmepumpen. Berücksichtigt werden Luftwärmepumpen (ASHP) und Erdwärmepumpen +(GSHP). Der COP wird mit Hilfe von Zeitreihen der Umgebungstemperatur (ASHP) +bzw. der Bodentemperatur (GSHP) für jeden Zeitschritt berechnet. + +Details zur Berechnungsmethodik können der Dokumentation von +[oemof.thermal](https://oemof-thermal.readthedocs.io/en/latest/compression_heat_pumps_and_chillers.html) +entnommen werden. + +Annahmen: + +- Vorlauftemperatur: 40 °C +- Gütegrad / Quality grade: 0.4 (nach + [VDE](https://www.energiedialog2050.info/wp-content/uploads/simple-file-list/VDE_ST_ETG_Warmemarkt_RZ-web.pdf)) +- Vereisungsverluste bei ASHP: 20 % bei <2 °C + +Daraus ergibt sich eine mittlere Jahresarbeitszahl (JAZ) von 3,3 für ASHP und +4,3 für GSHP, die mit typischen Werten für 2019 +([AEW](https://static.agora-energiewende.de/fileadmin/Projekte/2022/2022-04_DE_Scaling_up_heat_pumps/A-EW_273_Waermepumpen_WEB.pdf)) +übereinstimmen. Für das Zukunftsszenario wird ferner ein Effizienzgewinn durch +technische Weiterentwicklung von 25 % angenommen +[ewi](https://www.ewi.uni-koeln.de/cms/wp-content/uploads/2015/12/2014_06_24_ENDBER_P7570_Energiereferenzprognose-GESAMT-FIN-IA.pdf). + +Beide separat erstelle Zeitreihen werden anhand der heutigen Marktdurchdringung +gewichtet und in eine mittlere Zeitreihe für Wärmepumpen überführt. Im Jahr +2022 wurden 87 % ASHP und 13 % GSHP abgesetzt nach +[BWP](https://www.waermepumpe.de/fileadmin/user_upload/waermepumpe/05_Presse/01_Pressemitteilungen/BWP_Branchenstudie_2023_DRUCK.pdf), +über die letzten 10 Jahre beträgt das Verhältnis ca. 80:20. +Für 2045 wird daher ein Anteil von 80 % ASHP und 20 % GSHP angenommen. 
+ +Verwendet Datensätze: + +- [dwd_temperature](../../preprocessed/dwd_temperature/dataset.md) diff --git a/digipipe/store/datasets/heatpump_cop/scripts/create.py b/digipipe/store/datasets/heatpump_cop/scripts/create.py new file mode 100644 index 00000000..ac316dd7 --- /dev/null +++ b/digipipe/store/datasets/heatpump_cop/scripts/create.py @@ -0,0 +1,93 @@ +import pandas as pd + + +def calc_heat_pump_cops( + t_high: list, + t_low: list, + quality_grade: float, + consider_icing: bool = False, + temp_icing: float = None, + factor_icing: float = None, + spf: float = None, +) -> list: + """Calculate temperature-dependent COP of heat pumps including efficiency + gain over time. + + COP-Code was adapted from oemof-thermal: + https://github.com/oemof/oemof-thermal/blob/features/cmpr_heatpumps_and_chillers/src/oemof/thermal/compression_heatpumps_and_chillers.py + + Efficiency corrections are based upon increase of seasonal performance + factor (SPF) for scenario year since today SQ. + """ + + # Expand length of lists with temperatures and convert unit to Kelvin + length = max([len(t_high), len(t_low)]) + if len(t_high) == 1: + list_t_high_k = [t_high[0] + 273.15] * length + elif len(t_high) == length: + list_t_high_k = [t + 273.15 for t in t_high] + if len(t_low) == 1: + list_t_low_k = [t_low[0] + 273.15] * length + elif len(t_low) == length: + list_t_low_k = [t + 273.15 for t in t_low] + + # Calculate COPs + if not consider_icing: + cops = [ + quality_grade * t_h / (t_h - t_l) + for t_h, t_l in zip(list_t_high_k, list_t_low_k) + ] + + # Temperatures below 2 degC lead to icing at evaporator in + # heat pumps working with ambient air as heat source. + elif consider_icing: + cops = [] + for t_h, t_l in zip(list_t_high_k, list_t_low_k): + if t_l < temp_icing + 273.15: + cops = cops + [factor_icing * quality_grade * t_h / (t_h - t_l)] + if t_l >= temp_icing + 273.15: + cops = cops + [quality_grade * t_h / (t_h - t_l)] + + # Efficiency gain for scenario year + if spf is not None: + cops = [_ * spf for _ in cops] + + return cops + + +def process() -> None: + # Get heatpump params and temperature timeseries + hp_params = snakemake.config["heatpumps"].get("params") + temp = pd.read_csv( + snakemake.input.temperature[0], + index_col=0, + ) + + # Calculate COPs + cops_ashp = calc_heat_pump_cops( + t_high=[hp_params.get("heating_temp")], + t_low=temp.temp_amb.to_list(), + quality_grade=hp_params.get("quality_grade_ASHP"), + consider_icing=True, + temp_icing=hp_params.get("icing_temp"), + factor_icing=hp_params.get("icing_factor"), + spf=hp_params.get("spf_ASHP"), + ) + cops_gshp = calc_heat_pump_cops( + t_high=[hp_params.get("heating_temp")], + t_low=temp.temp_soil.to_list(), + quality_grade=hp_params.get("quality_grade_GSHP"), + spf=hp_params.get("spf_GSHP"), + ) + + # Round and dump + pd.Series(cops_ashp, name="cop_ashp",).round( + 3 + ).to_csv(snakemake.output.cop_ashp) + pd.Series(cops_gshp, name="cop_gshp",).round( + 3 + ).to_csv(snakemake.output.cop_gshp) + + +if __name__ == "__main__": + process() diff --git a/digipipe/store/datasets/module.smk b/digipipe/store/datasets/module.smk new file mode 100644 index 00000000..e294931a --- /dev/null +++ b/digipipe/store/datasets/module.smk @@ -0,0 +1,157 @@ +""" +Dataset registry for datasets module which is loaded by main snakemake file. +All datasets in the datasets category must be added to this file. 
+ +Template: +--------- +module : + snakefile: "/create.smk" + config: config["store"]["datasets"][""] +use rule * from as datasets__* + +""" + +module bkg_vg250_districts_region: + snakefile: "bkg_vg250_districts_region/create.smk" + config: config["store"]["datasets"]["bkg_vg250_districts_region"] +use rule * from bkg_vg250_districts_region as datasets_bkg_vg250_districts_region_* + +module bkg_vg250_muns_region: + snakefile: "bkg_vg250_muns_region/create.smk" + config: config["store"]["datasets"]["bkg_vg250_muns_region"] +use rule * from bkg_vg250_muns_region as datasets_bkg_vg250_muns_region_* + +module bkg_vg250_state: + snakefile: "bkg_vg250_state/create.smk" + config: config["store"]["datasets"]["bkg_vg250_state"] +use rule * from bkg_vg250_state as datasets_bkg_vg250_state_* + +module bkg_vg250_federal_states: + snakefile: "bkg_vg250_federal_states/create.smk" + config: config["store"]["datasets"]["bkg_vg250_federal_states"] +use rule * from bkg_vg250_federal_states as datasets_bkg_vg250_federal_states_* + +module bkg_vg250_region: + snakefile: "bkg_vg250_region/create.smk" + config: config["store"]["datasets"]["bkg_vg250_region"] +use rule * from bkg_vg250_region as datasets_bkg_vg250_region_* + +module bnetza_mastr_wind_region: + snakefile: "bnetza_mastr_wind_region/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_wind_region"] +use rule * from bnetza_mastr_wind_region as datasets_bnetza_mastr_wind_region_* + +module bnetza_mastr_pv_ground_region: + snakefile: "bnetza_mastr_pv_ground_region/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_pv_ground_region"] +use rule * from bnetza_mastr_pv_ground_region as datasets_bnetza_mastr_pv_ground_region_* + +module bnetza_mastr_pv_roof_region: + snakefile: "bnetza_mastr_pv_roof_region/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_pv_roof_region"] +use rule * from bnetza_mastr_pv_roof_region as datasets_bnetza_mastr_pv_roof_region_* + +module bnetza_mastr_biomass_region: + snakefile: "bnetza_mastr_biomass_region/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_biomass_region"] +use rule * from bnetza_mastr_biomass_region as datasets_bnetza_mastr_biomass_region_* + +module bnetza_mastr_hydro_region: + snakefile: "bnetza_mastr_hydro_region/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_hydro_region"] +use rule * from bnetza_mastr_hydro_region as datasets_bnetza_mastr_hydro_region_* + +module bnetza_mastr_combustion_region: + snakefile: "bnetza_mastr_combustion_region/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_combustion_region"] +use rule * from bnetza_mastr_combustion_region as datasets_bnetza_mastr_combustion_region_* + +module bnetza_mastr_gsgk_region: + snakefile: "bnetza_mastr_gsgk_region/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_gsgk_region"] +use rule * from bnetza_mastr_gsgk_region as datasets_bnetza_mastr_gsgk_region_* + +module bnetza_mastr_storage_region: + snakefile: "bnetza_mastr_storage_region/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_storage_region"] +use rule * from bnetza_mastr_storage_region as datasets_bnetza_mastr_storage_region_* + +module bnetza_mastr_captions: + snakefile: "bnetza_mastr_captions/create.smk" + config: config["store"]["datasets"]["bnetza_mastr_captions"] +use rule * from bnetza_mastr_captions as datasets_bnetza_mastr_captions_* + +module population_region: + snakefile: "population_region/create.smk" + config: config["store"]["datasets"]["population_region"] +use 
rule * from population_region as datasets_population_region_* + +module employment_region: + snakefile: "employment_region/create.smk" + config: config["store"]["datasets"]["employment_region"] +use rule * from employment_region as datasets_employment_region_* + +module demand_electricity_region: + snakefile: "demand_electricity_region/create.smk" + config: config["store"]["datasets"]["demand_electricity_region"] +use rule * from demand_electricity_region as datasets_demand_electricity_region_* + +module heatpump_cop: + snakefile: "heatpump_cop/create.smk" + config: config["store"]["datasets"]["heatpump_cop"] +use rule * from heatpump_cop as datasets_heatpump_cop_* + +module demand_heat_region: + snakefile: "demand_heat_region/create.smk" + config: config["store"]["datasets"]["demand_heat_region"] +use rule * from demand_heat_region as datasets_demand_heat_region_* + +module renewable_feedin: + snakefile: "renewable_feedin/create.smk" + config: config["store"]["datasets"]["renewable_feedin"] +use rule * from renewable_feedin as datasets_renewable_feedin_* + +module potentialarea_wind_region: + snakefile: "potentialarea_wind_region/create.smk" + config: config["store"]["datasets"]["potentialarea_wind_region"] +use rule * from potentialarea_wind_region as datasets_potentialarea_wind_region_* + +module potentialarea_pv_ground_region: + snakefile: "potentialarea_pv_ground_region/create.smk" + config: config["store"]["datasets"]["potentialarea_pv_ground_region"] +use rule * from potentialarea_pv_ground_region as datasets_potentialarea_pv_ground_region_* + +module potentialarea_pv_roof_region: + snakefile: "potentialarea_pv_roof_region/create.smk" + config: config["store"]["datasets"]["potentialarea_pv_roof_region"] +use rule * from potentialarea_pv_roof_region as datasets_potentialarea_pv_roof_region_* + +module rli_pv_wfr_region: + snakefile: "rli_pv_wfr_region/create.smk" + config: config["store"]["datasets"]["rli_pv_wfr_region"] +use rule * from rli_pv_wfr_region as datasets_rli_pv_wfr_region_ * + +module technology_data: + snakefile: "technology_data/create.smk" + config: config["store"]["datasets"]["technology_data"] +use rule * from technology_data as datasets_technology_data_ * + +module osm_buildings: + snakefile: "osm_buildings/create.smk" + config: config["store"]["datasets"]["osm_buildings"] +use rule * from osm_buildings as datasets_osm_buildings_ * + +module emissions_region: + snakefile: "emissions_region/create.smk" + config: config["store"]["datasets"]["emissions_region"] +use rule * from emissions_region as datasets_emissions_region_ * + +module app_captions: + snakefile: "app_captions/create.smk" + config: config["store"]["datasets"]["app_captions"] +use rule * from app_captions as datasets_app_captions_ * + +module app_settings: + snakefile: "app_settings/create.smk" + config: config["store"]["datasets"]["app_settings"] +use rule * from app_settings as datasets_app_settings_* diff --git a/digipipe/store/datasets/osm_buildings/config.yml b/digipipe/store/datasets/osm_buildings/config.yml new file mode 100644 index 00000000..aeacb2f8 --- /dev/null +++ b/digipipe/store/datasets/osm_buildings/config.yml @@ -0,0 +1,17 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# If true, the conditions from "tags" are used as filters (e.g. 
+# ["building", "yes"] results in building=yes) +# If false, the keys from "tags" are selected but no further conditions apply +use_conditions: false + +# Tags/conditions to include (format: [tag, value])) +tags: + [ + ["building", "*"], + ] + +# Intermediate and output layer name +layer_name: "osm_buildings" diff --git a/digipipe/store/datasets/osm_buildings/create.smk b/digipipe/store/datasets/osm_buildings/create.smk new file mode 100644 index 00000000..8f9109cf --- /dev/null +++ b/digipipe/store/datasets/osm_buildings/create.smk @@ -0,0 +1,146 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +import json +import geopandas as gpd + +from digipipe.store.utils import ( + get_abs_dataset_path, + create_tag_string_ogr +) + +DATASET_PATH = get_abs_dataset_path( + "datasets", "osm_buildings", data_dir=True) + +rule extract_buildings: + """ + Create layers from converted OSM file using requested tags per layer. Only + requested attributes are retained. + """ + input: + get_abs_dataset_path( + "preprocessed", "osm_filtered", data_dir=True + ) / "germany-230101_filtered.osm.gpkg" + output: DATASET_PATH / "osm_buildings.gpkg" + params: + tags=create_tag_string_ogr(config["tags"]), + layer_name=config["layer_name"] + run: + conditions = ( + params.tags['conditions'] + if config["use_conditions"] + else "" + ) + shell( + f"ogr2ogr -f GPKG -select {params.tags['tags']} " + f"{conditions} {output} {input} -nln {params.layer_name} " + f"multipolygons" + ) + +rule create_centroids: + """ + Create centroids for buildings and attach ground area (in sqm) + """ + input: DATASET_PATH / "osm_buildings.gpkg" + output: DATASET_PATH / "osm_buildings_centroids.gpkg" + params: + layer_name=config["layer_name"] + run: + shell( + f"ogr2ogr -f GPKG -sql " + f"'SELECT ST_Centroid(geom) as geom, ST_Area(geom) AS area_sqm " + f"FROM {params.layer_name}' -dialect sqlite " + f"{output} {input} -nln '{params.layer_name}'" + ) + +rule merge_with_region_file: + """ + Merge centroids file with region file + """ + input: + centroids=DATASET_PATH / "osm_buildings_centroids.gpkg", + region=rules.datasets_bkg_vg250_region_create.output + output: DATASET_PATH / "osm_buildings_centroids_regionmerge.gpkg" + params: + layer_name=config["layer_name"] + run: + shell( + f"ogrmerge.py -f GPKG -o {output} {input.centroids} {input.region} " + "-nln '{{DS_BASENAME}}'" + ) + +rule intersect_with_region: + """ + Intersect centroids with region + """ + input: DATASET_PATH / "osm_buildings_centroids_regionmerge.gpkg" + output: DATASET_PATH / "osm_buildings_centroids_region.gpkg" + params: + layer_name=config["layer_name"] + run: + shell( + f"ogr2ogr -sql 'SELECT a.area_sqm, a.geom " + f"FROM osm_buildings_centroids AS a, bkg_vg250_region AS b " + f"WHERE ST_INTERSECTS(a.geom, b.geom)' -dialect sqlite " + f"{output} {input} -nln '{params.layer_name}'" + ) + +rule calc_area_totals: + """ + Calculate total ground area of buildings for region and country + """ + input: + region=DATASET_PATH / "osm_buildings_centroids_region.gpkg", + country=DATASET_PATH / "osm_buildings_centroids.gpkg" + output: + region=DATASET_PATH / "osm_buildings_ground_area_region.gpkg", + country=DATASET_PATH/ "osm_buildings_ground_area_country.gpkg" + params: + layer_name=config["layer_name"] + run: + shell( + f"ogr2ogr -sql 'SELECT sum(area_sqm) AS area_sum_sqm " + f"FROM {params.layer_name}' -dialect sqlite " + f"{output.region} {input.region} -nln '{params.layer_name}'" + ) + 
shell( + f"ogr2ogr -sql 'SELECT sum(area_sqm) AS area_sum_sqm " + f"FROM {params.layer_name}' -dialect sqlite " + f"{output.country} {input.country} -nln '{params.layer_name}'" + ) + +rule calc_building_ground_area_share: + """ + Calculate share of region's total building ground area in country's total + building ground area. + """ + input: + region=DATASET_PATH / "osm_buildings_ground_area_region.gpkg", + country=DATASET_PATH/ "osm_buildings_ground_area_country.gpkg" + output: + DATASET_PATH / "osm_buildings_ground_area.json" + run: + area_region = round( + float(gpd.read_file(input.region).area_sum_sqm.sum()) + ) + area_country = round( + float(gpd.read_file(input.country).area_sum_sqm.sum()) + ) + area_share = round(area_region / area_country, 4) + print( + f"Share of region's total building ground area in country's total " + f"building ground area: {area_share}" + ) + with open(output[0], "w", encoding="utf8") as f: + json.dump( + { + "building_ground_area_country": area_country, + "building_ground_area_region": area_region, + "building_ground_area_share_region": area_share + }, + f, + indent=4 + ) diff --git a/digipipe/store/datasets/osm_buildings/data/.gitkeep b/digipipe/store/datasets/osm_buildings/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/osm_buildings/dataset.md b/digipipe/store/datasets/osm_buildings/dataset.md new file mode 100644 index 00000000..f8645d35 --- /dev/null +++ b/digipipe/store/datasets/osm_buildings/dataset.md @@ -0,0 +1,19 @@ +# OpenStreetMap Gebäude + +OSM Gebäude aus [osm_filtered](../../preprocessed/osm_filtered/dataset.md) +mittels OGR extrahieren und nach Tags (s. [config.yml](config.yml)) filtern. + +Ziel ist die Ermittlung des regionalen Anteils Gebäudegrundflächen an der +gesamten Gebäudegrundfläche in Deutschland. + +Schritte: + +- Extraktion aller Gebäude in Deutschland --> `osm_buildings.gpkg` +- Zentroide und Fläche je Gebäude erstellen --> `osm_buildings_centroids.gpkg` +- Mit Region verschneiden --> `osm_buildings_centroids_region.gpkg` +- Flächensumme berechnen --> `osm_buildings_ground_area_region.gpkg`, + `osm_buildings_ground_area_country.gpkg` +- Regionalen Anteil berechnen --> `osm_buildings_ground_area_share_region.json` + +**Achtung:** Konvertierungs- und Extraktionsprozess benötigt ~15 GB +Speicherplatz und kann viel Zeit in Anspruch nehmen. diff --git a/digipipe/store/datasets/population_region/config.yml b/digipipe/store/datasets/population_region/config.yml new file mode 100644 index 00000000..6a3b6e4a --- /dev/null +++ b/digipipe/store/datasets/population_region/config.yml @@ -0,0 +1,29 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Population prognosis: years to be included from state prognosis +# data on municipal level (empty list to deactivate). +prognosis_fstate_munlevel: + years: + [ + 2025, + 2030, + 2035 + ] + +# Population prognosis: years to be included from country prognosis +# data on state level (empty list to deactivate). +prognosis_germany_districtlevel: + years: + [ + 2040, + 2045 + ] + +# Population extrapolation: years to be covered (empty list to deactivate) +# Note: all available prognosis years will be used for the extrapolation, +# not-requested prognosis years (above) will be simply dropped in the end. 
+extrapolation: + years: + [] diff --git a/digipipe/store/datasets/population_region/create.smk b/digipipe/store/datasets/population_region/create.smk new file mode 100644 index 00000000..d23114b1 --- /dev/null +++ b/digipipe/store/datasets/population_region/create.smk @@ -0,0 +1,34 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG, + PATH_TO_REGION_DISTRICTS_GPKG +) + +DATASET_PATH = get_abs_dataset_path("datasets", "population_region") + +rule create: + """ + Create full population dataset for region + """ + input: + pop_history=expand( + get_abs_dataset_path("preprocessed", "destatis_gv") / "data" / "3112{year}_Auszug_GV.csv", + year=[2010, 2015, 2020, 2021, 2022] + ), + prognosis_fstate_munlevel=get_abs_dataset_path( + "preprocessed", "stala_st_pop_prog") / "data" / + "population_prognosis_st_muns.csv", + prognosis_germany_districtlevel=get_abs_dataset_path( + "preprocessed", "demandregio") / "data" / "dr_hh_population.csv", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + region_districts=PATH_TO_REGION_DISTRICTS_GPKG + output: + DATASET_PATH / "data" / "population.csv" + script: + DATASET_PATH / "scripts" / "create.py" diff --git a/digipipe/store/datasets/population_region/data/.gitkeep b/digipipe/store/datasets/population_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/population_region/dataset.md b/digipipe/store/datasets/population_region/dataset.md new file mode 100644 index 00000000..89f35d7e --- /dev/null +++ b/digipipe/store/datasets/population_region/dataset.md @@ -0,0 +1,36 @@ +# Bevölkerungsentwicklung + +EinwohnerInnen je Gemeinde: Historische Daten und Prognosen + +## Historische Daten bis 2022 + +Statistisches Bundesamt (Raw dataset: +[destatis_gv](../../raw/destatis_gv/dataset.md)) + +## Prognosen bis 2035 + +Statistisches Landesamt Sachsen-Anhalt (Raw dataset: +[stala_st_pop_prog](../../raw/stala_st_pop_prog/dataset.md)). Deaktivieren +mittels entfernen der Zieljahre in [config.yml](config.yml) im Abschnitt +`prognosis_fstate_munlevel`. + +Kann für andere Regionen auch durch DemandRegio (s.u.) ersetzt werden, die +tatsächliche regionale Auflösung wird dadurch reduziert. + +## Prognosen bis 2045 + +DemandRegio (Raw dataset: [demandregio](../../raw/demandregio/dataset.md)) +basierend auf der +[14. koordinierten Bevölkerungsvorausberechnung](https://www.destatis.de/DE/Themen/Gesellschaft-Umwelt/Bevoelkerung/Bevoelkerungsvorausberechnung/aktualisierung-bevoelkerungsvorausberechnung.html) +der Statistischen Ämter von Bund und Ländern. Diese Daten liegen auf +Landkreisebene vor, daher erfolgt eine gleichmäßige Skalierung der +dazugehörigen Gemeinden auf den jeweiligen Prognosewert. + +Deaktivieren mittels entfernen der Zieljahre in [config.yml](config.yml) im +Abschnitt `prognosis_germany_districtlevel`. + +## Extrapolation + +Über 2045 hinaus wird lineare Extrapolation auf Basis der letzten beiden +Prognosejahre unterstützt. Um diese zu aktivieren, müssen lediglich Zieljahre +in die [config.yml](config.yml) im Abschnitt `extrapolation` eingetragen werden. 
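+
+Die Fortschreibung folgt einem einfachen linearen Schema (Skizze mit frei
+erfundenen Zahlen; die eigentliche Berechnung erfolgt in `scripts/create.py`):
+
+```python
+# Letzte zwei verfügbare Prognosejahre (hypothetische EinwohnerInnenzahlen)
+pop_2040, pop_2045 = 10_000, 9_500
+
+year_delta_base = 2045 - 2040      # Abstand der beiden Stützjahre
+year_delta_extrapol = 2050 - 2045  # Abstand zum hypothetischen Zieljahr
+
+pop_2050 = (
+    pop_2045
+    + (pop_2045 - pop_2040) / year_delta_base * year_delta_extrapol
+)
+# -> 9_000 EinwohnerInnen im Zieljahr 2050
+```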
diff --git a/digipipe/store/datasets/population_region/scripts/create.py b/digipipe/store/datasets/population_region/scripts/create.py new file mode 100644 index 00000000..512d1a11 --- /dev/null +++ b/digipipe/store/datasets/population_region/scripts/create.py @@ -0,0 +1,185 @@ +import geopandas as gpd +import pandas as pd + + +def process() -> None: + # pylint: disable=R0914 + # Get muns and their NUTS3 code + muns = gpd.read_file(snakemake.input.region_muns) + districts = gpd.read_file(snakemake.input.region_districts) + muns = muns.merge( + districts[["id", "nuts"]].rename(columns={"id": "district_id"}), + left_on="district_id", + right_on="district_id", + ) + + # Historical data: load, get available years and filter by region + pop_history = pd.concat( + [ + pd.read_csv(f, dtype={"ags": str}, index_col="ags") + for f in snakemake.input.pop_history + ], + axis=1, + ) + pop_history = pop_history.loc[pop_history.index.isin(muns.ags)] + avail_years_history = pop_history.columns.astype(int).to_list() + print(f"Historical years: {avail_years_history}") + + # Prognosis: load state data on municipal level and filter by region + pop_prognosis_years_mun = snakemake.config["prognosis_fstate_munlevel"][ + "years" + ] + if len(pop_prognosis_years_mun) > 0: + pop_prognosis_mun = pd.read_csv( + snakemake.input.prognosis_fstate_munlevel, + dtype={"ags": str}, + index_col="ags", + ).round() + pop_prognosis_mun = pop_prognosis_mun.loc[ + pop_prognosis_mun.index.isin(muns.ags) + ] + avail_years_prognosis = pop_prognosis_mun.columns.astype(int).to_list() + print( + f"Prognosis years (state data on municipal level): " + f"{pop_prognosis_years_mun}" + ) + pop_reference = pop_prognosis_mun + pop_reference_years = pop_prognosis_years_mun + else: + pop_reference = pop_history + pop_reference_years = avail_years_history + print( + "No years for prognosis with state data on municipal level " + "provided, skipping..." + ) + + # Prognosis: load country data on district (NUTS 3) level + pop_prognosis_years_district = snakemake.config[ + "prognosis_germany_districtlevel" + ]["years"] + if len(pop_prognosis_years_district) > 0: + pop_prognosis_district = pd.read_csv( + snakemake.input.prognosis_germany_districtlevel, + index_col="nuts3", + ) + pop_prognosis_district = pop_prognosis_district.loc[muns.nuts.unique()][ + [str(y) for y in pop_prognosis_years_district] + ] + avail_years_prognosis = pop_prognosis_district.columns.astype( + int + ).to_list() + print( + f"Prognosis years (country data on district level): " + f"{pop_prognosis_years_district}" + ) + + # Merge prognoses: create municipal prognosis from NUTS 3 level data by + # using municipal shares from the last available state's municipal + # prognosis. 
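+ # Example: a municipality holding 25 % of its district's population in the
+ # last available state prognosis year keeps that 25 % share when the
+ # district-level (NUTS 3) prognosis values are distributed to municipalities.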
+ pop_reference_lastyear = pd.concat( + [ + pop_reference[str(pop_reference_years[-1])], + muns[["ags", "nuts"]].set_index("ags"), + ], + axis=1, + ) + pop_reference_lastyear = ( + pop_reference_lastyear.assign( + share=( + pop_reference_lastyear[str(pop_reference_years[-1])] + / pop_reference_lastyear.groupby("nuts")[ + str(pop_reference_years[-1]) + ].transform("sum") + ) + ) + .drop(columns=[str(pop_reference_years[-1])]) + .reset_index() + ) + pop_prognosis_district = pop_reference_lastyear.merge( + pop_prognosis_district.reset_index(), + left_on="nuts", + right_on="nuts3", + ).set_index("ags") + for year in pop_prognosis_years_district: + pop_reference[str(year)] = ( + pop_prognosis_district["share"] + .mul(pop_prognosis_district[str(year)]) + .round() + ) + else: + print( + "No years for prognosis with country data on district level " + "provided, skipping..." + ) + + # Extrapolate population linearly for years from config + extrapol_years = snakemake.config["extrapolation"]["years"] + if len(extrapol_years) > 0: + print(f"Extrapolation years: {extrapol_years}") + for year in extrapol_years: + # Extrapolate using the last 2 available years + year_delta_base = ( + avail_years_prognosis[-1] - avail_years_prognosis[-2] + ) + year_delta_extrapol = year - avail_years_prognosis[-1] + pop_reference[str(year)] = ( + pop_reference[str(avail_years_prognosis[-1])] + + ( + pop_reference[str(avail_years_prognosis[-1])] + - pop_reference[str(avail_years_prognosis[-2])] + ) + / year_delta_base + * year_delta_extrapol + ) + else: + print("No extrapolation years provided, skipping extrapolation...") + + # Drop not requested years + pop_reference.drop( + columns=[ + c + for c in pop_reference.columns + if int(c) + not in ( + avail_years_history + + pop_reference_years + + pop_prognosis_years_district + + extrapol_years + ) + ], + inplace=True, + ) + + # Extra-include historic data depending on if first prognosis was used + if not all( + str(year) in pop_reference.columns for year in avail_years_history + ): + population = pd.concat([pop_history, pop_reference], axis=1) + else: + population = pop_reference + + # Add municipality_id and data origin + population = ( + pd.concat( + [muns.set_index("ags")["id"].rename("municipality_id"), population], + axis=1, + ) + .sort_index() + .set_index("municipality_id", drop=True) + ) + population.columns = pd.MultiIndex.from_arrays( + [ + population.columns, + len(avail_years_history) * ["historic"] + + (len(pop_prognosis_years_mun) + len(pop_prognosis_years_district)) + * ["prognosis"] + + len(extrapol_years) * ["extrapolation"], + ], + names=("year", "type"), + ) + + population.to_csv(snakemake.output[0]) + + +if __name__ == "__main__": + process() diff --git a/digipipe/store/datasets/potentialarea_pv_ground_region/config.yml b/digipipe/store/datasets/potentialarea_pv_ground_region/config.yml new file mode 100644 index 00000000..99e76cbd --- /dev/null +++ b/digipipe/store/datasets/potentialarea_pv_ground_region/config.yml @@ -0,0 +1,15 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Area names (which identify files) +areas: + [ + "agriculture_lfa-off_region", + "road_railway_region", + ] + +# PV targets split: share of pv_ground +# Caution: when adjusting this value, the pv_roof_share in dataset +# potentialarea_pv_roof_region must be adjusted accordingly! 
+pv_ground_share: 0.5 diff --git a/digipipe/store/datasets/potentialarea_pv_ground_region/create.smk b/digipipe/store/datasets/potentialarea_pv_ground_region/create.smk new file mode 100644 index 00000000..107879e1 --- /dev/null +++ b/digipipe/store/datasets/potentialarea_pv_ground_region/create.smk @@ -0,0 +1,218 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" +import json +import re +import geopandas as gpd +import pandas as pd +from pathlib import Path + +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.data_io import load_json +from digipipe.scripts.geo import ( + overlay, + convert_to_multipolygon, + write_geofile +) +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG +) + +DATASET_PATH = get_abs_dataset_path( + "datasets", "potentialarea_pv_ground_region", data_dir=True +) + +rule overlay_muns: + """ + Overlay potential area with municipalities + """ + input: + area=get_abs_dataset_path( + "datasets", "rli_pv_wfr_region", data_dir=True) / "{file}", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG + output: + area=DATASET_PATH / "{file}" + run: + data = gpd.read_file(input.area) + data = overlay( + gdf=data, + gdf_overlay=gpd.read_file(input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + write_geofile( + gdf=convert_to_multipolygon(data), + file=output.area, + ) + +rule create_area_stats_muns: + """ + Create stats on pv potential areas per mun + """ + input: + area=expand( + DATASET_PATH / "potentialarea_pv_{area}.gpkg", + area=config["areas"], + ), + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: DATASET_PATH / "potentialarea_pv_ground_area_stats_muns.csv" + run: + print("PV ground potential area stats:") + muns = gpd.read_file(input.region_muns) + area_dict = {} + + # Calc areas per area type file + for file in input.area: + area_name = re.findall( + "potentialarea_pv_(.*).gpkg", + Path(file).name, + )[0] + data = gpd.read_file(file) + data["area_km2"] = data.area / 1e6 + area_km2 = data[ + ["municipality_id", "area_km2"] + ].groupby("municipality_id").sum() + + # Set area of non-occurring muns to 0 + area_km2 = area_km2.reindex(muns.id, fill_value=0) + area_dict[area_name] = area_km2.to_dict()["area_km2"] + print( + f" Total area for {area_name}: " + f"{round(float(area_km2.sum()), 1)} sqm" + ) + + area_df = pd.DataFrame(area_dict) + area_df.index.name="municipality_id" + area_df.to_csv(output[0]) + +rule create_potarea_shares: + """ + Calc shares of actual potential areas in total potential areas (per type) + """ + input: + potarea_pv_road_railway=get_abs_dataset_path( + "datasets", "rli_pv_wfr_region", data_dir=True + ) / "potentialarea_pv_road_railway_region.gpkg", + road_railway_500m=get_abs_dataset_path( + "datasets", "rli_pv_wfr_region", data_dir=True + ) / "road_railway-500m_region.gpkg", + potarea_pv_agri=get_abs_dataset_path( + "datasets", "rli_pv_wfr_region", data_dir=True + ) / "potentialarea_pv_agriculture_lfa-off_region.gpkg", + soil_quality_low=get_abs_dataset_path( + "datasets", "rli_pv_wfr_region", data_dir=True + ) / "soil_quality_low_region.gpkg", + + output: DATASET_PATH / "potentialarea_pv_ground_area_shares.json" + run: + area_dict = { + "road_railway": round( + gpd.read_file(input.potarea_pv_road_railway).area.sum() / + gpd.read_file(input.road_railway_500m).area.sum(), + 3 + ), + "agri": round( + gpd.read_file(input.potarea_pv_agri).area.sum() / + 
gpd.read_file(input.soil_quality_low).area.sum(), + 3 + ), + } + + # Dump + with open(output[0], "w", encoding="utf8") as f: + json.dump(area_dict, f, indent=4) + +rule regionalize_state_targets: + """ + Calculate PV ground targets of region + """ + input: + potarea_pv_road_railway=get_abs_dataset_path( + "preprocessed", "rli_pv_wfr", data_dir=True + ) / "potentialarea_pv_road_railway.gpkg", + potarea_pv_agri=get_abs_dataset_path( + "preprocessed", "rli_pv_wfr", data_dir=True + ) / "potentialarea_pv_agriculture_lfa-off.gpkg", + potarea_pv_road_railway_region=get_abs_dataset_path( + "datasets", "rli_pv_wfr_region", data_dir=True + ) / "potentialarea_pv_road_railway_region.gpkg", + potarea_pv_agri_region=get_abs_dataset_path( + "datasets", "rli_pv_wfr_region", data_dir=True + ) / "potentialarea_pv_agriculture_lfa-off_region.gpkg", + el_capacity_targets=get_abs_dataset_path( + "preprocessed", "bmwk_long_term_scenarios" + ) / "data" / "T45-Strom_electricity_installed_power_reformatted.csv", + tech_data=get_abs_dataset_path( + "datasets", "technology_data", data_dir=True + ) / "technology_data.json" + output: + DATASET_PATH / "potentialarea_pv_ground_regionalized_targets.json" + run: + potarea_pv_rr = gpd.read_file(input.potarea_pv_road_railway).to_crs( + GLOBAL_CONFIG["global"]["geodata"]["crs"] + ).area.sum() + potarea_pv_agri = gpd.read_file(input.potarea_pv_agri).to_crs( + GLOBAL_CONFIG["global"]["geodata"]["crs"] + ).area.sum() + potarea_pv_rr_region = gpd.read_file( + input.potarea_pv_road_railway_region).area.sum() + potarea_pv_agri_region = gpd.read_file( + input.potarea_pv_agri_region).area.sum() + tech_data = load_json(input.tech_data) + + # Power target from longterm scenario + targets = pd.read_csv(input.el_capacity_targets, index_col="year") + target_cap = targets.loc[ + targets.technology == "pv" + ].loc[2045].capacity * 1e3 * config.get("pv_ground_share") + targets_region = ( + target_cap * ( + (potarea_pv_rr_region + potarea_pv_agri_region) / + (potarea_pv_rr + potarea_pv_agri) + ) + ) + + with open(output[0], "w", encoding="utf8") as f: + json.dump( + { + # Power targets (disaggregated) + "target_power_total": round( + target_cap * ( + (potarea_pv_rr_region + potarea_pv_agri_region) / + (potarea_pv_rr + potarea_pv_agri) + ) + ), + "target_power_road_railway": round( + target_cap * potarea_pv_rr_region / ( + potarea_pv_rr + potarea_pv_agri) + ), + "target_power_agri": round( + target_cap * potarea_pv_agri_region / ( + potarea_pv_rr + potarea_pv_agri) + ), + # Areas targets (from power targets) + "target_area_total": round( + target_cap * ( + (potarea_pv_rr_region + potarea_pv_agri_region) / + (potarea_pv_rr + potarea_pv_agri) + ) / tech_data["power_density"]["pv_ground"] + ,2 + ), + "target_area_road_railway": round( + target_cap * potarea_pv_rr_region / ( + potarea_pv_rr + potarea_pv_agri + ) / tech_data["power_density"]["pv_ground"] + ,2 + ), + "target_area_agri": round( + target_cap * potarea_pv_agri_region / ( + potarea_pv_rr + potarea_pv_agri + ) / tech_data["power_density"]["pv_ground"] + ,2 + ), + }, + f, + indent=4 + ) diff --git a/digipipe/store/datasets/potentialarea_pv_ground_region/data/.gitkeep b/digipipe/store/datasets/potentialarea_pv_ground_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/potentialarea_pv_ground_region/dataset.md b/digipipe/store/datasets/potentialarea_pv_ground_region/dataset.md new file mode 100644 index 00000000..e0ba2a16 --- /dev/null +++ 
b/digipipe/store/datasets/potentialarea_pv_ground_region/dataset.md @@ -0,0 +1,59 @@ +# Potenzialgebiete PV-Freiflächen + +## Potenzialflächen + +Potenzialgebiete für die Errichtung von PV-Freiflächenanlagen aus dem +[PV- und Windflächenrechner](https://www.agora-energiewende.de/service/pv-und-windflaechenrechner/) +(s. Datensatz [rli_pv_wfr](../../raw/rli_pv_wfr/dataset.md)). + +Die Potenzialflächen bilden jene Flächen ab, die für die Nutzung durch +Freiflächen-Photovoltaikanlagen grundsätzlich zur Verfügung stehen. Sie +orientieren sich an der aktuellen Förderkulisse und wurden anhand des +Flächenumfangs sowie den verfügbaren Geodaten ausgewählt: Von den in §37 EEG +2021 definierten Flächen werden Flächen nach §37 Absatz 1 Nummer 2 Buchstaben c, +h und i berücksichtigt (für Details zur Methodik siehe +[methodisches Begleitdokument](https://zenodo.org/record/6794558) zum PV- und +Windflächenrechner). + +Dateien: + +- Freiflächen-PV auf Acker- und Grünlandflächen mit geringer Bodengüte (Soil + Quality Rating (SQR) < 40): `potentialarea_pv_agriculture_lfa-off_region.gpkg` +- Potenzialflächen für Freiflächen-PV entlang von Bundesautobahnen und + Schienenwegen (500m-Streifen): `potentialarea_pv_road_railway_region.gpkg` + +## Statistische Auswertung + +Die Flächen werden mit den Gemeindegrenzen verschnitten und den Gemeinden +zugeordnet. Je Gemeinde und obigem Flächentyp/Datei wird eine Flächensumme (in +km²) berechnet, siehe `potentialarea_pv_ground_area_stats_muns.csv`. Die +Gemeinden werden über den Schlüssel `municipality_id` (vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) +identifiziert. + +Des Weiteren werden die Flächenanteile der verfügbaren Potenzialgebiete - deren +Nutzung nur eingeschränkt möglich ist (z.B. durch Naturschutzgebieten etc.) - +gegenüber den gesamten Potenzialgebiete (für die Parametrierung der Regler) nach +`potentialarea_pv_ground_area_shares.json` exportiert. + +## Ausbauziele + +Es werden PV-Ausbauziele für die Region berechnet, indem die Bundesziele aus den +[BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) +i.H.v. 428 GW +([§4 EEG 2023](https://www.gesetze-im-internet.de/eeg_2014/__4.html): 400 GW) +anhand der regional verfügbaren Potenzialflächen disaggregiert werden. Hierzu +wird der Anteil der Flächensumme der beiden o.g. Flächentypen an den bundesweit +verfügbaren Flächen (Datensatz [rli_pv_wfr](../../raw/rli_pv_wfr/dataset.md)) +berechnet. Da in den o.g. Ausbauzielen nicht zwischen Freiflächen- und +Aufdach-PV unterschieden wird, wird ein Verhältnis von 50:50 angenommen, d.h. +bundesweit 214 GW auf Freiflächen-PV entfallen. 
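As a rough sanity check of the disaggregation just described (a sketch only: the percentages used here are the rounded shares reported below, while the pipeline works with the exact area ratio and therefore arrives at a slightly different value):

```python
# Back-of-the-envelope check of the regionalized PV ground target (illustrative).
target_pv_germany_mw = 428_000                      # BMWK T45 scenario; §4 EEG 2023 states 400 GW
target_pv_ground_mw = target_pv_germany_mw * 0.5    # assumed 50:50 split between ground and roof
area_share_region = 0.0038                          # rounded regional share of national potential area
print(round(target_pv_ground_mw * area_share_region))  # ~813 MW; exact ratio in pipeline: 819 MW
```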
+ +Es ergeben sich folgende Flächen- und Leistungsanteile: + +Gesamt: 0.38 % (819 MW) + +- Entlang von BAB und Schienenwegen: 0.13 % (278 MW) +- Acker- und Grünlandflächen mit geringer Bodengüte: 0.25 % (541 MW) + +Ergebnisse in `potentialarea_pv_ground_regionalized_targets.json` diff --git a/digipipe/store/datasets/potentialarea_pv_ground_region/scripts/create.py b/digipipe/store/datasets/potentialarea_pv_ground_region/scripts/create.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/potentialarea_pv_roof_region/config.yml b/digipipe/store/datasets/potentialarea_pv_roof_region/config.yml new file mode 100644 index 00000000..671e4259 --- /dev/null +++ b/digipipe/store/datasets/potentialarea_pv_roof_region/config.yml @@ -0,0 +1,8 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# PV targets split: share of pv_roof +# Caution: when adjusting this value, the pv_ground_share in dataset +# potentialarea_pv_ground_region must be adjusted accordingly! +pv_roof_share: 0.5 diff --git a/digipipe/store/datasets/potentialarea_pv_roof_region/create.smk b/digipipe/store/datasets/potentialarea_pv_roof_region/create.smk new file mode 100644 index 00000000..c5ad8a2e --- /dev/null +++ b/digipipe/store/datasets/potentialarea_pv_roof_region/create.smk @@ -0,0 +1,217 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" +import json +import re +import geopandas as gpd +import pandas as pd +from digipipe.scripts.data_io import load_json +from digipipe.scripts.geo import ( + overlay, + convert_to_multipolygon, + write_geofile +) +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG +) + +DATASET_PATH = get_abs_dataset_path( + "datasets", "potentialarea_pv_roof_region", data_dir=True +) + +rule overlay_muns: + """ + Overlay potential area with municipalities + """ + input: + area=get_abs_dataset_path( + "preprocessed", "rpg_abw_pv_roof_potential", data_dir=True + ) / "rpg_abw_pv_roof_potential.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG + output: + area=DATASET_PATH / "potentialarea_pv_roof_region.gpkg" + run: + data = gpd.read_file(input.area) + data = data.assign( + historic_preservation=data.historic_preservation.fillna( + False).replace({"true": True}) + ) + data = overlay( + gdf=data, + gdf_overlay=gpd.read_file(input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + gdf_use_centroid=True + ) + write_geofile( + gdf=convert_to_multipolygon(data), + file=output.area, + ) + +rule create_area_stats_muns: + """ + Create stats on PV roof potentials per mun for 1) all and 2) all but + historic buildings. 
+ """ + input: + area=DATASET_PATH / "potentialarea_pv_roof_region.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: + total=DATASET_PATH / "potentialarea_pv_roof_area_stats_muns.csv", + wo_historic=( + DATASET_PATH / + "potentialarea_pv_roof_wo_historic_area_stats_muns.csv" + ) + run: + print("PV roof potential area stats:") + muns = gpd.read_file(input.region_muns) + potential_all = gpd.read_file(input.area).fillna(0) + + # define columns + cols_base = [ + "municipality_id", "historic_preservation", "building_area_sqm" + ] + orientation_suffix = ["south", "north", "east", "west", "flat"] + cols_power = [ + f"installable_power_kw_{orient}" + for orient in orientation_suffix + ] + cols_power_new = { + c.replace("_kw_", "_"): (c, "sum") for c in cols_power + } + cols_energy = [ + f"energy_annual_mwh_{orient}" + for orient in orientation_suffix + ] + cols_energy_new = { + c.replace("_mwh_","_"): (c, "sum") for c in cols_energy + } + + # aggregate per mun + agg_cols = dict( + roof_count=("building_area_sqm", "count"), + building_area_sqm=("building_area_sqm", "sum"), + historic_preservation_count=("historic_preservation", "sum"), + **cols_power_new, + **cols_energy_new + ) + potential_wo_historic = potential_all.copy() + potential_wo_historic = potential_wo_historic.loc[ + ~potential_wo_historic.historic_preservation + ] + + for df, file in zip( + [potential_all, potential_wo_historic], + [output.total, output.wo_historic] + ): + df = df[ + cols_base + cols_power + cols_energy + ].groupby("municipality_id").agg(**agg_cols) + # kW -> MW + df[[_ for _ in cols_power_new.keys()]] = df[ + cols_power_new.keys()].div(1e3) + # Produce totals + df = df.assign( + installable_power_total=df[ + [_ for _ in cols_power_new.keys()]].sum(axis=1), + energy_annual_total=df[ + [_ for _ in cols_energy_new.keys()]].sum(axis=1), + ) + # Dump + df.to_csv(file) + +rule create_relative_deployment_stats_muns: + """ + Create stats on how much of the theoretically installable PV rooftop + potential is used per mun for 1) all and 2) all but historic buildings. 
+ """ + input: + area_stats_total=( + rules.datasets_potentialarea_pv_roof_region_create_area_stats_muns.output.total + ), + area_stats_wo_historic=( + rules.datasets_potentialarea_pv_roof_region_create_area_stats_muns.output.wo_historic + ), + unit_stats=( + rules.datasets_bnetza_mastr_pv_roof_region_create_power_stats_muns.output[0] + ) + output: + total=DATASET_PATH / "potentialarea_pv_roof_deployment_stats_muns.csv", + wo_historic=( + DATASET_PATH/ + "potentialarea_pv_roof_wo_historic_deployment_stats_muns.csv" + ) + run: + orientation_suffix = ["south", "north", "east", "west", "flat"] + cols_power = [ + f"installable_power_{orient}" + for orient in orientation_suffix + ] + pv_installed = pd.read_csv( + input.unit_stats, + usecols=["municipality_id", "capacity_net"], + index_col="municipality_id", + ) + + for file_in, file_out in zip( + [input.area_stats_total, input.area_stats_wo_historic], + [output.total, output.wo_historic] + ): + pv_potential = pd.read_csv( + file_in, + usecols=["municipality_id"] + cols_power, + index_col="municipality_id", + ) + pv_potential = pd.DataFrame( + pv_potential.sum(axis=1),columns=["installable_power"] + ) + pv_deployed = pd.concat( + [pv_installed.capacity_net, pv_potential.installable_power], + axis=1, + ) + pv_deployed = pv_deployed.assign( + relative_deployment=( + pv_installed.capacity_net.div( + pv_potential.installable_power + ) + ) + ) + pv_deployed.to_csv(file_out) + +rule regionalize_state_targets: + """ + Calculate PV roof targets of region + """ + input: + osm_buildings_stats=get_abs_dataset_path( + "datasets", "osm_buildings", data_dir=True + ) / "osm_buildings_ground_area.json", + el_capacity_targets=get_abs_dataset_path( + "preprocessed", "bmwk_long_term_scenarios" + ) / "data" / "T45-Strom_electricity_installed_power_reformatted.csv", + output: + DATASET_PATH / "potentialarea_pv_roof_regionalized_targets.json" + run: + osm_buildings_stats = load_json(input.osm_buildings_stats) + + # Power target from longterm scenario + targets = pd.read_csv(input.el_capacity_targets, index_col="year") + + target_cap = targets.loc[ + targets.technology == "pv" + ].loc[2045].capacity * 1e3 * config.get("pv_roof_share") + + with open(output[0], "w", encoding="utf8") as f: + json.dump( + { + # Power targets (disaggregated) + "target_power_total": round( + target_cap * + osm_buildings_stats["building_ground_area_share_region"] + ), + }, + f, + indent=4 + ) diff --git a/digipipe/store/datasets/potentialarea_pv_roof_region/data/.gitkeep b/digipipe/store/datasets/potentialarea_pv_roof_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/potentialarea_pv_roof_region/dataset.md b/digipipe/store/datasets/potentialarea_pv_roof_region/dataset.md new file mode 100644 index 00000000..44ca28a3 --- /dev/null +++ b/digipipe/store/datasets/potentialarea_pv_roof_region/dataset.md @@ -0,0 +1,42 @@ +# Dachflächenpotenzial PV-Aufdachanlagen in ABW + +Abschätzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in +Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft aus Datensatz +[rpg_abw_pv_roof_potential](../../raw/rpg_abw_pv_roof_potential/dataset.md). + +Die Gebäudezentroide werden mit den Gemeindegrenzen verschnitten und den +Gemeinden zugeordnet. 
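The centroid-based assignment mentioned above is done via `digipipe.scripts.geo.overlay(..., gdf_use_centroid=True)` in the Snakefile. A minimal, roughly equivalent sketch with plain GeoPandas (the file names are illustrative, not the pipeline's actual paths):

```python
# Assign each building to the municipality containing its centroid (sketch).
import geopandas as gpd

buildings = gpd.read_file("rpg_abw_pv_roof_potential.gpkg")      # illustrative path
muns = gpd.read_file("bkg_vg250_muns_region.gpkg")[["id", "geometry"]]

centroids = buildings.assign(geometry=buildings.geometry.centroid)
assigned = gpd.sjoin(
    centroids,
    muns.rename(columns={"id": "municipality_id"}),
    how="inner",
    predicate="within",
)  # one row per building, now carrying municipality_id
```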
+Ergebnisdaten: + +- Alle Gebäude: `potentialarea_pv_roof_area_stats_muns.csv` +- Alle nicht denkmalgeschützten Gebäude: + `potentialarea_pv_roof_wo_historic_area_stats_muns.csv` + +Des Weiteren wird je Gemeinde der relative Anteil der bereits installierten +Anlagenleistung an der theoretisch installierbaren Leistung (bei +100% Dachnutzung) berechnet. +Ergebnisdaten: + +- Alle Gebäude: `potentialarea_pv_roof_deployment_stats_muns.csv` +- Alle nicht denkmalgeschützten Gebäude: + `potentialarea_pv_roof_wo_historic_deployment_stats_muns.csv` + +Die Gemeinden werden über den Schlüssel `municipality_id` (vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) +identifiziert. + +## Ausbauziele + +Es werden PV-Ausbauziele für die Region berechnet, indem die Bundesziele aus den +[BMWK Langfristszenarien](../../preprocessed/bmwk_long_term_scenarios/dataset.md) +i.H.v. 428 GW +([§4 EEG 2023](https://www.gesetze-im-internet.de/eeg_2014/__4.html): 400 GW) +anhand der Gebäudegrundflächen disaggregiert werden. Hierzu wird der Anteil der +Gebäudegrundflächen in der Region an der bundesweiten Gebäudegrundflächen +berechnet (s. Datensatz [osm_buildings](../osm_buildings/dataset.md)) und die +Ziele linear skaliert. Da in den o.g. Ausbauzielen nicht zwischen Freiflächen- +und Aufdach-PV unterschieden wird, wird ein Verhältnis von 50:50 angenommen, +d.h. bundesweit 214 GW auf Aufdach-PV entfallen. + +Der Anteil beträgt 0,62 % und das Leistungsziel damit 1327 MW, s. +`potentialarea_pv_roof_regionalized_targets.json`. diff --git a/digipipe/store/datasets/potentialarea_pv_roof_region/scripts/create.py b/digipipe/store/datasets/potentialarea_pv_roof_region/scripts/create.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/potentialarea_wind_region/config.yml b/digipipe/store/datasets/potentialarea_wind_region/config.yml new file mode 100644 index 00000000..79292e6f --- /dev/null +++ b/digipipe/store/datasets/potentialarea_wind_region/config.yml @@ -0,0 +1,24 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Area names (which identify files from preprocessing) +areas: + [ + "stp_2018_vreg", + "stp_2027_repowering", + "stp_2027_search_area_forest_area", + "stp_2027_search_area_open_area", + "stp_2027_vr" + ] + +# Attributes' captions for the app +captions: + {"name_1": "Typ", + "name_2": "Lage", + "name_3": "ID/Nr.", + "name_4": "Anmerkung", + "plan_status": "Plan Status", + "decision_date": "Beschlussdatum", + "authorization_date": "Genehmigungsdatum", + "in_force_date": "Datum des Inkrafttretens"} diff --git a/digipipe/store/datasets/potentialarea_wind_region/create.smk b/digipipe/store/datasets/potentialarea_wind_region/create.smk new file mode 100644 index 00000000..2463973a --- /dev/null +++ b/digipipe/store/datasets/potentialarea_wind_region/create.smk @@ -0,0 +1,103 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" +import json +import re +import geopandas as gpd +import pandas as pd +from pathlib import Path +from digipipe.scripts.geo import ( + overlay, + convert_to_multipolygon, + write_geofile +) +from digipipe.store.utils import ( + get_abs_dataset_path, + PATH_TO_REGION_MUNICIPALITIES_GPKG +) + +DATASET_PATH = get_abs_dataset_path( + "datasets", "potentialarea_wind_region", data_dir=True +) + +rule overlay_muns: + """ + Overlay potential area with municipalities + """ + input: + area=get_abs_dataset_path( + "preprocessed", "rpg_abw_regional_plan") / "data" / "{file}.gpkg", + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG + output: + area=DATASET_PATH / "potentialarea_wind_{file}.gpkg" + run: + data = gpd.read_file(input.area) + data = overlay( + gdf=data, + gdf_overlay=gpd.read_file(input.region_muns), + retain_rename_overlay_columns={"id": "municipality_id"}, + ) + write_geofile( + gdf=convert_to_multipolygon(data), + file=output.area, + ) + +rule create_area_stats_muns: + """ + Create stats on wind potential areas per mun + """ + input: + area=expand( + DATASET_PATH / "potentialarea_wind_{area}.gpkg", + area=config["areas"], + ), + region_muns=PATH_TO_REGION_MUNICIPALITIES_GPKG, + output: DATASET_PATH / "potentialarea_wind_area_stats_muns.csv" + run: + print("Wind potential area stats:") + muns = gpd.read_file(input.region_muns) + area_dict = {} + + # Calc areas per area type file + for file in input.area: + area_name = re.findall( + "potentialarea_wind_(.*).gpkg", + Path(file).name, + )[0] + data = gpd.read_file(file) + data["area_km2"] = data.area / 1e6 + area_km2 = data[ + ["municipality_id", "area_km2"] + ].groupby("municipality_id").sum() + + # Set area of non-occurring muns to 0 + area_km2 = area_km2.reindex(muns.id, fill_value=0) + area_dict[area_name] = area_km2.to_dict()["area_km2"] + print( + f" Total area for {area_name}: " + f"{round(float(area_km2.sum()), 1)} sqm" + ) + + area_df = pd.DataFrame(area_dict) + area_df.index.name="municipality_id" + area_df.to_csv(output[0]) + +rule create_captions: + """ + Create attribute captions for app + """ + input: rules.datasets_potentialarea_wind_region_create_area_stats_muns.input.area + output: DATASET_PATH / "potentialarea_wind_attribute_captions.json" + run: + captions = { + "datasets_caption_map": { + Path(f).stem: "potentialarea_wind" for f in input + }, + "captions": { + "potentialarea_wind": config["captions"] + } + } + with open(output[0], "w", encoding="utf8") as f: + json.dump(captions, f, indent=4) diff --git a/digipipe/store/datasets/potentialarea_wind_region/data/.gitkeep b/digipipe/store/datasets/potentialarea_wind_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/potentialarea_wind_region/dataset.md b/digipipe/store/datasets/potentialarea_wind_region/dataset.md new file mode 100644 index 00000000..1200da2b --- /dev/null +++ b/digipipe/store/datasets/potentialarea_wind_region/dataset.md @@ -0,0 +1,29 @@ +# Potenzialgebiete Windenergie + +Potenzialgebiete für die Errichtung von Windenergieanlagen, basierend auf den +Teilplänen Wind der Regionalen Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg +aus +[rpg_abw_regional_plan](../../preprocessed/rpg_abw_regional_plan/dataset.md). 
+ +Dateien: + +- STP Wind 2018 - Vorrang-/Eignungsgebiete: + `potentialarea_wind_stp_2018_vreg.gpkg` +- STP Wind 2027 - Planabsicht Vorranggebiete: + `potentialarea_wind_stp_2027_vr.gpkg` +- STP Wind 2027 - Planabsicht Repoweringgebiete: + `potentialarea_wind_stp_2027_repowering.gpkg` +- STP Wind 2027 - Suchraum Wald: + `potentialarea_wind_stp_2027_search_area_forest_area.gpkg` +- STP Wind 2027 - Suchraum Offenland: + `potentialarea_wind_stp_2027_search_area_open_area.gpkg` + +Die darin verwendeten Attributtexte werden in die Datei +`potentialarea_wind_attribute_captions.json` exportiert. + +Die Flächen werden mit den Gemeindegrenzen verschnitten und den Gemeinden +zugeordnet. Je Gemeinde und obigem Flächentyp/Datei wird eine Flächensumme (in +km²) berechnet, siehe `potentialarea_wind_area_stats_muns.csv`. Die Gemeinden +werden über den Schlüssel `municipality_id` (vgl. +[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)) +identifiziert. diff --git a/digipipe/store/datasets/potentialarea_wind_region/scripts/create.py b/digipipe/store/datasets/potentialarea_wind_region/scripts/create.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/renewable_feedin/config.yml b/digipipe/store/datasets/renewable_feedin/config.yml new file mode 100644 index 00000000..17d3af5a --- /dev/null +++ b/digipipe/store/datasets/renewable_feedin/config.yml @@ -0,0 +1,3 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## diff --git a/digipipe/store/datasets/renewable_feedin/create.smk b/digipipe/store/datasets/renewable_feedin/create.smk new file mode 100644 index 00000000..362223bd --- /dev/null +++ b/digipipe/store/datasets/renewable_feedin/create.smk @@ -0,0 +1,24 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" +import pandas as pd +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("datasets", "renewable_feedin") + +rule normalize_feedin_timeseries: + """ + Normalize feedin timeseries and drop time index + """ + input: + get_abs_dataset_path( + "raw", "renewables.ninja_feedin") / "data" / + "{tech}_feedin_timeseries.csv" + output: + DATASET_PATH / "data" / "{tech}_feedin_timeseries.csv", + run: + feedin_timeseries = pd.read_csv(input[0]).power + feedin_timeseries = feedin_timeseries.div(feedin_timeseries.sum()) + feedin_timeseries.to_csv(output[0]) diff --git a/digipipe/store/datasets/renewable_feedin/data/.gitkeep b/digipipe/store/datasets/renewable_feedin/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/renewable_feedin/dataset.md b/digipipe/store/datasets/renewable_feedin/dataset.md new file mode 100644 index 00000000..2d525af5 --- /dev/null +++ b/digipipe/store/datasets/renewable_feedin/dataset.md @@ -0,0 +1,16 @@ +# EE-Einspeisezeitreihen + +Einspeisezeitreihen für Erneuerbare Energien. Als Wetterjahr wird 2011 +verwendet, siehe [Szenarien](../../../../docs/sections/scenarios.md). 
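The `normalize_feedin_timeseries` rule above scales each profile so that its values sum to 1. A sketch of the intended downstream use (an assumption, not part of this diff): multiplying the normalized profile by an annual total reconstructs an absolute timeseries.

```python
# Reconstruct an absolute feed-in timeseries from a normalized profile (sketch).
import pandas as pd

# Column layout as written by the rule above: index plus normalized "power" values.
profile = pd.read_csv("wind_feedin_timeseries.csv", index_col=0).squeeze("columns")
annual_generation_mwh = 1_000                    # assumed annual total, for illustration
timeseries_mwh = profile * annual_generation_mwh
assert abs(timeseries_mwh.sum() - annual_generation_mwh) < 1e-3
```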
+ +Raw dataset mit methodischer Beschreibung: +[renewables.ninja_feedin](../../raw/renewables.ninja_feedin/dataset.md) + +## Einspeisezeitreihen + +Zeitreihe normiert auf Summe=1 für + +- Windenergie: `wind_feedin_timeseries.csv` +- Photovoltaik: `pv_feedin_timeseries.csv` +- Solarthermie: `st_feedin_timeseries.csv` +- Laufwasserkraft: `ror_feedin_timeseries.csv` diff --git a/digipipe/store/datasets/renewable_feedin/scripts/create.py b/digipipe/store/datasets/renewable_feedin/scripts/create.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/rli_pv_wfr_region/config.yml b/digipipe/store/datasets/rli_pv_wfr_region/config.yml new file mode 100644 index 00000000..befbab8d --- /dev/null +++ b/digipipe/store/datasets/rli_pv_wfr_region/config.yml @@ -0,0 +1,34 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Files to be clipped to region +files_clip: + [ + "air_traffic_control_system.gpkg", + "aviation.gpkg", + "biosphere_reserve.gpkg", + "drinking_water_protection_area.gpkg", + "fauna_flora_habitat.gpkg", + "floodplain.gpkg", + "forest.gpkg", + "grid.gpkg", + "industry.gpkg", + "landscape_protection_area.gpkg", + "less_favoured_areas_agricultural.gpkg", + "military.gpkg", + "national_park.gpkg", + "nature_conservation_area.gpkg", + "potentialarea_pv_agriculture_lfa-off.gpkg", + "potentialarea_pv_road_railway.gpkg", + "railway.gpkg", + "road.gpkg", + "road_railway-500m.gpkg", + "settlement-0m.gpkg", + "slope.gpkg", + "soil_quality_high.gpkg", + "soil_quality_low.gpkg", + "special_protection_area.gpkg", + "water.gpkg", + "wetland_ramsar.gpkg" + ] diff --git a/digipipe/store/datasets/rli_pv_wfr_region/create.smk b/digipipe/store/datasets/rli_pv_wfr_region/create.smk new file mode 100644 index 00000000..02b2589e --- /dev/null +++ b/digipipe/store/datasets/rli_pv_wfr_region/create.smk @@ -0,0 +1,48 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +import geopandas as gpd +from digipipe.config import GLOBAL_CONFIG +from digipipe.scripts.geo import ( + overlay, + convert_to_multipolygon, + write_geofile +) +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path( + "datasets", "rli_pv_wfr_region", data_dir=True) + +rule clip_to_region: + """ + Clip to region and reproject + """ + input: + geodata=get_abs_dataset_path( + "preprocessed", "rli_pv_wfr", data_dir=True + ) / "{file}.gpkg", + region=rules.datasets_bkg_vg250_region_create.output + output: + geodata=DATASET_PATH / "{file}_region.gpkg" + run: + print(f"Clipping layer {wildcards.file} to region...") + + geodata = gpd.read_file(input.geodata).to_crs( + GLOBAL_CONFIG["global"]["geodata"]["crs"] + ) + geodata = overlay( + gdf=geodata, + gdf_overlay=gpd.read_file(input.region[0]), + ).drop(columns=["id"]) + + if len(geodata) == 0: + print(" Layer has no data in region!") + + write_geofile( + gdf=convert_to_multipolygon(geodata), + file=output.geodata, + layer_name=wildcards.file, + ) diff --git a/digipipe/store/datasets/rli_pv_wfr_region/data/.gitkeep b/digipipe/store/datasets/rli_pv_wfr_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/rli_pv_wfr_region/dataset.md b/digipipe/store/datasets/rli_pv_wfr_region/dataset.md new file mode 100644 index 00000000..aa48988b --- /dev/null +++ b/digipipe/store/datasets/rli_pv_wfr_region/dataset.md @@ -0,0 +1,8 @@ +# Geodaten PV- und Windflächenrechner + +Geodaten aus dem [PV- und Windflächenrechner](https://www.agora-energiewende.de/service/pv-und-windflaechenrechner/), +extrahiert, zu LAEA Europe (EPSG:3035) umprojiziert und auf die Regionsgrenzen +zugeschnitten. + +Preprocessed dataset: +[rli_pv_windflaechenrechner](../../preprocessed/rli_pv_wfr/dataset.md) diff --git a/digipipe/store/datasets/technology_data/config.yml b/digipipe/store/datasets/technology_data/config.yml new file mode 100644 index 00000000..17d3af5a --- /dev/null +++ b/digipipe/store/datasets/technology_data/config.yml @@ -0,0 +1,3 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## diff --git a/digipipe/store/datasets/technology_data/create.smk b/digipipe/store/datasets/technology_data/create.smk new file mode 100644 index 00000000..fe0c27a9 --- /dev/null +++ b/digipipe/store/datasets/technology_data/create.smk @@ -0,0 +1,24 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path( + "datasets", "technology_data", data_dir=True) + +rule copy_files: + """ + Copy technology data from raw + """ + input: + get_abs_dataset_path( + "raw", "technology_data") / "data" / "technology_data.json" + output: + DATASET_PATH / "technology_data.json" + shell: + """ + cp -p {input} {output} + """ diff --git a/digipipe/store/datasets/technology_data/data/.gitkeep b/digipipe/store/datasets/technology_data/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/datasets/technology_data/dataset.md b/digipipe/store/datasets/technology_data/dataset.md new file mode 100644 index 00000000..18a2b7b9 --- /dev/null +++ b/digipipe/store/datasets/technology_data/dataset.md @@ -0,0 +1,5 @@ +# Technologiedaten + +Allgemeine Technologiedaten. + +Raw dataset: [technology_data](../../raw/technology_data/dataset.md) diff --git a/digipipe/store/preprocessed/TEMPLATE/config.yml b/digipipe/store/preprocessed/TEMPLATE/config.yml new file mode 100644 index 00000000..441e79a0 --- /dev/null +++ b/digipipe/store/preprocessed/TEMPLATE/config.yml @@ -0,0 +1,5 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Further custom configuration goes here diff --git a/digipipe/store/preprocessed/TEMPLATE/create.smk b/digipipe/store/preprocessed/TEMPLATE/create.smk new file mode 100644 index 00000000..52a00a07 --- /dev/null +++ b/digipipe/store/preprocessed/TEMPLATE/create.smk @@ -0,0 +1,24 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +Please add a docstring with a short description to each rule. +""" +from pathlib import Path +from digipipe.store.utils import get_abs_dataset_path + +#configfile: get_abs_dataset_path("preprocessed", ".TEMPLATE", data_dir=False) / "config.yml" + + +# Use function `get_abs_dataset_path()` to get path to dataset +rule template_simple_copy_file1: + input: get_abs_dataset_path("raw", ".TEMPLATE") / "some_timeseries.csv" + output: get_abs_dataset_path("preprocessed", ".TEMPLATE") / "some_timeseries.csv" + shell: "cp -p {input} {output}" + +# Or you can use relative paths which may break as snakemake can be invoked +# from directories digipipe/ or digipipe/workflow/. +rule template_simple_copy_file2: + input: Path(".") / "store" / "raw" / ".TEMPLATE" / "data" / "some_timeseries.csv" + output: Path(".") / "store" / "preprocessed" / ".TEMPLATE" / "data" / "some_timeseries.csv" + shell: "cp -p {input} {output}" diff --git a/digipipe/store/preprocessed/TEMPLATE/data/.gitkeep b/digipipe/store/preprocessed/TEMPLATE/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/TEMPLATE/dataset.md b/digipipe/store/preprocessed/TEMPLATE/dataset.md new file mode 100644 index 00000000..15bd7af4 --- /dev/null +++ b/digipipe/store/preprocessed/TEMPLATE/dataset.md @@ -0,0 +1,4 @@ +# Name des Datensatzes + +Eine kurze Beschreibung des Datensatzes. +Diese hilft der Dokumentation und bei der Erstellung des Berichts. 
diff --git a/digipipe/store/preprocessed/ageb_energy_balance/__init__.py b/digipipe/store/preprocessed/ageb_energy_balance/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/ageb_energy_balance/config.yml b/digipipe/store/preprocessed/ageb_energy_balance/config.yml new file mode 100644 index 00000000..03f9fb2d --- /dev/null +++ b/digipipe/store/preprocessed/ageb_energy_balance/config.yml @@ -0,0 +1,54 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Params for PDF extraction + +# Relative area on page in % +area: [58, 0, 86, 100] +sectors: + hh: + page: 10 + column_names: + ["carrier", + "space_heating", + "hot_water", + "process_heat", + "heat_total", + "space_cooling", + "process_cooling", + "cooling_total", + "mech_energy", + "ICT", + "lighting", + "total"] + cts: + page: 14 + column_names: + ["carrier", + "space_heating", + "hot_water", + "process_heat", + "heat_total", + "space_cooling", + "process_cooling", + "cooling_total", + "mech_energy", + "ICT", + "lighting", + "total"] + ind: + page: 6 + column_names: + ["carrier", + "space_heating", + "hot_water", + "process_heat", + "heat_total", + "space_cooling", + "process_cooling", + "cooling_total", + "mech_energy", + "ICT", + "lighting", + "total"] diff --git a/digipipe/store/preprocessed/ageb_energy_balance/create.smk b/digipipe/store/preprocessed/ageb_energy_balance/create.smk new file mode 100644 index 00000000..9d58a1be --- /dev/null +++ b/digipipe/store/preprocessed/ageb_energy_balance/create.smk @@ -0,0 +1,42 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" +import tabula +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "ageb_energy_balance", data_dir=True) + +rule convert: + """ + Convert PDF to CSVs + """ + input: + get_abs_dataset_path("raw", "ageb_energy_balance") + / "data" / "AGEB_21p2_V3_20221222.pdf" + output: + DATASET_PATH / "ageb_energy_balance_germany_{sector}_twh_2021.csv", + run: + # Read data from PDF + demand = tabula.read_pdf( + input[0], + pages=[config["sectors"][wildcards.sector]["page"]], + area=config["area"], + relative_area=True + )[0] + + # Processing + demand.drop(columns=["Unnamed: 0"], inplace=True) + demand.columns=config["sectors"][wildcards.sector]["column_names"] + demand = demand.set_index("carrier").replace({"-": 0}) + for col in demand.columns: + demand[col] = demand[col].str.replace( + ',', '.').str.replace(" ", "").astype(float).fillna(0) + demand.drop("Insgesamt", axis=0, inplace=True) + + # PJ to TWh + demand = demand.div(3.6) + + # Dump as CSV + demand.to_csv(output[0]) diff --git a/digipipe/store/preprocessed/ageb_energy_balance/data/.gitkeep b/digipipe/store/preprocessed/ageb_energy_balance/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/ageb_energy_balance/dataset.md b/digipipe/store/preprocessed/ageb_energy_balance/dataset.md new file mode 100644 index 00000000..27d2a7f3 --- /dev/null +++ b/digipipe/store/preprocessed/ageb_energy_balance/dataset.md @@ -0,0 +1,7 @@ +# AGEB – Anwendungsbilanzen für die Endenergiesektoren 2011 bis 2021 + +Detaillierte Anwendungsbilanzen der Endenergiesektoren für 2020 und 2021 sowie +zusammenfassende Zeitreihen zum Endenergieverbrauch nach Energieträgern und +Anwendungszwecken für Jahre von 2011 bis 2021 der AG Energiebilanzen. + +Aus PDF extrahierte Tabellenwerte für Haushalte, GHD und Industrie. diff --git a/digipipe/store/preprocessed/ba_employment/__init__.py b/digipipe/store/preprocessed/ba_employment/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/ba_employment/config.yml b/digipipe/store/preprocessed/ba_employment/config.yml new file mode 100644 index 00000000..3ccceb9e --- /dev/null +++ b/digipipe/store/preprocessed/ba_employment/config.yml @@ -0,0 +1,21 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Excel file +excel_file: + sheet_name: "Gemeindedaten" + usecols: "A, K, O" + skiprows: 8 + names: + [ + "ags", + "employees_total", + "companies_total" + ] + dtype: + { + "ags": str, + "employees_total": str, + "companies_total": str + } diff --git a/digipipe/store/preprocessed/ba_employment/create.smk b/digipipe/store/preprocessed/ba_employment/create.smk new file mode 100644 index 00000000..1e34e675 --- /dev/null +++ b/digipipe/store/preprocessed/ba_employment/create.smk @@ -0,0 +1,35 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "ba_employment") + +rule unzip: + """ + Unzip file + """ + input: + get_abs_dataset_path("raw", "ba_employment") / "data" / "gemband-dlk-0-202206-zip.zip" + output: + files = DATASET_PATH / "data" / "gemband_dlk_0.xlsb" + params: + outpath=DATASET_PATH / "data" + shell: + """ + unzip -j {input} -d {params.outpath} + """ + +rule create: + """ + Extract municipality data from Excel file and save to CSV + """ + input: + DATASET_PATH / "data" / "gemband_dlk_0.xlsb" + output: + DATASET_PATH / "data" / "employment_muns.csv" + script: + DATASET_PATH / "scripts" / "create.py" diff --git a/digipipe/store/preprocessed/ba_employment/data/.gitkeep b/digipipe/store/preprocessed/ba_employment/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/ba_employment/dataset.md b/digipipe/store/preprocessed/ba_employment/dataset.md new file mode 100644 index 00000000..a4ea6297 --- /dev/null +++ b/digipipe/store/preprocessed/ba_employment/dataset.md @@ -0,0 +1,6 @@ +# Sozialversicherungspflichtig Beschäftigte und Betriebe + +Gemeindedaten der sozialversicherungspflichtig Beschäftigten am 30.06.2022 nach +Wohn- und Arbeitsort - Deutschland, Länder, Kreise und Gemeinden (Jahreszahlen) +der Bundesagentur für Arbeit. +Anzahl Beschäftigte und Betriebe extrahiert und in CSV konvertiert. diff --git a/digipipe/store/preprocessed/ba_employment/scripts/create.py b/digipipe/store/preprocessed/ba_employment/scripts/create.py new file mode 100644 index 00000000..efe3ef85 --- /dev/null +++ b/digipipe/store/preprocessed/ba_employment/scripts/create.py @@ -0,0 +1,27 @@ +import pandas as pd + + +def process() -> None: + # Get Excel config + excel_cfg = snakemake.config["excel_file"] + + # Read file + data = pd.read_excel( + snakemake.input[0], + **excel_cfg, + engine="pyxlsb", + ) + + # Set empty values to 0 and convert dtypes + data = data.replace(["*", " "], 0) + data = data.astype({"employees_total": int, "companies_total": int}) + + # Drop empoty and aggregated rows + data = data.loc[data.ags.str.len() == 8].set_index("ags") + + # Write + data.to_csv(snakemake.output[0]) + + +if __name__ == "__main__": + process() diff --git a/digipipe/store/preprocessed/bkg_vg250/__init__.py b/digipipe/store/preprocessed/bkg_vg250/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/bkg_vg250/config.yml b/digipipe/store/preprocessed/bkg_vg250/config.yml new file mode 100644 index 00000000..fd895984 --- /dev/null +++ b/digipipe/store/preprocessed/bkg_vg250/config.yml @@ -0,0 +1,14 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Layers to be extracted +layers: + [ + "vg250_gem", + "vg250_krs", + "vg250_lan", + "vg250_rbz", + "vg250_vwg", + "vg250_sta" + ] diff --git a/digipipe/store/preprocessed/bkg_vg250/create.smk b/digipipe/store/preprocessed/bkg_vg250/create.smk new file mode 100644 index 00000000..105d0838 --- /dev/null +++ b/digipipe/store/preprocessed/bkg_vg250/create.smk @@ -0,0 +1,26 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "bkg_vg250") + +rule create: + input: + get_abs_dataset_path("raw", "bkg_vg250") / "data" / "vg250_01-01.utm32s.gpkg.ebenen.zip" + output: + DATASET_PATH / "data" / "bkg_vg250.gpkg" + params: + outpath=DATASET_PATH / "data", + original_file=DATASET_PATH / "data" / "DE_VG250.gpkg", + file_path_in_zip=str("vg250_01-01.utm32s.gpkg.ebenen/vg250_ebenen_0101/DE_VG250.gpkg"), + layers=" ".join(config["layers"]) + shell: + """ + unzip -j {input} {params.file_path_in_zip} -d {params.outpath} + ogr2ogr -f GPKG -t_srs EPSG:3035 {output} {params.original_file} {params.layers} + rm {params.original_file} + """ diff --git a/digipipe/store/preprocessed/bkg_vg250/data/.gitkeep b/digipipe/store/preprocessed/bkg_vg250/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/bkg_vg250/dataset.md b/digipipe/store/preprocessed/bkg_vg250/dataset.md new file mode 100644 index 00000000..a0c0e25a --- /dev/null +++ b/digipipe/store/preprocessed/bkg_vg250/dataset.md @@ -0,0 +1,4 @@ +# Administative areas of Germany + +Geodata of administrative areas (Verwaltungsgebiete 1:250 000) extracted, +reprojected to LAEA Europe(EPSG:3035) and converted to Geopackage. diff --git a/digipipe/store/preprocessed/bmwk_long_term_scenarios/config.yml b/digipipe/store/preprocessed/bmwk_long_term_scenarios/config.yml new file mode 100644 index 00000000..b1e57502 --- /dev/null +++ b/digipipe/store/preprocessed/bmwk_long_term_scenarios/config.yml @@ -0,0 +1,149 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Files to be extracted from raw dataset +files_extract: + [ + "T45-Strom_buildings_heating_demand_by_carrier.csv", + "T45-Strom_buildings_heating_structure_by_technology.csv", + "T45-Strom_cts_demand.csv", + "T45-Strom_electricity_installed_power.csv", + "T45-Strom_Generation_Heatgrids_Germany.csv", + "T45-Strom_hh_demand.csv", + "T45-Strom_ind_demand.csv", + "TN-Strom_buildings_heating_demand_by_carrier.csv", + "TN-Strom_buildings_heating_structure_by_technology.csv", + "TN-Strom_cts_demand.csv", + "TN-Strom_hh_demand.csv", + "TN-Strom_ind_demand.csv", + ] + +# New column names +rename_columns: + T45-Strom_buildings_heating_demand_by_carrier: + {" Jahr / Year": "year", + "Energiebedarf in TWh / Energy Demand in TWh": "demand", + "Energieträger / Energy Carrier": "carrier"} + T45-Strom_buildings_heating_structure_by_technology: + {"Jahr / Year": "year", + "Anzahl der Heizungen in Mio. 
/ Number of heaters in mil.": "heating_units_count", + "Typ / type": "heating_technology"} + T45-Strom_cts_demand: + {"Jahr / Year": "year", + "Energiebedarf in TWh / Energy Demand in TWh": "demand", + "Energieträger / Energy Carrier": "carrier"} + T45-Strom_electricity_installed_power: + {"Jahr / Year": "year", + "Technologie / Technology": "technology", + "Leistung in GW / Capacity in GW": "capacity"} + T45-Strom_Generation_Heatgrids_Germany: + {"Jahr / Year": "year", + "Technologie / Technology": "technology", + "Erzeugung in TWh / Generation in TWh": "generation"} + T45-Strom_hh_demand: + {"Jahr / Year": "year", + "Energiebedarf in TWh / Energy Demand in TWh": "demand", + "Energieträger / Energy Carrier": "carrier"} + T45-Strom_ind_demand: + {"Jahr / Year": "year", + "Energiebedarf in TWh / Energy Demand in TWh": "demand", + "Energieträger / Energy Carrier": "carrier"} + TN-Strom_buildings_heating_demand_by_carrier: + {"Jahr": "year", + "Energiebedarf in TWh": "demand", + "Typ": "carrier"} + TN-Strom_buildings_heating_structure_by_technology: + {"Jahr": "year", + "Anlagen in Mio.": "heating_units_count", + "Typ": "heating_technology"} + TN-Strom_cts_demand: + {"Jahr": "year", + "TWh": "demand", + "Typ": "carrier"} + TN-Strom_hh_demand: + {"Jahr": "year", + "TWh": "demand", + "Typ": "carrier"} + TN-Strom_ind_demand: + {"Jahr": "year", + "TWh": "demand", + "Energy Carrier": "carrier"} + +# New names for carriers and technologies +rename_entries: + {'Abfall KWK': 'waste_chp', + 'Andere': 'other', + 'Andere Fossile': 'other_fossil_fuel', + 'Andere fossile': 'other_fossil_fuel', + 'Andere fossile KWK': 'other_fossil_fuel_chp', + 'BHKW': 'chp_small', + 'Biobenzin': 'biopetrol', + 'Biodiesel': 'biodiesel', + 'Biogas': 'biogas', + 'Biokraftstoffe': 'biofuels', + 'Biomasse': 'biomass', + 'Biomasse, fest': 'biomass', + 'Biomasse KWK': 'biomass_chp', + 'Biotreibstoffe': 'biofuels', + 'Braunkohle': 'lignite', + 'EE-Methan': 'renewable_methane', + 'Elektrokessel': 'electricity_direct_heating', + 'Erdgas': 'natural_gas', + 'Fernwärme': 'district_heating', + 'Fossile': 'fossil_fuel', + 'Gas': 'natural_gas', + 'Gas-Heizkessel': 'gas_boiler', + 'Gas Heizkessel': 'gas_boiler', + 'Gas KWK': 'methane_chp', + 'Geothermie': 'geothermal', + 'Gichtgas': 'blast_furnace_gas', + 'Großwärmepumpen': 'heat_pump', + 'Hackschnitzel': 'woodchip', + 'Hackschnitzel-Heizkessel': 'woodchip_boiler', + 'Hackschnitzel-Heizungen': 'woodchip_boiler', + 'Heizöl': 'fuel_oil', + 'Hybrid-Wärmepumpen': 'hybrid_heat_pump', + 'Kohle': 'lignite', + 'Kokereigas, Stadtgas, LPG, Raffineriegas': 'coke_illuminating_refinery_gas_lpg', + 'Koks': 'coke', + 'Methan': 'methane', + 'Mineralöle': 'mineral_oil', + 'Müll nicht erneuerbar': 'waste_non-renewable', + 'Müll, erneuerbar': 'waste_renewable', + 'Müll, nicht erneuerbar': 'waste_non-renewable', + 'Nah- und Fernwärme': 'district_heating', + 'PV': 'pv', + 'Pellet-Heizkessel': 'pellet_boiler', + 'Pellets': 'pellet', + 'Pflanzenöl': 'plant_oil', + 'PtG': 'ptg', + 'Solarenergie': 'solar_energy', + 'Solarthermie': 'solar_thermal', + 'Solarthermie Hzg. u. 
TW': 'solar_thermal', + 'Steinkohle': 'black_coal', + 'Strom': 'electricity', + 'Strom Hilfsenergie': 'electricity_auxiliary', + 'Strom Wärmepumpe': 'electricity_heat_pump', + 'Strom direkt': 'electricity_direct_heating', + 'Strom direktelektrisch': 'electricity_direct_heating', + 'Strom-Direktheizungen': 'electric_boiler', + 'Umgebungswärme': 'ambient_heat', + 'Umgebungswärme, Boden': 'ambient_heat_soil', + 'Umgebungswärme, Luft': 'ambient_heat_air', + 'Umweltwärme Wärmepumpe': 'ambient_heat_heat_pump', + 'WP-Strom': 'electricity_heat_pump', + 'WP-Umgebungswärme': 'ambient_heat_heat_pump', + 'Wasserkraft': 'hydro', + 'Wasserstoff': 'hydrogen', + 'Wasserstoff Kessel': 'hydrogen_boiler', + 'Wasserstoff KWK': 'hydrogen_chp', + 'Wind an Land': 'wind_onshore', + 'Wind auf See': 'sind_offshore', + 'Wärmenetzanschlüsse': 'district_heating_connection', + 'Wärmenetze': 'district_heating_connection', + 'Wärmepumpen': 'heat_pump', + 'feste biogene Stoffe': 'biomass', + 'Öl-Heizkessel': 'fuel_oil_boiler', + 'Übrige Erneuerbare': 'other_renewables', + 'Übrige, erneuerbar': 'other_renewables'} diff --git a/digipipe/store/preprocessed/bmwk_long_term_scenarios/create.smk b/digipipe/store/preprocessed/bmwk_long_term_scenarios/create.smk new file mode 100644 index 00000000..11317abc --- /dev/null +++ b/digipipe/store/preprocessed/bmwk_long_term_scenarios/create.smk @@ -0,0 +1,74 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" +import json +import os +import pandas as pd +from pathlib import Path + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path( + "preprocessed", "bmwk_long_term_scenarios", data_dir=True) + +rule create: + """ + Extract files + """ + input: + get_abs_dataset_path( + "raw", "bmwk_long_term_scenarios") / "data" / + "bmwk_long_term_scenarios.zip" + output: + files=[DATASET_PATH / f for f in config["files_extract"]] + params: + outpath=DATASET_PATH, + files_extract=" ".join(config["files_extract"]) + shell: + """ + unzip -j {input} {params.files_extract} -d {params.outpath} + """ + +rule rename_columns_carriers: + """ + Rename columns, carriers and technologies + """ + input: DATASET_PATH / "{file}.csv" + output: DATASET_PATH / "{file}_reformatted.csv" + run: + data = pd.read_csv(input[0]) + + # Rename columns + data.rename( + columns=config["rename_columns"].get(wildcards.file), + inplace=True + ) + if not all( + c in config["rename_columns"].get(wildcards.file).values() + for c in data.columns + ): + raise ValueError("At least one column was not renamed!") + # Rename carriers and technologies + data = data.replace(config["rename_entries"]) + + data.to_csv(output[0], index=False) + #os.remove(input[0]) + +rule create_captions: + """ + Create attribute captions for app + """ + input: [DATASET_PATH / f for f in config["files_extract"]] + output: DATASET_PATH / "bmwk_long_term_scenarios_attribute_captions.json" + run: + captions = dict( + datasets_caption_map={ + Path(f).stem: "bmwk_long_term_scenarios" for f in input}, + captions={"bmwk_long_term_scenarios": { + v: k for k, v in config["rename_entries"].items()} + }, + ) + with open(output[0], "w", encoding="utf8") as f: + json.dump(captions, f, indent=4) diff --git a/digipipe/store/preprocessed/bmwk_long_term_scenarios/data/.gitkeep b/digipipe/store/preprocessed/bmwk_long_term_scenarios/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git 
a/digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md b/digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md new file mode 100644 index 00000000..45c723b4 --- /dev/null +++ b/digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md @@ -0,0 +1,7 @@ +# BMWK Langfristszenarien + +Langfristszenarien des Bundesministeriums für Wirtschaft und Klimaschutz, Daten +auf Landesebene, extrahiert. + +Raw dataset: +[bmwk_long_term_scenarios](../../raw/bmwk_long_term_scenarios/dataset.md) diff --git a/digipipe/store/preprocessed/bnetza_mastr/__init__.py b/digipipe/store/preprocessed/bnetza_mastr/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/bnetza_mastr/config.yml b/digipipe/store/preprocessed/bnetza_mastr/config.yml new file mode 100644 index 00000000..3a350ad0 --- /dev/null +++ b/digipipe/store/preprocessed/bnetza_mastr/config.yml @@ -0,0 +1,22 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Files to be extracted from raw dataset +files_extract: + [ + "bnetza_mastr_biomass_raw.csv", + "bnetza_mastr_combustion_raw.csv", + "bnetza_mastr_electricity_consumer_raw.csv", + "bnetza_mastr_gas_consumer_raw.csv", + "bnetza_mastr_gas_producer_raw.csv", + "bnetza_mastr_gas_storage_extended_raw.csv", + "bnetza_mastr_grid_connections_raw.csv", + "bnetza_mastr_gsgk_raw.csv", + "bnetza_mastr_hydro_raw.csv", + "bnetza_mastr_locations_extended_raw.csv", + "bnetza_mastr_solar_raw.csv", + "bnetza_mastr_storage_raw.csv", + "bnetza_mastr_storage_unit_raw.csv", + "bnetza_mastr_wind_raw.csv", + ] diff --git a/digipipe/store/preprocessed/bnetza_mastr/create.smk b/digipipe/store/preprocessed/bnetza_mastr/create.smk new file mode 100644 index 00000000..5b67dad6 --- /dev/null +++ b/digipipe/store/preprocessed/bnetza_mastr/create.smk @@ -0,0 +1,22 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "bnetza_mastr") + +rule create: + input: + get_abs_dataset_path("raw", "bnetza_mastr") / "data" / "bnetza_open_mastr_2022-12-19.zip" + output: + files=[DATASET_PATH / "data" / f for f in config["files_extract"]] + params: + outpath=DATASET_PATH / "data", + files_extract=" ".join([f"bnetza_open_mastr_2022-12-19/{f}" for f in config["files_extract"]]) + shell: + """ + unzip -j {input} {params.files_extract} -d {params.outpath} + """ diff --git a/digipipe/store/preprocessed/bnetza_mastr/data/.gitkeep b/digipipe/store/preprocessed/bnetza_mastr/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/bnetza_mastr/dataset.md b/digipipe/store/preprocessed/bnetza_mastr/dataset.md new file mode 100644 index 00000000..d36d7e0e --- /dev/null +++ b/digipipe/store/preprocessed/bnetza_mastr/dataset.md @@ -0,0 +1,3 @@ +# Erzeugungsanlagen aus Marktstammdatenregister + +Erzeugungsanlagen aus dem MaStR für ausgewählte Technologien. 
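Editor's note: the renaming step performed by the `rename_columns_carriers` rule above can be reproduced in isolation. The following Python sketch uses a small subset of the `rename_columns`/`rename_entries` mappings from the `bmwk_long_term_scenarios` config.yml; the example DataFrame rows are invented for illustration and are not part of this PR.

```python
import pandas as pd

# Subset of the mappings defined in config.yml (illustration only).
rename_columns = {
    "Jahr / Year": "year",
    "Energiebedarf in TWh / Energy Demand in TWh": "demand",
    "Energieträger / Energy Carrier": "carrier",
}
rename_entries = {"Strom": "electricity", "Wind an Land": "wind_onshore"}

# Invented example data mimicking one of the extracted scenario CSV files.
data = pd.DataFrame(
    {
        "Jahr / Year": [2030, 2030],
        "Energiebedarf in TWh / Energy Demand in TWh": [12.3, 4.5],
        "Energieträger / Energy Carrier": ["Strom", "Wind an Land"],
    }
)

# Rename columns and verify that every column is covered by the mapping.
data = data.rename(columns=rename_columns)
if not all(col in rename_columns.values() for col in data.columns):
    raise ValueError("At least one column was not renamed!")

# Rename carriers and technologies to their English identifiers.
data = data.replace(rename_entries)
print(data)
```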
diff --git a/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/config.yml b/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/config.yml new file mode 100644 index 00000000..d8db3d15 --- /dev/null +++ b/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/config.yml @@ -0,0 +1,25 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Years specified for the projections in the source +years : + [ + "2020", + "2050" + ] + +# Type of supply: Decentralized and centralized heat supply +supplies : + [ + "central", + "decentral" + ] + +# For simplicity, the projections for 2050 are assigned to 2045 and those for +# 2020 to 2022. +year_mapping : + { + "2050": "2045", + "2020": "2022" + } diff --git a/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/create.smk b/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/create.smk new file mode 100644 index 00000000..ce54176b --- /dev/null +++ b/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/create.smk @@ -0,0 +1,82 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +import pandas as pd +from digipipe.store.utils import get_abs_dataset_path + + +DATASET_PATH = get_abs_dataset_path("preprocessed", "dbfz_biomass_capacity_rel") + +rule create: + input: + get_abs_dataset_path( + "raw", "dbfz_biomass_heat_capacities" + ) / "data" / "dbfz_biomass_heat_capacities.csv" + output: + output_cen=DATASET_PATH / "data" / "dbfz_biomass_capacity_rel_central.csv", + output_dec=DATASET_PATH/ "data" / "dbfz_biomass_capacity_rel_decentral.csv" + run: + for supply in config["supplies"]: + all_shares_biomass_cap = pd.DataFrame() + for year in config["years"]: + # Read raw file + biomass_cap = pd.read_csv( + input[0], + sep=";", + usecols=["carrier", "tech", "capacity_[MW]_" + year, supply], + ) + + # Drop nan values + biomass_cap = biomass_cap.dropna() + + # Group by the columns "carrier" and "tech" + biomass_cap = biomass_cap.groupby(by=["carrier", "tech"]).sum( + numeric_only=True + ) + + # Calculate the share of the installed capacity per grouped + # technologies + share_biomass_cap = biomass_cap.apply( + lambda biomass_cap: biomass_cap / biomass_cap.sum() + ) + # Rename the column and drop the year from the name of the column with + # the capacity + share_biomass_cap.rename( + {"capacity_[MW]_" + year: "capacity_rel"}, axis=1, inplace=True + ) + + # Drop multiindex made by grouping + share_biomass_cap.reset_index(inplace=True) + + # Write tech into carrier col with the convention: + # "carrier_tech" + share_biomass_cap["carrier"] = share_biomass_cap[ + ["carrier", "tech"] + ].agg("_".join, axis=1) + + # Delete redundant column "tech" + del share_biomass_cap["tech"] + + # Add year according to year mapping + share_biomass_cap["year"] = config["year_mapping"][year] + + # Set index on the year to move the column to the front + share_biomass_cap.set_index("year", inplace=True) + share_biomass_cap.reset_index(inplace=True) + + # Update final df with all shares + all_shares_biomass_cap = pd.concat( + [all_shares_biomass_cap, share_biomass_cap] + ) + + # Save the file + if supply == "central": + all_shares_biomass_cap.to_csv(output.output_cen, index=False) + elif supply == "decentral": + all_shares_biomass_cap.to_csv(output.output_dec, index=False) + else: + raise 
ValueError("Please provide either 'central' or 'decentral'" + "with 'supply'") diff --git a/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/data/.gitkeep b/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/dataset.md b/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/dataset.md new file mode 100644 index 00000000..b186f864 --- /dev/null +++ b/digipipe/store/preprocessed/dbfz_biomass_capacity_rel/dataset.md @@ -0,0 +1,15 @@ +# Anteile von Biomasse-Konversionsanlagen anhand installierter Leistung + +Berechnung der Anteile der installierten Leistung an der gesamten installierten +Leistung der Biomasse-Konversionsanlagen. + +Die installierten Leistungen werden +[dbfz_biomass_heat_capacities](../../raw/dbfz_biomass_heat_capacities/dataset.md) +entnommen. Sie werden nach Energieträger (Biogas, Methan oder Holz) und +Technologie (BHKW (bpchp), Turbine mit Kondensationsentnahme (extchp) oder +Ofen (oven)) zusammengefasst. Anschließend wird der Anteil der installierten +Leistung an der gesamten installierten Leistung der Biomasse-Konversionsanlagen +berechnet. Der Einfachheit halber werden die Projektionen für 2050 dem Jahr +2045 und die für 2020 dem Jahr 2022 zugeordnet. Der Energieträger und die +Technologie (vgl. [dbfz_biomass_heat_capacities](../../raw/dbfz_biomass_heat_capacities/dataset.md)) +werden in einer Spalte zusammengefasst. diff --git a/digipipe/store/preprocessed/demandregio/__init__.py b/digipipe/store/preprocessed/demandregio/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/demandregio/config.yml b/digipipe/store/preprocessed/demandregio/config.yml new file mode 100644 index 00000000..a5ad93e2 --- /dev/null +++ b/digipipe/store/preprocessed/demandregio/config.yml @@ -0,0 +1,22 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Files to be extracted from raw dataset +files_extract: + [ + "dr_cts_gas_timeseries_2011.csv", + "dr_cts_power_demand_2022.csv", + "dr_cts_power_timeseries_2022.csv", + "dr_hh_gas_timeseries_2011.csv", + "dr_hh_households_2011.csv", + "dr_hh_population.csv", + "dr_hh_power_demand_2022.csv", + "dr_hh_power_timeseries_2022.csv", + "dr_ind_gas_demand_2022.csv", + "dr_ind_gas_timeseries_2011.csv", + "dr_ind_gas_timeseries_2022.csv", + "dr_ind_power_demand_2022.csv", + "dr_ind_power_timeseries_2022.csv", + "dr_temperature_ambient_2011.csv", + ] diff --git a/digipipe/store/preprocessed/demandregio/create.smk b/digipipe/store/preprocessed/demandregio/create.smk new file mode 100644 index 00000000..70552080 --- /dev/null +++ b/digipipe/store/preprocessed/demandregio/create.smk @@ -0,0 +1,22 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "demandregio", data_dir=True) + +rule create: + input: + get_abs_dataset_path("raw", "demandregio") / "data" / "demandregio.zip" + output: + files=[DATASET_PATH / f for f in config["files_extract"]] + params: + outpath=DATASET_PATH, + files_extract=" ".join(config["files_extract"]) + shell: + """ + unzip -j {input} {params.files_extract} -d {params.outpath} + """ diff --git a/digipipe/store/preprocessed/demandregio/data/.gitkeep b/digipipe/store/preprocessed/demandregio/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/demandregio/dataset.md b/digipipe/store/preprocessed/demandregio/dataset.md new file mode 100644 index 00000000..5648b787 --- /dev/null +++ b/digipipe/store/preprocessed/demandregio/dataset.md @@ -0,0 +1,7 @@ +# DemandRegio + +Regionalisierte Bevölkerungsprognose sowie Strom-, Wärme und Gasbedarf auf +Landkreisebene, extrahiert. + +Enthält Jahresverbräuche und Zeitreihen für die Sektoren Haushalte, Gewerbe, +Handel, Dienstleistungen (GHD) und Industrie für mehrere Zieljahre. diff --git a/digipipe/store/preprocessed/destatis_gv/__init__.py b/digipipe/store/preprocessed/destatis_gv/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/destatis_gv/config.yml b/digipipe/store/preprocessed/destatis_gv/config.yml new file mode 100644 index 00000000..380775cf --- /dev/null +++ b/digipipe/store/preprocessed/destatis_gv/config.yml @@ -0,0 +1,31 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Excel file +excel_file: + sheet_name: + { + "2010": "Gemeindedaten", + "2015": "Onlineprodukt_Gemeinden_311215", + "2020": "Onlineprodukt_Gemeinden", + "2021": "Onlineprodukt_Gemeinden", + "2022": "Onlineprodukt_Gemeinden" + } + usecols: "C:E, G, J" + skiprows: 6 + names: + [ + "Land", + "RB", + "Kreis", + "Gem", + "population" + ] + dtype: + { + "Land": str, + "RB": str, + "Kreis": str, + "Gem": str + } diff --git a/digipipe/store/preprocessed/destatis_gv/create.smk b/digipipe/store/preprocessed/destatis_gv/create.smk new file mode 100644 index 00000000..516e8bc2 --- /dev/null +++ b/digipipe/store/preprocessed/destatis_gv/create.smk @@ -0,0 +1,20 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "destatis_gv") + +rule create: + """ + Extract municipality population data from Excel files and save to CSVs + """ + input: + get_abs_dataset_path("raw", "destatis_gv") / "data" / "3112{year}_Auszug_GV.xlsx" + output: + DATASET_PATH / "data" / "3112{year}_Auszug_GV.csv" + script: + DATASET_PATH / "scripts" / "create.py" diff --git a/digipipe/store/preprocessed/destatis_gv/data/.gitkeep b/digipipe/store/preprocessed/destatis_gv/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/destatis_gv/dataset.md b/digipipe/store/preprocessed/destatis_gv/dataset.md new file mode 100644 index 00000000..6c0d0ce1 --- /dev/null +++ b/digipipe/store/preprocessed/destatis_gv/dataset.md @@ -0,0 +1,4 @@ +# Bevölkerung + +Einwohnerzahl nach Gemeinden des Statistischen Bundesamts für die Jahre +2010, 2015, 2020, 2021, 2022. diff --git a/digipipe/store/preprocessed/destatis_gv/scripts/create.py b/digipipe/store/preprocessed/destatis_gv/scripts/create.py new file mode 100644 index 00000000..3e5083c3 --- /dev/null +++ b/digipipe/store/preprocessed/destatis_gv/scripts/create.py @@ -0,0 +1,30 @@ +import pandas as pd + + +def process() -> None: + # Get Excel config and set sheet name + excel_cfg = snakemake.config["excel_file"] + excel_cfg["sheet_name"] = excel_cfg["sheet_name"][snakemake.wildcards.year] + + data = pd.read_excel( + snakemake.input[0], + **excel_cfg, + engine="openpyxl", + ) + + # Drop non-municipal data + data = data.loc[~data.isnull().any(axis=1)] + data = data.assign(population=data.population.astype(int)).rename( + columns={"population": snakemake.wildcards.year} + ) + + # Create AGS and drop old cols + data = data.assign(ags=data.Land + data.RB + data.Kreis + data.Gem) + + data[["ags", snakemake.wildcards.year]].to_csv( + snakemake.output[0], index=None + ) + + +if __name__ == "__main__": + process() diff --git a/digipipe/store/preprocessed/dwd_temperature/config.yml b/digipipe/store/preprocessed/dwd_temperature/config.yml new file mode 100644 index 00000000..17d3af5a --- /dev/null +++ b/digipipe/store/preprocessed/dwd_temperature/config.yml @@ -0,0 +1,3 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## diff --git a/digipipe/store/preprocessed/dwd_temperature/create.smk b/digipipe/store/preprocessed/dwd_temperature/create.smk new file mode 100644 index 00000000..ff15223d --- /dev/null +++ b/digipipe/store/preprocessed/dwd_temperature/create.smk @@ -0,0 +1,32 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +import pandas as pd +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "dwd_temperature", data_dir=True) + +rule create: + """ + Calc mean temperatures for entire region + """ + input: + temperature=get_abs_dataset_path( + "raw", "dwd_temperature") / "data" / "temperature_2011.csv" + output: + temperature=DATASET_PATH / "temperature_2011.csv" + run: + temp = pd.read_csv( + input.temperature, + index_col=["timestamp", "ags_id"], + ) + # Calc regional mean from municipal values + temp = temp.astype("float").reset_index().drop( + columns=["ags_id"]).groupby("timestamp").agg("mean").round(2) + # Remove timestamp + temp = temp.reset_index().drop(columns=["timestamp"]) + # Dump + temp.to_csv(output.temperature) diff --git a/digipipe/store/preprocessed/dwd_temperature/data/.gitkeep b/digipipe/store/preprocessed/dwd_temperature/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/dwd_temperature/dataset.md b/digipipe/store/preprocessed/dwd_temperature/dataset.md new file mode 100644 index 00000000..34fa574b --- /dev/null +++ b/digipipe/store/preprocessed/dwd_temperature/dataset.md @@ -0,0 +1,6 @@ +# Temperatur + +Stündliche Mittelwerte der Luft- und Erdbodentemperatur für die Region ABW, +Mittelwert für alle Gemeinden. + +Verwendet: [dwd_temperature](../../raw/dwd_temperature/dataset.md) diff --git a/digipipe/store/preprocessed/eurostat_lau/__init__.py b/digipipe/store/preprocessed/eurostat_lau/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/eurostat_lau/config.yml b/digipipe/store/preprocessed/eurostat_lau/config.yml new file mode 100644 index 00000000..d310985d --- /dev/null +++ b/digipipe/store/preprocessed/eurostat_lau/config.yml @@ -0,0 +1,15 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Excel file +excel_file: + sheet_name: "DE" + usecols: "A:C" + dtype: + {"LAU CODE": str} + +column_names: + ["nuts_code", + "lau_code", + "name"] diff --git a/digipipe/store/preprocessed/eurostat_lau/create.smk b/digipipe/store/preprocessed/eurostat_lau/create.smk new file mode 100644 index 00000000..2cb6b279 --- /dev/null +++ b/digipipe/store/preprocessed/eurostat_lau/create.smk @@ -0,0 +1,17 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "eurostat_lau") + +rule create: + """ + Extract Germany's LAUs from Excel files and save to CSV + """ + input: get_abs_dataset_path("raw", "eurostat_lau") / "data" / "EU-27-LAU-2022-NUTS-2021.xlsx" + output: DATASET_PATH / "data" / "germany_lau_codes.csv" + script: DATASET_PATH / "scripts" / "create.py" diff --git a/digipipe/store/preprocessed/eurostat_lau/data/.gitkeep b/digipipe/store/preprocessed/eurostat_lau/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/eurostat_lau/dataset.md b/digipipe/store/preprocessed/eurostat_lau/dataset.md new file mode 100644 index 00000000..0ce58069 --- /dev/null +++ b/digipipe/store/preprocessed/eurostat_lau/dataset.md @@ -0,0 +1,7 @@ +# Lokale Verwaltungseinheiten + +Lokale Verwaltungseinheiten (LAUs) von Eurostat, mit NUTS kompatibel. Diese +LAUs sind die Bausteine der NUTS und umfassen die Gemeinden und Kommunen der +Europäischen Union. + +Daten aus Excel extrahiert und in CSV exportiert. diff --git a/digipipe/store/preprocessed/eurostat_lau/scripts/create.py b/digipipe/store/preprocessed/eurostat_lau/scripts/create.py new file mode 100644 index 00000000..93a62db6 --- /dev/null +++ b/digipipe/store/preprocessed/eurostat_lau/scripts/create.py @@ -0,0 +1,22 @@ +import pandas as pd + + +def process() -> None: + cfg = snakemake.config + laus = pd.read_excel( + snakemake.input[0], + **cfg["excel_file"], + engine="openpyxl", + ) + + # Rename columns + laus.columns = cfg["column_names"] + laus.set_index("lau_code", inplace=True) + laus.dropna(inplace=True) + + # Dump + laus.to_csv(snakemake.output[0]) + + +if __name__ == "__main__": + process() diff --git a/digipipe/store/preprocessed/module.smk b/digipipe/store/preprocessed/module.smk new file mode 100644 index 00000000..822041c0 --- /dev/null +++ b/digipipe/store/preprocessed/module.smk @@ -0,0 +1,103 @@ +""" +Dataset registry for preprocessed datasets module which is loaded by main +snakemake file. All datasets in the preprocessed category must be added to +this file. 
+ +Template: +--------- +module : + snakefile: "/create.smk" + config: config["store"]["preprocessed"][""] +use rule * from as preprocessed__* + +""" + +module bkg_vg250: + snakefile: "bkg_vg250/create.smk" + config: config["store"]["preprocessed"]["bkg_vg250"] +use rule * from bkg_vg250 as preprocessed_bkg_vg250_* + +module osm_filtered: + snakefile: "osm_filtered/create.smk" + config: config["store"]["preprocessed"]["osm_filtered"] +use rule * from osm_filtered as preprocessed_osm_filtered_* + +module bnetza_mastr: + snakefile: "bnetza_mastr/create.smk" + config: config["store"]["preprocessed"]["bnetza_mastr"] +use rule * from bnetza_mastr as preprocessed_bnetza_mastr_* + +module destatis_gv: + snakefile: "destatis_gv/create.smk" + config: config["store"]["preprocessed"]["destatis_gv"] +use rule * from destatis_gv as preprocessed_destatis_gv_* + +module stala_st_pop_prog: + snakefile: "stala_st_pop_prog/create.smk" + config: config["store"]["preprocessed"]["stala_st_pop_prog"] +use rule * from stala_st_pop_prog as preprocessed_stala_st_pop_prog_* + +module stala_st_energy: + snakefile: "stala_st_energy/create.smk" + config: config["store"]["preprocessed"]["stala_st_energy"] +use rule * from stala_st_energy as preprocessed_stala_st_energy_* + +module demandregio: + snakefile: "demandregio/create.smk" + config: config["store"]["preprocessed"]["demandregio"] +use rule * from demandregio as preprocessed_demandregio_* + +module ba_employment: + snakefile: "ba_employment/create.smk" + config: config["store"]["preprocessed"]["ba_employment"] +use rule * from ba_employment as preprocessed_ba_employment_ * + +module bmwk_long_term_scenarios: + snakefile: "bmwk_long_term_scenarios/create.smk" + config: config["store"]["preprocessed"]["bmwk_long_term_scenarios"] +use rule * from bmwk_long_term_scenarios as preprocessed_bmwk_long_term_scenarios_ * + +module seenergies_peta5: + snakefile: "seenergies_peta5/create.smk" + config: config["store"]["preprocessed"]["seenergies_peta5"] +use rule * from seenergies_peta5 as preprocessed_seenergies_peta5_ * + +module ageb_energy_balance: + snakefile: "ageb_energy_balance/create.smk" + config: config["store"]["preprocessed"]["ageb_energy_balance"] +use rule * from ageb_energy_balance as preprocessed_ageb_energy_balance_ * + +module dwd_temperature: + snakefile: "dwd_temperature/create.smk" + config: config["store"]["preprocessed"]["dwd_temperature"] +use rule * from dwd_temperature as preprocessed_dwd_temperature_ * + +module regiostat: + snakefile: "regiostat/create.smk" + config: config["store"]["preprocessed"]["regiostat"] +use rule * from regiostat as preprocessed_regiostat_ * + +module eurostat_lau: + snakefile: "eurostat_lau/create.smk" + config: config["store"]["preprocessed"]["eurostat_lau"] +use rule * from eurostat_lau as preprocessed_eurostat_lau_ * + +module rpg_abw_regional_plan: + snakefile: "rpg_abw_regional_plan/create.smk" + config: config["store"]["preprocessed"]["rpg_abw_regional_plan"] +use rule * from rpg_abw_regional_plan as preprocessed_rpg_abw_regional_plan_ * + +module rli_pv_wfr: + snakefile: "rli_pv_wfr/create.smk" + config: config["store"]["preprocessed"]["rli_pv_wfr"] +use rule * from rli_pv_wfr as preprocessed_rli_pv_wfr_ * + +module rpg_abw_pv_roof_potential: + snakefile: "rpg_abw_pv_roof_potential/create.smk" + config: config["store"]["preprocessed"]["rpg_abw_pv_roof_potential"] +use rule * from rpg_abw_pv_roof_potential as preprocessed_rpg_abw_pv_roof_potential_ * + +module dbfz_biomass_capacity_rel: + snakefile: 
"dbfz_biomass_capacity_rel/create.smk" + config: config["store"]["preprocessed"]["dbfz_biomass_capacity_rel"] +use rule * from dbfz_biomass_capacity_rel as preprocessed_dbfz_biomass_capacity_rel_ * diff --git a/digipipe/store/preprocessed/osm_filtered/__init__.py b/digipipe/store/preprocessed/osm_filtered/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/osm_filtered/config.yml b/digipipe/store/preprocessed/osm_filtered/config.yml new file mode 100644 index 00000000..b68620c0 --- /dev/null +++ b/digipipe/store/preprocessed/osm_filtered/config.yml @@ -0,0 +1,9 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Tags to include (format: [tag, value])) +tags: + [ + ["building", "*"], + ] diff --git a/digipipe/store/preprocessed/osm_filtered/create.smk b/digipipe/store/preprocessed/osm_filtered/create.smk new file mode 100644 index 00000000..38f10ef1 --- /dev/null +++ b/digipipe/store/preprocessed/osm_filtered/create.smk @@ -0,0 +1,30 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import ( + get_abs_dataset_path, + create_tag_string_osmium, + create_tag_string_ogr +) + +DATASET_PATH = get_abs_dataset_path( + "preprocessed", "osm_filtered", data_dir=True) + +rule convert: + """ + Convert OSM pbf file while filtering for all requested tags. All attributes are retained. + """ + input: get_abs_dataset_path("raw", "osm") / "data" / "germany-230101.osm.pbf" + output: DATASET_PATH / "germany-230101_filtered.osm.gpkg" + params: + temp_file=DATASET_PATH / "germany-230101_filtered.osm.pbf", + tags=create_tag_string_osmium(config["tags"]) + shell: + """ + osmium tags-filter --remove-tags -f osm {input} {params.tags} -o {params.temp_file} + ogr2ogr -f GPKG -t_srs EPSG:3035 {output} {params.temp_file} + rm {params.temp_file} + """ diff --git a/digipipe/store/preprocessed/osm_filtered/data/.gitkeep b/digipipe/store/preprocessed/osm_filtered/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/osm_filtered/dataset.md b/digipipe/store/preprocessed/osm_filtered/dataset.md new file mode 100644 index 00000000..26b16811 --- /dev/null +++ b/digipipe/store/preprocessed/osm_filtered/dataset.md @@ -0,0 +1,7 @@ +# OpenStreetMap gefiltert + +OSM data nach bestimmten Tags (s. [config.yml](config.yml) --> `tags`) gefiltert, +zu LAEA Europe (EPSG:3035) umprojiziert und in ein Geopackage konvertiert. + +**Achtung:** Konvertierungs- und Extraktionsprozess benötigt ~50 GB +Speicherplatz und kann viel Zeit in Anspruch nehmen. diff --git a/digipipe/store/preprocessed/regiostat/__init__.py b/digipipe/store/preprocessed/regiostat/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/regiostat/config.yml b/digipipe/store/preprocessed/regiostat/config.yml new file mode 100644 index 00000000..76e169a9 --- /dev/null +++ b/digipipe/store/preprocessed/regiostat/config.yml @@ -0,0 +1,39 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
# +############################################################################## + +# Demand industry: Excel file +demand_ind: + excel_file: + sheet_name: "43531-01-02-4" + usecols: "A:B, C:J" + skiprows: 4 + nrows: 540 + names: + [ + "lau_code", + "name", + "total", + "lignite", + "fuel_oil", + "natural_gas", + "renewables", + "electrical_energy", + "heat", + "other" + ] + +# Employment industry: Excel file +employment_ind: + excel_file: + sheet_name: "42111-01-04-5" + usecols: "A:B, C:D" + skiprows: 2 + nrows: 13922 + names: + [ + "ags", + "name", + "companies_ind", + "employees_ind" + ] diff --git a/digipipe/store/preprocessed/regiostat/create.smk b/digipipe/store/preprocessed/regiostat/create.smk new file mode 100644 index 00000000..26d5b4ac --- /dev/null +++ b/digipipe/store/preprocessed/regiostat/create.smk @@ -0,0 +1,44 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import get_abs_dataset_path +from digipipe.store.preprocessed.regiostat.scripts import create + +DATASET_PATH = get_abs_dataset_path("preprocessed", "regiostat") + +rule extract_demand_ind: + """ + Extract energy demand for industry from Excel files and save to CSV + """ + input: get_abs_dataset_path("raw", "regiostat") / "data" / "43531-01-02-4.xlsx" + output: + demand_states=DATASET_PATH / "data" / "demand_energy_industry_states.csv", + demand_districts=DATASET_PATH/ "data" / "demand_energy_industry_districts.csv" + run: + create.extract_demand_ind( + infile=input[0], + outfile_states=output.demand_states, + outfile_districts=output.demand_districts, + cfg=config + ) + +rule extract_employment_ind: + """ + Extract employment for industry from Excel files and save to CSV + """ + input: get_abs_dataset_path("raw", "regiostat") / "data" / "42111-01-04-5.xlsx" + output: + employment_states=DATASET_PATH / "data" / "employment_industry_states.csv", + employment_districts=DATASET_PATH/ "data" / "employment_industry_districts.csv", + employment_muns=DATASET_PATH/ "data" / "employment_industry_muns.csv" + run: + create.extract_employment_ind( + infile=input[0], + outfile_states=output.employment_states, + outfile_districts=output.employment_districts, + outfile_muns=output.employment_muns, + cfg=config + ) diff --git a/digipipe/store/preprocessed/regiostat/data/.gitkeep b/digipipe/store/preprocessed/regiostat/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/regiostat/dataset.md b/digipipe/store/preprocessed/regiostat/dataset.md new file mode 100644 index 00000000..5d67355f --- /dev/null +++ b/digipipe/store/preprocessed/regiostat/dataset.md @@ -0,0 +1,24 @@ +# Regionalstatistik (GENESIS) + +Enthält Datensätze der statistischen Ämter des Bundes und der Länder aus +[regiostat](../../raw/regiostat/dataset.md). + +## Energieverwendung der Betriebe im Verarbeitenden Gewerbe (43531-01-02-4) + +Jahreserhebung ü. die Energieverwendung der Betriebe im verarbeitendem Gewerbe. + +Änderungen: + +- Dateiformat konvertiert +- Bundesland-, Kreis und Gemeindewerte extrahiert +- Energie in TWh konvertiert + +## Betriebe, tätige Personen, Bruttoentgelte (42111-01-04-5) + +Jahreserhebung ü. Betriebe, tätige Personen und Bruttoentgelte der Betriebe im +verarbeitendem Gewerbe. 
+ +Änderungen: + +- Dateiformat konvertiert +- Bundesland-, Kreis und Gemeindewerte extrahiert diff --git a/digipipe/store/preprocessed/regiostat/scripts/create.py b/digipipe/store/preprocessed/regiostat/scripts/create.py new file mode 100644 index 00000000..cedaf385 --- /dev/null +++ b/digipipe/store/preprocessed/regiostat/scripts/create.py @@ -0,0 +1,103 @@ +from pathlib import Path + +import numpy as np +import pandas as pd + + +def extract_demand_ind( + infile: Path, outfile_states: Path, outfile_districts: Path, cfg: dict +) -> None: + data = pd.read_excel( + infile, + **cfg["demand_ind"]["excel_file"], + ) + # Drop unnecessary headers + data = data.iloc[3:] + # Use correct zero value and set unavailable values to NaN + data = data.replace("-", 0).replace(".", np.nan) + + # Convert from GJ to MWh + data = data.set_index(["lau_code", "name"]).div(3.6).reset_index() + + # Filter: federal states + data_states = data.loc[data.lau_code.str.len() == 2].set_index("lau_code") + data_states["name"] = data_states["name"].str[2:] + data_states.to_csv(outfile_states) + + # Filter: districts + data_districts = data.loc[data.lau_code.str.len() == 5].set_index( + "lau_code" + ) + data_districts["name"] = data_districts["name"].str[6:] + data_districts.to_csv(outfile_districts) + + +def extract_employment_ind( + infile: Path, + outfile_states: Path, + outfile_districts: Path, + outfile_muns: Path, + cfg: dict, +) -> None: + data = pd.read_excel( + infile, + **cfg["employment_ind"]["excel_file"], + ) + + # Drop unnecessary headers + data = data.iloc[3:] + # Use correct zero value and set unavailable values to NaN + data = data.replace("-", 0).replace(".", np.nan) + + # Filter: federal states + print("Extracting industrial employees and companies: Federal state level") + data_states = data.loc[data.ags.str.len() == 2].set_index("ags") + data_states["name"] = data_states["name"].str[2:] + data_states.to_csv(outfile_states) + print(f" Employees: {data_states.employees_ind.sum()}") + print(f" Companies: {data_states.companies_ind.sum()}") + + # Filter: districts + print("Extracting industrial employees and companies: District level") + data_districts = data.loc[data.ags.str.len() == 5].set_index("ags") + data_districts["name"] = data_districts["name"].str[6:] + data_districts.to_csv(outfile_districts) + print(f" Employees: {data_districts.employees_ind.sum()}") + print(f" Companies: {data_districts.companies_ind.sum()}") + if ( + data_districts.employees_ind.sum() != data_states.employees_ind.sum() + ) or ( + data_districts.companies_ind.sum() != data_states.companies_ind.sum() + ): + print( + " WARNING: Numbers do not equal state values, " + "probably due to missing values!" 
+ ) + + # Filter: municipalities + # Note: Kreisfreie Städte are not included in muns, so they're inserted + # manually + print("Extracting industrial employees and companies: Municipal level") + data_muns = data.loc[data.ags.str.len() == 8].set_index("ags") + data_muns["name"] = data_muns["name"].str[8:] + + missing_districts = data_districts.loc[ + ~data_districts.index.isin(data_muns.index.str[:5]) + ] + missing_districts.index += "000" + data_muns = pd.concat([data_muns, missing_districts], axis=0).sort_index() + print( + f" {len(missing_districts)} " + f"municipalities missing, filled up with districts" + ) + print(f" Employees: {data_muns.employees_ind.sum()}") + print(f" Companies: {data_muns.companies_ind.sum()}") + if (data_muns.employees_ind.sum() != data_states.employees_ind.sum()) or ( + data_muns.companies_ind.sum() != data_states.companies_ind.sum() + ): + print( + " WARNING: Numbers do not equal state values, " + "probably due to missing values!" + ) + + data_muns.to_csv(outfile_muns) diff --git a/digipipe/store/preprocessed/rli_pv_wfr/config.yml b/digipipe/store/preprocessed/rli_pv_wfr/config.yml new file mode 100644 index 00000000..f876820f --- /dev/null +++ b/digipipe/store/preprocessed/rli_pv_wfr/config.yml @@ -0,0 +1,72 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Files to be extracted from raw dataset +files_extract: + geodata: + [ + "air_traffic_control_system.gpkg", + "aviation.gpkg", + "biosphere_reserve.gpkg", + "drinking_water_protection_area.gpkg", + "fauna_flora_habitat.gpkg", + "floodplain.gpkg", + "forest.gpkg", + "grid.gpkg", + "industry.gpkg", + "landscape_protection_area.gpkg", + "less_favoured_areas_agricultural.gpkg", + "military.gpkg", + "national_park.gpkg", + "nature_conservation_area.gpkg", + "potentialarea_pv_agriculture_lfa-off.gpkg", + "potentialarea_pv_road_railway.gpkg", + "railway.gpkg", + "road.gpkg", + "road_railway-500m.gpkg", + "settlement-0m.gpkg", + "slope.gpkg", + "soil_quality_high.gpkg", + "soil_quality_low.gpkg", + "special_protection_area.gpkg", + "water.gpkg", + "wetland_ramsar.gpkg" + ] + datapackage: + [ + "datapackage/datapackage.json" + ] + metadata: + [ + # Metadata + "metadata/rli_wam_ackerbauliches_ertragspotenzial.json", + "metadata/rli_wam_bahnverkehr.json", + "metadata/rli_wam_benachteiligte_gebiete.json", + "metadata/rli_wam_biosphaerenreservate.json", + "metadata/rli_wam_drehfunkfeuer.json", + "metadata/rli_wam_ee_anlagen.json", + "metadata/rli_wam_faunaflorahabitate.json", + "metadata/rli_wam_feuchtgebiete.json", + "metadata/rli_wam_gelaendeneigung.json", + "metadata/rli_wam_gewaesser.json", + "metadata/rli_wam_industrie.json", + "metadata/rli_wam_landschaftschutzgebiete.json", + "metadata/rli_wam_luftverkehr.json", + "metadata/rli_wam_militaergebiete.json", + "metadata/rli_wam_nationalparke.json", + "metadata/rli_wam_naturschutzgebiete.json", + "metadata/rli_wam_potentialflaeche_photovoltaik.json", + "metadata/rli_wam_potentialflaeche_windenergie.json", + "metadata/rli_wam_pv_energieertrag.json", + "metadata/rli_wam_siedlungen.json", + "metadata/rli_wam_strassen.json", + "metadata/rli_wam_stromnetze.json", + "metadata/rli_wam_ueberschwemmungsgebiete.json", + "metadata/rli_wam_verwaltungsgrenzen.json", + "metadata/rli_wam_vogelschutzgebiete.json", + "metadata/rli_wam_waelder.json", + "metadata/rli_wam_wahlkreise.json", + 
"metadata/rli_wam_wasserschutzgebiete.json", + "metadata/rli_wam_windleistungsdichte.json" + ] diff --git a/digipipe/store/preprocessed/rli_pv_wfr/create.smk b/digipipe/store/preprocessed/rli_pv_wfr/create.smk new file mode 100644 index 00000000..6e1b50e5 --- /dev/null +++ b/digipipe/store/preprocessed/rli_pv_wfr/create.smk @@ -0,0 +1,35 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +import json +import re +import geopandas as gpd +from pathlib import Path +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path( + "preprocessed", "rli_pv_wfr", data_dir=True) + +rule extract: + """ + Extract files: geodata, datapackage and metadata + """ + input: + get_abs_dataset_path( + "raw", "rli_pv_wfr") / "data" / + "rli_pv_windflaechenrechner_geodaten_v1.0.zip" + output: + [DATASET_PATH / f + for files in config["files_extract"].values() for f in files] + params: + outpath=DATASET_PATH, + files_extract=" ".join( + [f for files in config["files_extract"].values() for f in files] + ) + shell: + """ + unzip {input} {params.files_extract} -d {params.outpath} + """ diff --git a/digipipe/store/preprocessed/rli_pv_wfr/data/.gitkeep b/digipipe/store/preprocessed/rli_pv_wfr/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/rli_pv_wfr/dataset.md b/digipipe/store/preprocessed/rli_pv_wfr/dataset.md new file mode 100644 index 00000000..cc162cb2 --- /dev/null +++ b/digipipe/store/preprocessed/rli_pv_wfr/dataset.md @@ -0,0 +1,7 @@ +# Geodaten PV- und Windflächenrechner + +Geodaten aus dem [PV- und Windflächenrechner](https://www.agora-energiewende.de/service/pv-und-windflaechenrechner/), +extrahiert. + +Raw dataset: +[rli_pv_windflaechenrechner](../../raw/rli_pv_wfr/dataset.md) diff --git a/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/__init__.py b/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/config.yml b/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/config.yml new file mode 100644 index 00000000..7bc55a0d --- /dev/null +++ b/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/config.yml @@ -0,0 +1,20 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
# +############################################################################## + +layer: rpg_abw_pv_roof_potential +attributes: + {"id": "building_id", + "installierte_leistung_kw_sued": "installable_power_kw_south", + "ertrag_jahr_mwh_jahr_sued": "energy_annual_mwh_south", + "installierte_leistung_kw_nord": "installable_power_kw_north", + "ertrag_jahr_mwh_jahr_nord": "energy_annual_mwh_north", + "installierte_leistung_kw_ost": "installable_power_kw_east", + "ertrag_jahr_mwh_jahr_ost": "energy_annual_mwh_east", + "installierte_leistung_kw_west": "installable_power_kw_west", + "ertrag_jahr_mwh_jahr_west": "energy_annual_mwh_west", + "installierte_leistung_kw_flach": "installable_power_kw_flat", + "ertrag_jahr_mwh_jahr_flach": "energy_annual_mwh_flat", + "denkmalschutz_beachten": "historic_preservation", + "grundfl_qm": "building_area_sqm", + "geometry": "geometry"} diff --git a/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/create.smk b/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/create.smk new file mode 100644 index 00000000..5c92bbbf --- /dev/null +++ b/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/create.smk @@ -0,0 +1,36 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +import geopandas as gpd + +from digipipe.scripts.geo import ( + rename_filter_attributes, + reproject_simplify, + write_geofile +) +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "rpg_abw_pv_roof_potential") + +rule create: + input: + get_abs_dataset_path( + "raw", "rpg_abw_pv_roof_potential" + ) / "data" / "rpg_abw_pv_roof_potential.gpkg" + output: + DATASET_PATH / "data" / "rpg_abw_pv_roof_potential.gpkg" + run: + data = reproject_simplify( + rename_filter_attributes( + gdf=gpd.read_file(input[0]), + attrs_mapping=config["attributes"] + ) + ) + write_geofile( + gdf=data, + file=output[0], + layer_name=config["layer"], + ) diff --git a/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/data/.gitkeep b/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/dataset.md b/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/dataset.md new file mode 100644 index 00000000..c79fa3c8 --- /dev/null +++ b/digipipe/store/preprocessed/rpg_abw_pv_roof_potential/dataset.md @@ -0,0 +1,7 @@ +# Dachflächenpotenzial PV-Aufdachanlagen in ABW + +Abschätzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in +Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft, reprojiziert. + +Raw dataset: +[rpg_abw_pv_roof_potential](../../raw/rpg_abw_pv_roof_potential/dataset.md) diff --git a/digipipe/store/preprocessed/rpg_abw_regional_plan/__init__.py b/digipipe/store/preprocessed/rpg_abw_regional_plan/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/rpg_abw_regional_plan/config.yml b/digipipe/store/preprocessed/rpg_abw_regional_plan/config.yml new file mode 100644 index 00000000..55f81e65 --- /dev/null +++ b/digipipe/store/preprocessed/rpg_abw_regional_plan/config.yml @@ -0,0 +1,53 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. 
# +############################################################################## + +# Sachlicher Teilplan Wind 2018: VR/EG +stp_2018: + layer: "stp_2018_vreg" + attributes: + {"bezeich": "name_1", + "bezeich_2": "name_2", + "bezeich_3": "name_3", + "plan_status": "plan_status", + "d_beschluss": "decision_date", + "d_genehmigung": "authorization_date", + "d_inkraft": "in_force_date", + "geometry": "geometry"} + +# Sachlicher Teilplan Wind 2027: VR/EG +stp_2027: + # Vorranggebiete + vr: + layer: "stp_2027_vr" + attributes: + {"bezeich": "name_1", + "bezeich_2": "name_2", + "bezeich_3": "name_3", + "bezeich_4": "name_4", + "plan_status": "plan_status", + "d_beschluss": "decision_date", + "d_genehmigung": "authorization_date", + "d_inkraft": "in_force_date", + "geometry": "geometry"} + # Repoweringgebiete + repowering: + layer: "stp_2027_repowering" + attributes: + {"bezeich": "name_1", + "bezeich_2": "name_2", + "bezeich_3": "name_3", + "bezeich_4": "name_4", + "plan_status": "plan_status", + "d_beschluss": "decision_date", + "d_genehmigung": "authorization_date", + "d_inkraft": "in_force_date", + "geometry": "geometry"} + # Suchraum Wald + search_area_forest_area: + attributes: + {"geometry": "geometry"} + # Suchraum Offenland + search_area_open_area: + attributes: + {"geometry": "geometry"} diff --git a/digipipe/store/preprocessed/rpg_abw_regional_plan/create.smk b/digipipe/store/preprocessed/rpg_abw_regional_plan/create.smk new file mode 100644 index 00000000..5479f29e --- /dev/null +++ b/digipipe/store/preprocessed/rpg_abw_regional_plan/create.smk @@ -0,0 +1,108 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" +import geopandas as gpd +from digipipe.scripts.geo import ( + rename_filter_attributes, + reproject_simplify, + write_geofile +) +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "rpg_abw_regional_plan") + +rule create_stp_2018: + """ + Sachlicher Teilplan Wind 2018: Preprocess VR/EG + """ + input: + get_abs_dataset_path( + "raw", "rpg_abw_regional_plan") / "data" / "stp_2018_vreg.gpkg" + output: + DATASET_PATH / "data" / "stp_2018_vreg.gpkg" + run: + data = gpd.read_file(input[0]) + data = reproject_simplify( + rename_filter_attributes( + gdf=data, + attrs_mapping=config["stp_2018"]["attributes"], + ) + ) + write_geofile( + gdf=data, + file=output[0], + layer_name=config["stp_2018"]["layer"], + ) + +rule create_stp_2027_plan_intent: + """ + Sachlicher Teilplan Wind 2027: Preprocess plan intent + """ + input: + get_abs_dataset_path( + "raw", "rpg_abw_regional_plan") / "data" / + "stp_2027_ideen_{category}.gpkg" + output: + DATASET_PATH / "data" / "stp_2027_{category}.gpkg", + run: + data = gpd.read_file(input[0]) + data = reproject_simplify( + rename_filter_attributes( + gdf=data, + attrs_mapping=config["stp_2027"]["vr"]["attributes"], + ) + ) + write_geofile( + gdf=data, + file=output[0], + layer_name=config["stp_2027"]["vr"]["layer"], + ) + +rule create_stp_2027_search_area: + """ + Sachlicher Teilplan Wind 2027: Preprocess search areas + """ + input: + get_abs_dataset_path( + "raw", "rpg_abw_regional_plan") / "data" / + "stp_2027_suchraum.gpkg" + output: + forest=DATASET_PATH / "data" / "stp_2027_search_area_forest_area.gpkg", + open_land=DATASET_PATH / "data" / "stp_2027_search_area_open_area.gpkg" + run: + # Forest + data = gpd.read_file( + input[0], + layer="suchraum_wald_03032023", + ) + data = reproject_simplify( + 
rename_filter_attributes( + gdf=data, + attrs_mapping=config[ + "stp_2027"]["search_area_forest_area"]["attributes"], + ) + ) + write_geofile( + gdf=data, + file=output.forest, + ) + + # Open land + data = gpd.read_file( + input[0], + layer="suchraum_offenland_03032023", + ) + data = data.explode(index_parts=False) + data = reproject_simplify( + rename_filter_attributes( + gdf=data, + attrs_mapping=config[ + "stp_2027"]["search_area_open_area"]["attributes"], + ) + ) + write_geofile( + gdf=data, + file=output.open_land, + ) diff --git a/digipipe/store/preprocessed/rpg_abw_regional_plan/data/.gitkeep b/digipipe/store/preprocessed/rpg_abw_regional_plan/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/rpg_abw_regional_plan/dataset.md b/digipipe/store/preprocessed/rpg_abw_regional_plan/dataset.md new file mode 100644 index 00000000..f39b9434 --- /dev/null +++ b/digipipe/store/preprocessed/rpg_abw_regional_plan/dataset.md @@ -0,0 +1,7 @@ +# Regionalplan Anhalt-Bitterfeld-Wittenberg + +Vorverarbeitete Datensätze aus Teilplänen Wind der Regionalen +Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg aus +[rpg_abw_regional_plan](../../raw/rpg_abw_regional_plan/dataset.md). + +In der [config.yml](config.yml) können Einstellungen vorgenommen werden. diff --git a/digipipe/store/preprocessed/rpg_abw_regional_plan/scripts/create.py b/digipipe/store/preprocessed/rpg_abw_regional_plan/scripts/create.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/seenergies_peta5/__init__.py b/digipipe/store/preprocessed/seenergies_peta5/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/seenergies_peta5/config.yml b/digipipe/store/preprocessed/seenergies_peta5/config.yml new file mode 100644 index 00000000..071de9db --- /dev/null +++ b/digipipe/store/preprocessed/seenergies_peta5/config.yml @@ -0,0 +1,24 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Files to be extracted from raw dataset +files_extract: + residential: + [ + "HD_2015_res_Peta5_0_1_GJ.tfw", + "HD_2015_res_Peta5_0_1_GJ.tif.aux.xml", + "HD_2015_res_Peta5_0_1_GJ.tif.vat.cpg", + "HD_2015_res_Peta5_0_1_GJ.tif", + "HD_2015_res_Peta5_0_1_GJ.tif.ovr", + "HD_2015_res_Peta5_0_1_GJ.tif.vat.dbf", + ] + cts: + [ + "HD_2015_ser_Peta5_0_1_GJ.tfw", + "HD_2015_ser_Peta5_0_1_GJ.tif.aux.xml", + "HD_2015_ser_Peta5_0_1_GJ.tif.vat.cpg", + "HD_2015_ser_Peta5_0_1_GJ.tif", + "HD_2015_ser_Peta5_0_1_GJ.tif.ovr", + "HD_2015_ser_Peta5_0_1_GJ.tif.vat.dbf", + ] diff --git a/digipipe/store/preprocessed/seenergies_peta5/create.smk b/digipipe/store/preprocessed/seenergies_peta5/create.smk new file mode 100644 index 00000000..d479788a --- /dev/null +++ b/digipipe/store/preprocessed/seenergies_peta5/create.smk @@ -0,0 +1,35 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" + +import zipfile +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "seenergies_peta5", data_dir=True) + +rule extract: + """ + Extract files from archives + """ + input: + residential=get_abs_dataset_path( + "raw", "seenergies_peta5") / "data" / "Peta5_0_1_HD_res.zip", + cts=get_abs_dataset_path( + "raw", "seenergies_peta5") / "data" / "Peta5_0_1_HD_ser.zip" + output: + residential=[DATASET_PATH / f for f in config["files_extract"]["residential"]], + cts=[DATASET_PATH/f for f in config["files_extract"]["cts"]] + params: + outpath=DATASET_PATH, + files_extract_residential=config["files_extract"]["residential"], + files_extract_cts=config["files_extract"]["cts"] + run: + # note: buildin zipfile used as bash unzip doesn't work (pkzip required) + with zipfile.ZipFile(input.residential, "r") as zf: + for f in params.files_extract_residential: + zf.extract(f, params.outpath) + with zipfile.ZipFile(input.cts, "r") as zf: + for f in params.files_extract_cts: + zf.extract(f, params.outpath) diff --git a/digipipe/store/preprocessed/seenergies_peta5/data/.gitkeep b/digipipe/store/preprocessed/seenergies_peta5/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/seenergies_peta5/dataset.md b/digipipe/store/preprocessed/seenergies_peta5/dataset.md new file mode 100644 index 00000000..83683455 --- /dev/null +++ b/digipipe/store/preprocessed/seenergies_peta5/dataset.md @@ -0,0 +1,6 @@ +# sEEnergies Pan-European Thermal Atlas 5.2 (Peta5) + +Wärmebedarf (extrahiert) für Europa 2015 in GJ (1ha Auflösung) für + +- Haushalte: Raumwärme und Warmwasser +- GHD: Raumwärme, Warmwasser und Prozesswärme diff --git a/digipipe/store/preprocessed/stala_st_energy/config.yml b/digipipe/store/preprocessed/stala_st_energy/config.yml new file mode 100644 index 00000000..320e484f --- /dev/null +++ b/digipipe/store/preprocessed/stala_st_energy/config.yml @@ -0,0 +1,15 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Demand industry: Excel file +electricity_demand_ind: + excel_file: + sheet_name: "Stromverbr. nach KR und Jahren" + usecols: "A:T" + skiprows: 2 + nrows: 15 + + # Years to be included (leave empty for all) + years: + [] diff --git a/digipipe/store/preprocessed/stala_st_energy/create.smk b/digipipe/store/preprocessed/stala_st_energy/create.smk new file mode 100644 index 00000000..1b65abc1 --- /dev/null +++ b/digipipe/store/preprocessed/stala_st_energy/create.smk @@ -0,0 +1,42 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . 
+""" +import pandas as pd + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path( + "preprocessed", "stala_st_energy", data_dir=True +) + +rule extract_power_demand_ind: + """ + Extract industrial electricity demand from Excel file and save to CSV + """ + input: + get_abs_dataset_path("raw", "stala_st_energy") / "data" / + "Stromverbrauch_nach_Kreisen_ab_Jahr_2003.xlsx" + output: DATASET_PATH / "power_demand_industry_st_districts.csv" + run: + data = pd.read_excel( + input[0], + **config["electricity_demand_ind"]["excel_file"], + engine="openpyxl", + ).rename(columns={ + "Kreisfreie Stadt\nLandkreis\n\nLand": "name" + }).set_index("name") + data.columns = [int(_[1]) for _ in data.columns.str.split("\n")] + + # Select desired years + print( + f"Available years for industrial demand: " + f"{data.columns.to_list()}" + ) + years = config["electricity_demand_ind"]["years"] + if len(years) > 0: + data = data[years] + print(f" Selected: {years}") + + data.to_csv(output[0]) diff --git a/digipipe/store/preprocessed/stala_st_energy/data/.gitkeep b/digipipe/store/preprocessed/stala_st_energy/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/stala_st_energy/dataset.md b/digipipe/store/preprocessed/stala_st_energy/dataset.md new file mode 100644 index 00000000..85c3a1b8 --- /dev/null +++ b/digipipe/store/preprocessed/stala_st_energy/dataset.md @@ -0,0 +1,13 @@ +# Energiedaten Sachsen-Anhalt + +Datensätze zur Energie- und Wasserversorgung des Statistischen Landesamtes +Sachsen-Anhalt, extrahiert und konvertiert. + +## Daten + +Stromverbrauch der Industriebetriebe nach Kreisen 2003-2021 in MWh + +- Datei: `power_demand_industry_st_districts.csv` + +Raw dataset: +[stala_st_energy](../../raw/stala_st_energy/dataset.md) diff --git a/digipipe/store/preprocessed/stala_st_energy/metadata.json b/digipipe/store/preprocessed/stala_st_energy/metadata.json new file mode 100644 index 00000000..86df8626 --- /dev/null +++ b/digipipe/store/preprocessed/stala_st_energy/metadata.json @@ -0,0 +1,5 @@ +{ + "Datenquellen": { + "Stromverbrauch der Industriebetriebe nach Kreisen": "https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/energie-und-wasserversorgung/tabellen-energieverwendung#c206986" + } +} diff --git a/digipipe/store/preprocessed/stala_st_energy/scripts/create.py b/digipipe/store/preprocessed/stala_st_energy/scripts/create.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/stala_st_pop_prog/__init__.py b/digipipe/store/preprocessed/stala_st_pop_prog/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/stala_st_pop_prog/config.yml b/digipipe/store/preprocessed/stala_st_pop_prog/config.yml new file mode 100644 index 00000000..70541393 --- /dev/null +++ b/digipipe/store/preprocessed/stala_st_pop_prog/config.yml @@ -0,0 +1,18 @@ +############################################################################## +# This file holds configuration parameters for the creation of this dataset. # +############################################################################## + +# Excel file +excel_file: + sheet_name: "7. 
RBP" + usecols: "A, G:S" + skiprows: 3 + nrows: 230 + dtype: + { + "AGS": str + } + +# Years to be included (leave empty for all) +years: + [] diff --git a/digipipe/store/preprocessed/stala_st_pop_prog/create.smk b/digipipe/store/preprocessed/stala_st_pop_prog/create.smk new file mode 100644 index 00000000..114be7ff --- /dev/null +++ b/digipipe/store/preprocessed/stala_st_pop_prog/create.smk @@ -0,0 +1,17 @@ +""" +Snakefile for this dataset + +Note: To include the file in the main workflow, it must be added to the respective module.smk . +""" + +from digipipe.store.utils import get_abs_dataset_path + +DATASET_PATH = get_abs_dataset_path("preprocessed", "stala_st_pop_prog") + +rule create: + """ + Extract municipality population data from Excel files and save to CSVs + """ + input: get_abs_dataset_path("raw", "stala_st_pop_prog") / "data" / "1_Internettabelle_7RBP_nach_Prognosejahr_Geschlecht_alle_Ebenen.xlsx" + output: DATASET_PATH / "data" / "population_prognosis_st_muns.csv" + script: DATASET_PATH / "scripts" / "create.py" diff --git a/digipipe/store/preprocessed/stala_st_pop_prog/data/.gitkeep b/digipipe/store/preprocessed/stala_st_pop_prog/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/preprocessed/stala_st_pop_prog/dataset.md b/digipipe/store/preprocessed/stala_st_pop_prog/dataset.md new file mode 100644 index 00000000..b2e56254 --- /dev/null +++ b/digipipe/store/preprocessed/stala_st_pop_prog/dataset.md @@ -0,0 +1,7 @@ +# Bevölkerungsprognose Sachsen-Anhalt + +Bevölkerungsprognose je Gemeinde bis 2035 des Statistischen Landesamtes +Sachsen-Anhalt, extrahiert und konvertiert. + +Raw dataset: +[stala_st_pop_prog](../../raw/stala_st_pop_prog/dataset.md) diff --git a/digipipe/store/preprocessed/stala_st_pop_prog/scripts/create.py b/digipipe/store/preprocessed/stala_st_pop_prog/scripts/create.py new file mode 100644 index 00000000..1579c00f --- /dev/null +++ b/digipipe/store/preprocessed/stala_st_pop_prog/scripts/create.py @@ -0,0 +1,35 @@ +import pandas as pd + + +def process() -> None: + data = pd.read_excel( + snakemake.input[0], + **snakemake.config["excel_file"], + engine="openpyxl", + ).rename(columns={"AGS": "ags"}) + + # Rename columns + data.set_index("ags", inplace=True) + data.columns = [int(col[1]) for col in data.columns.str.split("\n")] + + # Select desired years + print(f"Available years for population prognosis: {data.columns.to_list()}") + years = snakemake.config["years"] + if len(years) > 0: + data = data[years] + print(f" Selected: {years}") + + # Rename columns + data.reset_index(inplace=True) + + # Drop non-municipal data + data = data.loc[data.ags.str.len() > 2] + data = data.assign( + ags=data.ags.str.pad(width=8, side="right", fillchar="0") + ).set_index("ags") + + data.to_csv(snakemake.output[0]) + + +if __name__ == "__main__": + process() diff --git a/digipipe/store/raw/TEMPLATE/data/.gitkeep b/digipipe/store/raw/TEMPLATE/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/TEMPLATE/dataset.md b/digipipe/store/raw/TEMPLATE/dataset.md new file mode 100644 index 00000000..98ede272 --- /dev/null +++ b/digipipe/store/raw/TEMPLATE/dataset.md @@ -0,0 +1,55 @@ +# Name des Datensatzes + +Eine kurze Beschreibung des Datensatzes. +Diese hilft der Dokumentation und bei der Erstellung des Berichts. + +# Notizen (nur zur Information, nicht Teil des dataset.md files) + +Benennungskonvention: `` (Kleinschreibung), z.B. 
ein +Datensatz zu Naturschutzgebieten des Bundesamtes für Naturschutz (BfN) könnte +`bfn_natural_reserves` heißen. + +Was ist ein Datensatz? Es gibt verschiedene Definitionen, aber in dieser +Pipeline ist ein Datensatz eine Sammlung von Daten, die als eine Einheit +behandelt, welche aus mehreren Dateien bestehen und durch eine einzige +Metadaten-Datei identifiziert werden kann. + +Beispiele: +- [OSM Germany](https://download.geofabrik.de/europe/germany-latest.osm.pbf) +- [ERA5 weather dataset](https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-single-levels?tab=overview) +- [BKG administrative areas](https://gdz.bkg.bund.de/index.php/default/verwaltungsgebiete-1-250-000-stand-01-01-vg250-01-01.html) + +Rohdateien kommen in das Verzeichnis `data` und werden nach Möglichkeit nicht +umbenannt. + +## Beschreibung + +Bitte gib zumindest eine kurze Beschreibung: + +- Worum geht es in dem Datensatz +- Gibt es Besonderheiten, die es zu wissen gilt? (neben Metadaten, welche + UNBEDINGT erstellt werden müssen, dazu unten mehr) + +Eine schnelle und suboptimale Beschreibung ist besser als keine. + +## Metadaten + +Füge für jeden Roh-/Originaldatensatz, der erstellt wird, Metadaten zur +Beschreibung der Daten mit maschinenlesbaren Informationen hinzu. +Folge der [OEP](https://openenergy-platform.org/about/) Metadaten v1.5.1. +Es kann der [Metadata creator](https://meta.rl-institut.de/meta_creator/151) +verwendet werden. + +Zum Vergleich die [metadata.json](metadata.json) in diesem Verzeichnis. + +Alternativ kann sie auch manuell erstellt werden: +Folgen Sie +[dieses Beispiel](https://github.com/OpenEnergyPlatform/oemetadata/blob/develop/metadata/latest/example.json) +um zu verstehen, wie die Felder verwendet werden. Die Felder werden in der +[Open Energy Metadata Description](https://github.com/OpenEnergyPlatform/oemetadata/blob/develop/metadata/v141/metadata_key_description.md) +im Detail beschrieben. Bitte überprüfe, ob der Metadatenstring den OEP-Metadaten +Standards entspricht, indem das +[OMI-Tool](https://github.com/OpenEnergyPlatform/omi) verwendet wird. +Wenn der Metadatenstring konform ist, bringt OMI die Schlüssel in die richtige +Reihenfolge und gibt den vollständigen String aus (für den Export die Option +`-o` verwenden). 
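Before handing the file to OMI it can help to see which template fields are still unfilled. The following minimal Python sketch (illustrative only, not part of the pipeline) walks the `metadata.json` in this directory and lists every key whose value is still `null` or `"todo"`:

```python
import json
from pathlib import Path


def find_unfilled(obj, prefix=""):
    """Recursively collect keys whose values are still null or "todo"."""
    unfilled = []
    if isinstance(obj, dict):
        for key, value in obj.items():
            unfilled += find_unfilled(value, f"{prefix}{key}.")
    elif isinstance(obj, list):
        for position, value in enumerate(obj):
            unfilled += find_unfilled(value, f"{prefix}{position}.")
    elif obj is None or obj == "todo":
        unfilled.append(prefix.rstrip("."))
    return unfilled


metadata = json.loads(Path("metadata.json").read_text(encoding="utf-8"))
for field in find_unfilled(metadata):
    print(f"Still unfilled: {field}")
```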
diff --git a/digipipe/store/raw/TEMPLATE/metadata.json b/digipipe/store/raw/TEMPLATE/metadata.json new file mode 100644 index 00000000..0802ee07 --- /dev/null +++ b/digipipe/store/raw/TEMPLATE/metadata.json @@ -0,0 +1,196 @@ +{ + "name": null, + "title": null, + "id": null, + "description": null, + "language": [ + null + ], + "subject": [ + { + "name": null, + "path": null + } + ], + "keywords": [ + null + ], + "publicationDate": null, + "context": { + "homepage": null, + "documentation": null, + "sourceCode": null, + "contact": null, + "grantNo": null, + "fundingAgency": null, + "fundingAgencyLogo": null, + "publisherLogo": null + }, + "spatial": { + "location": null, + "extent": null, + "resolution": null + }, + "temporal": { + "referenceDate": null, + "timeseries": [ + { + "start": null, + "end": null, + "resolution": null, + "alignment": null, + "aggregationType": null + }, + { + "start": null, + "end": null, + "resolution": null, + "alignment": null, + "aggregationType": null + } + ] + }, + "sources": [ + { + "title": null, + "description": null, + "path": null, + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ] + }, + { + "title": null, + "description": null, + "path": null, + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ], + "contributors": [ + { + "title": null, + "email": null, + "date": null, + "object": null, + "comment": null + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [ + { + "name": null, + "description": null, + "type": null, + "unit": null, + "isAbout": [ + { + "name": null, + "path": null + } + ], + "valueReference": [ + { + "value": null, + "name": null, + "path": null + } + ] + }, + { + "name": null, + "description": null, + "type": null, + "unit": null, + "isAbout": [ + { + "name": null, + "path": null + } + ], + "valueReference": [ + { + "value": null, + "name": null, + "path": null + } + ] + } + ], + "primaryKey": [ + null + ], + "foreignKeys": [ + { + "fields": [ + null + ], + "reference": { + "resource": null, + "fields": [ + null + ] + } + } + ] + }, + "dialect": { + "delimiter": null, + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": null, + "review": { + "path": null, + "badge": null + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/ageb_energy_balance/data/.gitkeep b/digipipe/store/raw/ageb_energy_balance/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/ageb_energy_balance/dataset.md b/digipipe/store/raw/ageb_energy_balance/dataset.md new file mode 100644 index 00000000..8a90c7fa --- /dev/null +++ b/digipipe/store/raw/ageb_energy_balance/dataset.md @@ -0,0 +1,5 @@ +# AGEB – Anwendungsbilanzen für die Endenergiesektoren 2011 bis 2021 + +Detaillierte Anwendungsbilanzen der Endenergiesektoren für 2020 und 2021 sowie +zusammenfassende Zeitreihen zum Endenergieverbrauch nach Energieträgern und +Anwendungszwecken für Jahre von 2011 bis 2021 der AG Energiebilanzen. diff --git a/digipipe/store/raw/ageb_energy_balance/metadata.json b/digipipe/store/raw/ageb_energy_balance/metadata.json new file mode 100644 index 00000000..fae573f4 --- /dev/null +++ b/digipipe/store/raw/ageb_energy_balance/metadata.json @@ -0,0 +1,78 @@ +{ + "name": "ageb_energy_balance", + "title": "AGEB – Anwendungsbilanzen für die Endenergiesektoren 2011 bis 2021", + "id": "ageb_energy_balance", + "description": "Detaillierte Anwendungsbilanzen der Endenergiesektoren für 2020 und 2021 sowie zusammenfassende Zeitreihen zum Endenergieverbrauch nach Energieträgern und Anwendungszwecken für Jahre von 2011 bis 2021", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Endenergiesektoren", + "Anwendungsbilanzen", + "energy-balance" + ], + "publicationDate": "2022-12-01", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-12-01", + "timeseries": null + }, + "sources": [ + { + "title": "AGEB – Anwendungsbilanzen für die Endenergiesektoren 2011 bis 2021", + "description": "Detaillierte Anwendungsbilanzen der Endenergiesektoren für 2020 und 2021 sowie zusammenfassende Zeitreihen zum Endenergieverbrauch nach Energieträgern und Anwendungszwecken für 
Jahre von 2011 bis 2021", + "path": "https://ag-energiebilanzen.de/daten-und-fakten/anwendungsbilanzen/", + "licenses": null + } + ], + "licenses": null, + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": null, + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/ba_employment/data/.gitkeep b/digipipe/store/raw/ba_employment/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/ba_employment/dataset.md b/digipipe/store/raw/ba_employment/dataset.md new file mode 100644 index 00000000..88d837d5 --- /dev/null +++ b/digipipe/store/raw/ba_employment/dataset.md @@ -0,0 +1,5 @@ +# Sozialversicherungspflichtig Beschäftigte und Betriebe + +Gemeindedaten der sozialversicherungspflichtig Beschäftigten am 30.06.2022 nach +Wohn- und Arbeitsort - Deutschland, Länder, Kreise und Gemeinden (Jahreszahlen) +der Bundesagentur für Arbeit. 
diff --git a/digipipe/store/raw/ba_employment/metadata.json b/digipipe/store/raw/ba_employment/metadata.json new file mode 100644 index 00000000..802556f6 --- /dev/null +++ b/digipipe/store/raw/ba_employment/metadata.json @@ -0,0 +1,113 @@ +{ + "name": "ba_employment", + "title": "Gemeindedaten der sozialversicherungspflichtig Beschäftigten nach Wohn- und Arbeitsort", + "id": "ba_employment", + "description": "Zahl der soziaversicherungspflichtig Beschäftigten nach: Wohnort, Personengruppen, Arbeitsort, Wohnort gleich Arbeitsort, Einpendeler, Auspendler, Zahl der Betriebe", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Gemeindedaten", + "sozialversicherungspflichtig", + "Beschäftigte", + "Wohnort", + "Arbeitsort" + ], + "publicationDate": "2023-01-16", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-06-23", + "timeseries": null + }, + "sources": [ + { + "title": "Gemeindedaten der sozialversicherungspflichtig Beschäftigten nach Wohn- und Arbeitsort", + "description": "Zahl der soziaversicherungspflichtig Beschäftigten nach: Wohnort, Personengruppen, Arbeitsort, Wohnort gleich Arbeitsort, Einpendeler, Auspendler, Zahl der Betriebe", + "path": "https://statistik.arbeitsagentur.de/SiteGlobals/Forms/Suche/Einzelheftsuche_Formular.html?nn=15024&topic_f=beschaeftigung-sozbe-gemband", + "licenses": [ + { + "name": null, + "title": null, + "path": "https://statistik.arbeitsagentur.de/DE/Statischer-Content/Servicebereich-Navigation/Bezugsbedingungen.html?nn=6654", + "instruction": "Sie können Informationen speichern, (auch auszugsweise) mit Quellenangabe weitergeben, vervielfältigen und verbreiten. Die Inhalte dürfen nicht verändert oder verfälscht werden. Eigene Berechnungen sind erlaubt, jedoch als solche kenntlich zu machen. Im Falle einer Zugänglichmachung im Internet soll dies in Form einer Verlinkung auf die Homepage der Statistik der Bundesagentur für Arbeit erfolgen. Die Nutzung der Inhalte für gewerbliche Zwecke, ausgenommen Presse, Rundfunk und Fernsehen und wissenschaftliche Publikationen, bedarf der Genehmigung durch die Statistik der Bundesagentur für Arbeit.", + "attribution": "© Statistik der Bundesagentur für Arbeit" + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": "https://statistik.arbeitsagentur.de/DE/Statischer-Content/Servicebereich-Navigation/Bezugsbedingungen.html?nn=6654", + "instruction": "Sie können Informationen speichern, (auch auszugsweise) mit Quellenangabe weitergeben, vervielfältigen und verbreiten. Die Inhalte dürfen nicht verändert oder verfälscht werden. Eigene Berechnungen sind erlaubt, jedoch als solche kenntlich zu machen. Im Falle einer Zugänglichmachung im Internet soll dies in Form einer Verlinkung auf die Homepage der Statistik der Bundesagentur für Arbeit erfolgen. 
Die Nutzung der Inhalte für gewerbliche Zwecke, ausgenommen Presse, Rundfunk und Fernsehen und wissenschaftliche Publikationen, bedarf der Genehmigung durch die Statistik der Bundesagentur für Arbeit.", + "attribution": "© Statistik der Bundesagentur für Arbeit" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "Create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/bkg_vg250/__init__.py b/digipipe/store/raw/bkg_vg250/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/bkg_vg250/data/.gitkeep b/digipipe/store/raw/bkg_vg250/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/bkg_vg250/dataset.md b/digipipe/store/raw/bkg_vg250/dataset.md new file mode 100644 index 00000000..7edec535 --- /dev/null +++ b/digipipe/store/raw/bkg_vg250/dataset.md @@ -0,0 +1,3 @@ +# Verwaltungsgebiete Deutschlands + +Verwaltungsgebiete Deutschlands (Verwaltungsgebiete 1:250 000). 
diff --git a/digipipe/store/raw/bkg_vg250/metadata.json b/digipipe/store/raw/bkg_vg250/metadata.json new file mode 100644 index 00000000..6cee4857 --- /dev/null +++ b/digipipe/store/raw/bkg_vg250/metadata.json @@ -0,0 +1,102 @@ +{ + "name": "bkg_vg250", + "title": "Adminstrative areas of Germany", + "id": "bkg_vb250", + "description": "Geopackage with administative areas of Germany - Verwaltungsgebiete 1:250 000", + "language": [ + "en-GB", + "de-DE" + ], + "subject": null, + "keywords": [ + "adminstrative areas", + "Verwaltungsgebiete" + ], + "publicationDate": "2022-01-01", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "1:250 000" + }, + "temporal": { + "referenceDate": "2022-01-01", + "timeseries": null + }, +"sources": [ + { + "title": "Bundesamt für Kartographie und Geodäsie - Verwaltungsgebiete 1:250 000 VG250 (Ebenen)", + "description": "Dieser Datensatz stellt die Verwaltungsgebiete 1:250 000 (VG250) mit Stand 01.01. für das Gebiet der Bundesrepublik Deutschland bereit.", + "path": "https://gdz.bkg.bund.de/index.php/default/digitale-geodaten/verwaltungsgebiete/verwaltungsgebiete-1-250-000-stand-01-01-vg250-01-01.html", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland – Namensnennung – Version 2.0", + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": " © GeoBasis-DE / BKG - 2022" + } + ] + } + ], + "contributors": [ + { + "title": "hedwiglieselotte", + "email": "hedwig.bartels@rl-institut.de", + "date": "2023-03-23", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/bmwk_long_term_scenarios/data/.gitkeep b/digipipe/store/raw/bmwk_long_term_scenarios/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/bmwk_long_term_scenarios/dataset.md b/digipipe/store/raw/bmwk_long_term_scenarios/dataset.md new file mode 100644 index 00000000..23a4f2ed --- /dev/null +++ b/digipipe/store/raw/bmwk_long_term_scenarios/dataset.md @@ -0,0 +1,44 @@ +# BMWK Langfristszenarien + +Langfristszenarien des Bundesministerium für Wirtschaft und Klimaschutz, Daten +auf Deutschlandebene. + +Die Daten wurden über den +[Szenario Explorer](https://langfristszenarien.de/enertile-explorer-de/szenario-explorer/) +abgerufen. + +## Verwendete Szenarien + +- **T45-Strom:** Stromfokussiertes Szenario aus den T45-Szenarien aus 2023, die + Wege zur Treibhausgasneutralität bis 2045 unter Einhaltung aktueller + politischer Vorgaben erreichen. Die Daten dieses Szenarios werden als + Grundlage für das Zielszenario in der Region verwendet. +- **TN-Strom:** Stromfokussiertes Szenario aus den TN-Szenarien aus 2021, die + unterschiedliche Pfade für Deutschland mit dem Ziel treibhausgasneutral bis + 2050 zu werden. Die Daten dieses Szenarios werden als Grundlage für den + Status quo verwendet (Ausnahme: Erzeugung Wärmenetze, hier wurden manuell + Daten für 2021 ergänzt). 
+ +## Daten + +### T45-Strom + +| Datensatz | Quelle | Datei | +|------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------| +| Gebäude: Haushalte und GHD Energiebedarf | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/51944/21559a9532131c061668bf0751e519e3) | `T45-Strom_buildings_heating_demand_by_carrier.csv` | +| Gebäude: Anzahl der Heizungen nach Technologie | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/51944/21559a9532131c061668bf0751e519e3) | `T45-Strom_buildings_heating_structure_by_technology.csv` | +| GHD Energieträger | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/52700/c6980ea467bb26a922d34617b4fd4798) | `T45-Strom_cts_demand.csv` | +| Haushalte Energieträger | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/52700/c6980ea467bb26a922d34617b4fd4798) | `T45-Strom_hh_demand.csv` | +| Industrie Energiebedarf | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/52612/9de48084ac2d54c418daaf02a6ee26e0) | `T45-Strom_ind_demand.csv` | +| Stromsystem Deutschland Leistung | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/48766/5c11999a03c547e04e73d61e4b5fc633) | `T45-Strom_electricity_installed_power.csv` | +| Erzeugung Wärmenetze Deutschland | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/49949/cf898070daec6a4e613dc889927a5feb), [Link2](https://static.agora-energiewende.de/fileadmin/Projekte/2022/2022-11_DE_Large_Scale_Heatpumps/A-EW_293_Rollout_Grosswaermepumpen_WEB.pdf) (S. 
37) | `T45-Strom_Generation_Heatgrids_Germany.csv` | + +### TN-Strom + +| Datensatz | Quelle | Datei | +|------------------------------------------------|-----------------------------------------------------------------------------------------------------------|----------------------------------------------------------| +| Gebäude: Haushalte und GHD Energiebedarf | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/8198/698cee83d667a2f44fdea7e78ee799a2) | `TN-Strom_buildings_heating_demand_by_carrier.csv` | +| Gebäude: Anzahl der Heizungen nach Technologie | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/8198/698cee83d667a2f44fdea7e78ee799a2) | `TN-Strom_buildings_heating_structure_by_technology.csv` | +| GHD Energieträger | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/8660/ae5a14ff0c320cbd31c5eeff2ede54ba) | `TN-Strom_cts_demand.csv` | +| Haushalte Energieträger | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/8660/ae5a14ff0c320cbd31c5eeff2ede54ba) | `TN-Strom_hh_demand.csv` | +| Industrie Energiebedarf | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/29085/084bd7f45f40d31fd53341e6a94f532c) | `TN-Strom_ind_demand.csv` | diff --git a/digipipe/store/raw/bmwk_long_term_scenarios/metadata.json b/digipipe/store/raw/bmwk_long_term_scenarios/metadata.json new file mode 100644 index 00000000..b97a11e9 --- /dev/null +++ b/digipipe/store/raw/bmwk_long_term_scenarios/metadata.json @@ -0,0 +1,133 @@ +{ + "name": "bmwk_long_term_scenarios", + "title": "BMWK Langfristszenarien", + "id": "bmwk_long_term_scenarios", + "description": "Langfristszenarien des Bundesministerium für Wirtschaft und Klimaschutz, Daten auf Deutschlandebene.", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "BMWK", + "Langfristszenario", + "T45-Strom", + "TN-Strom" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": null, + "timeseries": [ + { + "start": null, + "end": null, + "resolution": null, + "alignment": null, + "aggregationType": null + }, + { + "start": null, + "end": null, + "resolution": null, + "alignment": null, + "aggregationType": null + } + ] + }, + "sources": [ + { + "title": "BMWK Langfristszenarien", + "description": "Langfristszenarien des Bundesministerium für Wirtschaft und Klimaschutz, Daten auf Deutschlandebene.", + "path": "https://langfristszenarien.de/enertile-explorer-de/szenario-explorer/", + "licenses": null + }, + { + "title": null, + "description": null, + "path": null, + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + 
"comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.6.0", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/bnetza_mastr/data/.gitkeep b/digipipe/store/raw/bnetza_mastr/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/bnetza_mastr/dataset.md b/digipipe/store/raw/bnetza_mastr/dataset.md new file mode 100644 index 00000000..3b9b7255 --- /dev/null +++ b/digipipe/store/raw/bnetza_mastr/dataset.md @@ -0,0 +1,20 @@ +# Erzeugungsanlagen aus Marktstammdatenregister + +Ereugungsanlagen aus dem Markstammdatenregister, das mit dem Tool +[open-mastr](https://github.com/OpenEnergyPlatform/open-MaStR) erstellt und +abgelegt wurde. Die Daten wurden folgendermaßen erstellt: +``` +from open_mastr import Mastr +db = Mastr() +db.download("bulk") +db.to_csv(None) # (None for all data) +``` + +Die abgelegten CSV-Dateien (alle Tabellen) wurden um einen benutzerdefinierten +Export von Speichereinheiten mit +`sqlite3 -header -csv -separator "," open-mastr.db "select * from storage_units;" > bnetza_mastr_storage_unit_raw.csv` +erweitert. Anschließend wurden alle Dateien komprimiert. + +Das Marktstammdatenregister (MaStR) ist ein deutsches Register, welches von der +Bundesnetzagentur (BNetza) bereitgestellt wird und alle in Deutschland +befindlichen Strom- und Gasanlagen erfasst. 
diff --git a/digipipe/store/raw/bnetza_mastr/metadata.json b/digipipe/store/raw/bnetza_mastr/metadata.json new file mode 100644 index 00000000..ad353d26 --- /dev/null +++ b/digipipe/store/raw/bnetza_mastr/metadata.json @@ -0,0 +1,112 @@ +{ + "name": "bnetza_mastr", + "title": "Marktstammdatenregisterdaten", + "id": "bnetza_mastr", + "description": "Daten aus dem Marktstammdatenregister der Bundesnetzagentur", + "language": [ + "en-GB", + "de-DE" + ], + "subject": null, + "keywords": [ + "Markstammdatenregister", + "openmastr", + "mastr" + ], + "publicationDate": "2022-12-19", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-12-19", + "timeseries": null + }, + "sources": [ + { + "title": "Marktstammdatenregister", + "description": "Marktstammdatenregister der Bundesnetzagentur Deutschland", + "path": "https://www.marktstammdatenregister.de/MaStR/Datendownload", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland – Namensnennung – Version 2.0", + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Marktstammdatenregister 2023" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland – Namensnennung – Version 2.0?" , + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Marktstammdatenregister 2023" + } + ], + "contributors": [ + { + "title": "hedwiglieselotte", + "email": "hedwig.bartels@rl-institut.de", + "date": "2023-03-28", + "object": "metadata", + "comment": "Create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/bnetza_mastr_correction_region/data/.gitkeep b/digipipe/store/raw/bnetza_mastr_correction_region/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/bnetza_mastr_correction_region/dataset.md b/digipipe/store/raw/bnetza_mastr_correction_region/dataset.md new file mode 100644 index 00000000..734e6ef5 --- /dev/null +++ b/digipipe/store/raw/bnetza_mastr_correction_region/dataset.md @@ -0,0 +1,30 @@ +# Marktstammdatenregister Datenkorrektur PV + +Überprüfung und manuelle Datenkorrektur der Photovoltaikanlagen aus dem +prozessierten Marktstammdatenregister (Datensatz: +[bnetza_mastr](../bnetza_mastr/dataset.md)). + +## Plausibiltätsprüfung + +Um grobe Fehler herauszufiltern wird überprüft, ob + +- Anlage in Betrieb ist (status = "In Betrieb"), +- Anlage Strom produziert, +- Brutto- und Nettokapazität plausibel sind und +- die Kategorisierung, d.h. Zuordnung eine PV-Anlage zu Freifläche oder Dach, + plausibel ist (manuelle, visuelle Prüfung von geolokalisierten + PV-Aufdachanlagen anhand von + [Orthofotos](https://www.geodatenportal.sachsen-anhalt.de/wss/service/ST_LVermGeo_DOP_WMS_OpenData/guest)) + +## Dateien + +- Korrektur Freiflächenanlagen `bnetza_mastr_pv_ground_region_correction.ods` +- Korrektur Aufdachanlagen `bnetza_mastr_pv_roof_region_correction.ods` + +mit Spalten: + +- _mastr_id_: ID aus dem MaStR +- _reason_: Fehler (wrong_type, wrong_position) +- _wrong_attr_: Fehlerhaftes Attribut +- _correction_: Korrigierter Attributwert (None, wenn Korrektur nicht möglich). + Korrigierte Geometrien liegen in EPSG:3035 vor. 
diff --git a/digipipe/store/raw/bnetza_mastr_correction_region/metadata.json b/digipipe/store/raw/bnetza_mastr_correction_region/metadata.json new file mode 100644 index 00000000..b36e1d6d --- /dev/null +++ b/digipipe/store/raw/bnetza_mastr_correction_region/metadata.json @@ -0,0 +1,112 @@ +{ + "name": "bnetza_mastr_correction", + "title": "Marktstammdatenregisterdaten - Manuelle Korrektur", + "id": "bnetza_mastr", + "description": "Daten aus dem Marktstammdatenregister der Bundesnetzagentur", + "language": [ + "en-GB", + "de-DE" + ], + "subject": null, + "keywords": [ + "Markstammdatenregister", + "openmastr", + "mastr" + ], + "publicationDate": "2022-12-19", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-12-19", + "timeseries": null + }, + "sources": [ + { + "title": "Marktstammdatenregister", + "description": "Marktstammdatenregister der Bundesnetzagentur Deutschland", + "path": "https://www.marktstammdatenregister.de/MaStR/Datendownload", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland – Namensnennung – Version 2.0", + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Marktstammdatenregister 2023" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland – Namensnennung – Version 2.0?" , + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Marktstammdatenregister 2023" + } + ], + "contributors": [ + { + "title": "hedwiglieselotte", + "email": "hedwig.bartels@rl-institut.de", + "date": "2023-03-28", + "object": "metadata", + "comment": "Create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/dbfz_biomass_heat_capacities/data/.gitkeep b/digipipe/store/raw/dbfz_biomass_heat_capacities/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/dbfz_biomass_heat_capacities/dataset.md b/digipipe/store/raw/dbfz_biomass_heat_capacities/dataset.md new file mode 100644 index 00000000..aaf19ff9 --- /dev/null +++ b/digipipe/store/raw/dbfz_biomass_heat_capacities/dataset.md @@ -0,0 +1,38 @@ +# Installierte Leistungen von Biomasse-Konversionstechnologien + +Die installierten Leistungen in MW wird im Szenario 80 % Transformationspfad +und 2,6 Mio. ha Anbauflächen im Jahr 2020 und 2050 der Tabelle 13 im +Dokument +["Technoökonomische Analyse und Transformationspfade des energetischen Biomassepotentials (TATBIO)"](../dbfz_biomass_heat_capacities/metadata.json) +für die folgenden Konversionsanlagen von Biomasse entnommen: + +- Biomethan-Blockheizkraftwerk +- Holzhackschnitzelkessel Sektor Industrie +- Pelletkessel Sektor GHD +- Holzhackschnitzelkessel Sektor GHD +- Scheitholzvergaserkessel +- Pelletkessel Sektor Gebäude +- Biogasanlage + Blockheizkraftwerk +- Biomethan Gas- und Dampfkombikraftwerk +- Klärschlammfaulung + Blockheizkraftwerk +- Papier-Zellstoff-KWK +- Holzvergaser + Blockheizkraftwerk +- Mikro-Holzgas-Blockheizkraftwerk + +Die Konversionstechnologien sind in der Spalte "technology" gelistet, während +sich ihre installierten Leistungen für die beiden Projektionsjahre in den +Spalten "capacity_[MW]_2020" und "capacity_[MW]_2050" befinden. + +In den Spalten "decentral" und "central" wird mit "x" angegeben, ob jeweils ein +dezentraler und zentraler Einsatz der Konversionsanlage Stand der Technik ist. + +In der Spalte "carrier" wird analog zur Konvention der Namensgebung im +Energiesystem (siehe [esys.md](../../../../docs/sections/esys.md)) der +jeweilige in die Konversionsanlage eintretende Energieträger notiert. +Diese werden Abbildung 3 des Dokuments entommen. Der Energieträger Schwarzlauge +wird vereinfachend dem Energieträger feste Biomasse bzw. Holz zugeordnet. +Klärgas und Holzgas werden vereinfachend Biogas zugeordnet. + +In der Spalte "tech" findet die Zuordnung zu der Technologie anhand der im +Energiesystem verwendeten Komponenten (siehe +[esys.md](../../../../docs/sections/esys.md)) statt. 
diff --git a/digipipe/store/raw/dbfz_biomass_heat_capacities/metadata.json b/digipipe/store/raw/dbfz_biomass_heat_capacities/metadata.json new file mode 100644 index 00000000..59f5bddb --- /dev/null +++ b/digipipe/store/raw/dbfz_biomass_heat_capacities/metadata.json @@ -0,0 +1,95 @@ +{ + "name": "dbfz_biomass_heat_capacities", + "title": "Technoökonomische Analyse und Transformationspfade des energetischen Biomassepotentials (TATBIO)", + "id": "dbfz_biomass_heat_capacities", + "description": "Installierte Leistungen von Biomasse-Konversionstechnologien. Die installierten Leistungen in MW wird im Szenario 80 % Transformationspfad", + "language": [ + "de-DE" + ], + "subject": [], + "keywords": [ + "Biomasse", + "Biomassepotential", + "Analyse", + "Transformationspfade", + "TATBIO" + ], + "publicationDate": "2019-05-08", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "" + }, + "temporal": { + "referenceDate": "2019-04-30", + "timeseries" : null + }, + "sources": [ + { + "title": "Technoökonomische Analyse und Transformationspfade des energetischen Biomassepotentials (TATBIO)", + "description": "Installierte Leistungen von Biomasse-Konversionstechnologien. Die installierten Leistungen in MW wird im Szenario 80 % Transformationspfad", + "path": "https://www.ufz.de/export/data/2/231891_technooekonomische-analyse-und-transformationspfade-des-energetischen-biomassepotentials(1).pdf", + "licenses": null + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": [], + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } +}, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/demandregio/data/.gitkeep b/digipipe/store/raw/demandregio/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/demandregio/dataset.md b/digipipe/store/raw/demandregio/dataset.md new file mode 100644 index 00000000..108b85b4 --- /dev/null +++ b/digipipe/store/raw/demandregio/dataset.md @@ -0,0 +1,207 @@ +# DemandRegio + +Regionalisierte Bevölkerungsprognose, Haushalte sowie Strom- und Gasbedarfe +inkl. Zeitreihen auf Landkreisebene. + +Die Daten wurden abgerufen mit einer +[modifizierten Version des DemandRegio disaggregators](https://github.com/nesnoj/disaggregator), +in der softwareseitige, jedoch keine methodischen Änderungen vorgenommen wurden. + +Der disaggregator basiert auf Daten bis 2017, anschließende Jahre werden +fortgeschrieben. + +Weitere Informationen zum Projekt DemandRegio: + +- [Abschlussbericht](https://www.ffe.de/wp-content/uploads/2020/10/DemandRegio_Abschlussbericht.pdf) +- [Abschlussworkshop](https://www.tu.berlin/er/forschung/projekte/demandregio-2) + +Die erzeugten Rohdaten wie unten beschrieben wurden mittels +[API](http://opendata.ffe.de:4000/) abgerufen. Diese können alternativ direkt +vom [OpenData-Portal der FfE](https://opendata.ffe.de/project/demandregio/) +bezogen werden. + +Verwendetes Wetterjahr für Gasbedarfszeitreihen: 2011 + +**Installation (in separater venv):** + +```commandline +pip install disaggregator@git+https://github.com/nesnoj/disaggregator.git#egg=disaggregator +``` + +## Details zum Datenabruf + +### Bevölkerung + +Bevölkerung (Summe) und Bevölkerung je Haushaltsgröße (1, 2, 3, 4, 5, >5) je +NUTS3. + +Jahre: + +- Bevölkerung bis 2017 historische Werte +- Bevölkerung ab 2018 prognostizierte Werte basierend auf der 14. koordinierten + Bevölkerungsvorausberechnung der Statistischen Ämter von Bund und Ländern. +- Haushalte nur 2011 + +```python +import pandas as pd +from disaggregator import data + +# Population +dr_hh_population = pd.DataFrame() +for year in [2010, 2015, 2017, 2020, 2021, 2022, 2025, 2030, 2035, 2040, 2045]: + dr_hh_population[year] = round(data.population(year=year)).astype(int) + +dr_hh_population.to_csv("dr_hh_population.csv") + +# Households +data.households_per_size().to_csv("dr_hh_households_2011.csv") +``` + +### Haushalte: Strom + +Bedarfe und SLP-Zeitreihen je NUTS3 mit Bottom-Up-Methode nach Haushaltsgröße. 
+ +Jahre: + +- 2017: Letzte verfügbare Daten +- 2022: Status quo, Fortschreibung mit Berücksichtigung Demografie und + Wanderung +- 2035: Fortschreibungsjahr mit Berücksichtigung Demografie und Wanderung +- 2045: Fortschreibungsjahr + +```python +from disaggregator import spatial, temporal + +# Consumption +spatial.disagg_households_power( + by="households", + weight_by_income=True, + year=2022, + scale_by_pop=True, +).to_csv(f"dr_hh_power_demand_2022.csv") + +# Timeseries +temporal.disagg_temporal_power_housholds_slp( + use_nuts3code=True, + by="households", + weight_by_income=True, + year=2022, + scale_by_pop=True, +).to_csv(f"dr_hh_power_timeseries_2022.csv") +``` + +### Haushalte: Gas + +Zeitreihen je NUTS3 + +```python +from disaggregator import temporal + +# Timeseries +temporal.disagg_temporal_gas_households( + use_nuts3code=True, + how='top-down', + year=2011, +).to_csv(f"dr_hh_gas_timeseries_2011.csv") +``` + + +### GHD und Industrie: Strom + +Bedarfe und Zeitreihen je NUTS3: + +- Bedarfe: Je Wirtschaftszweig (WZ), abzüglich Eigenerzeugung +- Zeitreihen: Für alle WZ bedarfsgewichtet aggregiert, Einzelprofile basieren + je nach WZ auf gemessenen oder SLP inkl. Wanderung +- Letzte verfügbare Daten aus 2017, Fortschreibung für 2022 mit + Berücksichtigung Beschäftigte und Effizienzgewinne + +```python +from disaggregator import spatial, temporal + +####### +# CTS # +####### + +# Consumption +spatial.disagg_CTS_industry( + sector='CTS', + source='power', + use_nuts3code=True, + year=2022, +).to_csv("dr_cts_power_demand_2022.csv") +# Timeseries +temporal.disagg_temporal_power_CTS( + detailed=False, + use_nuts3code=True, + year=2022, +).to_csv("dr_cts_power_timeseries_2022.csv") + +############ +# Industry # +############ + +# Consumption +spatial.disagg_CTS_industry( + sector='industry', + source='power', + use_nuts3code=True, + year=2022, +).to_csv("dr_ind_power_demand_2022.csv") +# Timeseries +temporal.disagg_temporal_industry( + source="power", + detailed=False, + use_nuts3code=True, + no_self_gen=False, + year=2022, +).to_csv("dr_ind_power_timeseries_2022.csv") +``` + +### GHD: Gas + +Zeitreihen je NUTS3 für alle WZ bedarfsgewichtet aggregiert, Einzelprofile +basieren je nach WZ auf gemessenen oder SLP inkl. Wanderung. Letzte verfügbare +Daten aus 2017, Fortschreibung für 2022 mit Berücksichtigung Beschäftigte und +Effizienzgewinne. + +```python +from disaggregator import spatial, temporal + +# Timeseries +x=temporal.disagg_temporal_gas_CTS( + detailed=False, + use_nuts3code=True, + year=2011, +).to_csv("dr_cts_gas_timeseries_2011.csv") +``` + +### Industrie: Gas + +Bedarfe und Zeitreihen je NUTS3: + +- Bedarfe: Je Wirtschaftszweig (WZ), abzüglich Eigenerzeugung +- Zeitreihen: Für alle WZ bedarfsgewichtet aggregiert, Einzelprofile basieren + je nach WZ auf gemessenen oder SLP inkl. 
Wanderung +- Letzte verfügbare Daten aus 2017, Fortschreibung für 2022 mit + Berücksichtigung Beschäftigte und Effizienzgewinne + +```python +from disaggregator import spatial, temporal + +# Consumption +spatial.disagg_CTS_industry( + sector='industry', + source='gas', + use_nuts3code=True, + year=2022, +).to_csv("dr_ind_gas_demand_2022.csv") +# Timeseries +x=temporal.disagg_temporal_industry( + source="gas", + detailed=False, + use_nuts3code=True, + no_self_gen=False, + year=2011, +).to_csv("dr_ind_gas_timeseries_2011.csv") +``` diff --git a/digipipe/store/raw/demandregio/metadata.json b/digipipe/store/raw/demandregio/metadata.json new file mode 100644 index 00000000..628e4194 --- /dev/null +++ b/digipipe/store/raw/demandregio/metadata.json @@ -0,0 +1,150 @@ +{ + "name": "demandregio", + "title": "DemandRegio", + "id": "demandregio", + "description": "Regionalisierte Bevölkerungsprognose, Haushalte sowie Strom- und Gasbedarfe inkl. Zeitreihen auf Landkreisebene.", + "language": [ + "en-GB", + "de-DE" + ], + "subject": null, + "keywords": [ + "Bevölkerung", + "Bevölkerungsprognose", + "Strombedarf", + "Gasbedarf", + "Haushalte", + "disaggregator", + "disaggregation" + ], + "publicationDate": "2020-09-30", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "NUTS-3" + }, + "temporal": { + "referenceDate": "2022-01-01", + "timeseries": [ + { + "start": "2022-01-01T00:00+01", + "end": "2022-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + }, + { + "start": "2035-01-01T00:00+01", + "end": "2035-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + }, + { + "start": "2045-01-01T00:00+01", + "end": "2045-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + }, + { + "start": "2011-01-01T00:00+01", + "end": "2011-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + }, + { + "start": "2022-01-01T00:00+01", + "end": "2022-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + } + ] + }, +"sources": [ + { + "title": "DemandRegio", + "description": "Regionalisierte Bevölkerungsprognose, Haushalte sowie Strom- und Gasbedarfe inkl. Zeitreihen auf Landkreisebene.", + "path": "https://github.com/nesnoj/disaggregator/", + "licenses": [ + { + "name": "CC BY 4.0 DE", + "title": "Creative Commons Namensnennung 4.0 Deutschland", + "path": "https://creativecommons.org/licenses/by/4.0/deed.de", + "instruction": "Sie müssen angemessene Urheber- und Rechteangaben machen, einen Link zur Lizenz beifügen und angeben, ob Änderungen vorgenommen wurden. 
Diese Angaben dürfen in jeder angemessenen Art und Weise gemacht werden, allerdings nicht so, dass der Eindruck entsteht, der Lizenzgeber unterstütze gerade Sie oder Ihre Nutzung besonders.", + "attribution": "© FZJ, TUB, FfE" + }, + { + "name": "GNU FDL" , + "title": "GNU General Public License v3.0", + "path": "https://www.gnu.org/licenses/gpl-3.0.en.html", + "instruction": "Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed", + "attribution": "© FZJ, TUB, FfE" + } + ] + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/destatis_gv/data/.gitkeep b/digipipe/store/raw/destatis_gv/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/destatis_gv/dataset.md b/digipipe/store/raw/destatis_gv/dataset.md new file mode 100644 index 00000000..438fd699 --- /dev/null +++ b/digipipe/store/raw/destatis_gv/dataset.md @@ -0,0 +1,3 @@ +# Bevölkerung + +Einwohnerzahl nach Gemeinden des Statistischen Bundesamts. 
diff --git a/digipipe/store/raw/destatis_gv/metadata.json b/digipipe/store/raw/destatis_gv/metadata.json new file mode 100644 index 00000000..22e76b0c --- /dev/null +++ b/digipipe/store/raw/destatis_gv/metadata.json @@ -0,0 +1,110 @@ +{ + "name": "destatis_gv", + "title": "Adminstratives Gemeinndeverzeichnis", + "id": "destatis_gv", + "description": "Alle politisch selbständigen Gemeinden mit ausgewählten Merkmalen am 31.12.2022 ", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "destatis", + "gemeindeverzeichnis" + ], + "publicationDate": "2023-01-12", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "" + }, + "temporal": { + "referenceDate": "2022-02-14", + "timeseries": null + }, + "sources": [ + { + "title": "Statistisches Bundesamt", + "description": "Alle politisch selbständigen Gemeineden mit ausgewählten Merkmalen am 31.12.2022 (4.Quartal)", + "path": "https://www.destatis.de/DE/Themen/Laender-Regionen/Regionales/Gemeindeverzeichnis/Administrativ/Archiv/GVAuszugQ/AuszugGV4QAktuell.html", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Statistisches Bundesamt (Destatis), 2023" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Statistisches Bundesamt (Destatis), 2023" + } + ], + "contributors": [ + { + "title": "hedwiglieselotte", + "email": "hedwig.bartels@rl-institut.de", + "date": "2023-03-28", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/dwd_temperature/data/.gitkeep b/digipipe/store/raw/dwd_temperature/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/dwd_temperature/dataset.md b/digipipe/store/raw/dwd_temperature/dataset.md new file mode 100644 index 00000000..f1dbd8df --- /dev/null +++ b/digipipe/store/raw/dwd_temperature/dataset.md @@ -0,0 +1,23 @@ +# Temperatur + +Stündliche Mittelwerte der Luft- und Erdbodentemperatur des Deutschen +Wetterdienstes +([Climate Data Center](https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/hourly/)) +für das Jahr 2011 je Gemeinde in der Region ABW, vorverarbeitet im Projekt +[WindNODE](https://windnode-abw.readthedocs.io/en/latest/energy_system_model.html#energy-demand-today). + +Werte: + +- `temp_amb`: Lufttemperatur in 2 m Höhe +- `temp_soil`: Erdbodentemperatur in 1 m Tiefe + +Verwendete Stationen: + +- Wittenberg +- Köthen +- Jessnitz +- Seehausen +- Holzdorf + +Die Zuordnung der Stationsmesswerte zu Gemeinden erfolgte über die jeweils +nächstgelegene Wetterstation. 
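+
+Eine minimale Skizze (Datei- und Spaltennamen sind Annahmen und nicht Teil des
+Datensatzes), wie sich eine solche Zuordnung über die jeweils nächstgelegene
+Station mit GeoPandas nachvollziehen lässt:
+
+```
+import geopandas as gpd
+
+# Gemeinden (Polygone) und Wetterstationen (Punkte) laden und in ein
+# metrisches CRS projizieren, damit Distanzen sinnvoll berechnet werden
+muns = gpd.read_file("bkg_vg250_muns_region.gpkg").to_crs(25832)
+stations = gpd.read_file("dwd_stations.gpkg").to_crs(25832)
+
+# Je Gemeinde (Zentroid) die nächstgelegene Station bestimmen
+muns["geometry"] = muns.centroid
+muns_stations = gpd.sjoin_nearest(muns, stations, how="left", distance_col="dist_m")
+```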
diff --git a/digipipe/store/raw/dwd_temperature/metadata.json b/digipipe/store/raw/dwd_temperature/metadata.json new file mode 100644 index 00000000..f358c0e9 --- /dev/null +++ b/digipipe/store/raw/dwd_temperature/metadata.json @@ -0,0 +1,118 @@ +{ + "name": "dwd_temperature", + "title": "temperatur_2011", + "id": "dwd_temperature", + "description": "Stündliche Mittelwerte der Luft- und Erdbodentemperatur des Deutschen Wetterdienstes (Climate Data Center) für das Jahr 2011 je Gemeinde in der Region ABW, vorverarbeitet im Projekt WindNODE.", + "language": [ + "en-GB" + ], + "subject": null, + "keywords": [ + "Wetter", + "Temperatur" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Anhalt-Bitterfeld-Wittenberg", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2023-05-23", + "timeseries": [ + { + "start": "2011-01-01T00:00+01", + "end": "2011-12-31T23:00+01", + "resolution": "1 h", + "alignment": "left", + "aggregationType": null + } + ] + }, +"sources": [ + { + "title": "temperatur_2011", + "description": "Stündliche Mittelwerte der Luft- und Erdbodentemperatur des Deutschen Wetterdienstes (Climate Data Center) für das Jahr 2011 je Gemeinde in der Region ABW, vorverarbeitet im Projekt WindNODE", + "path": "https://www.dwd.de/DE/leistungen/cdc/climate-data-center.html", + "licenses": [ + { + "name": "GeoNutzV", + "title": "Verordnung zur Festlegung der Nutzungsbestimmungen für die Bereitstellung von Geodaten des Bundes", + "path": "https://www.gesetze-im-internet.de/geonutzv/GeoNutzV.pdf", + "instruction": "Alle frei zugänglichen Geodaten und Geodatendienste dürfen entsprechend der Verordnung zur Festlegung der Nutzungsbestimmungen für die Bereitstellung von Geodaten des Bundes (GeoNutzV) unter Beigabe eines Quellenvermerks ohne Einschränkungen weiterverwendet werden.", + "attribution": " © Deutscher Wetterdienst" + } + ] + } + ], + "licenses": [ + { + "name": "GeoNutzV", + "title": "Verordnung zur Festlegung der Nutzungsbestimmungen für die Bereitstellung von Geodaten des Bundes", + "path": "https://www.gesetze-im-internet.de/geonutzv/GeoNutzV.pdf", + "instruction": "Alle frei zugänglichen Geodaten und Geodatendienste dürfen entsprechend der Verordnung zur Festlegung der Nutzungsbestimmungen für die Bereitstellung von Geodaten des Bundes (GeoNutzV) unter Beigabe eines Quellenvermerks ohne Einschränkungen weiterverwendet werden.", + "attribution": " © Deutscher Wetterdienst" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "aaron.schilling@rl-institut.de", + "date": "2023-08-25", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/emissions/data/.gitkeep b/digipipe/store/raw/emissions/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/emissions/dataset.md b/digipipe/store/raw/emissions/dataset.md new file mode 100644 index 00000000..ca296b17 --- /dev/null +++ b/digipipe/store/raw/emissions/dataset.md @@ -0,0 +1,238 @@ +# Emissionen + +Emissionen für die Jahre 1990 und 2019 für Sachsen-Anhalt und disaggregiert für +die Region Anhalt-Bitterfeld-Wittenberg (ABW). Die Grundlage hierfür ist der +[THG-Bericht 2021](https://lau.sachsen-anhalt.de/fileadmin/Bibliothek/Politik_und_Verwaltung/MLU/LAU/Wir_ueber_uns/Publikationen/Fachberichte/Dateien/221014_THG-Bericht.pdf) +Sachsen-Anhalt (ST). + +Datei: `emissions.csv`, Felder: + +- `sector`: Sektor +- `cat`: Kategorie ("*" = alle) +- `subcat`: Unterkategorie ("*" = alle) +- `name`: Bezeichner +- `st`: Emissionen Sachsen-Anhalt in kt CO2-Äquivalent +- `abw`: Emissionen Region ABW in kt CO2-Äquivalent + +`sector`, `cat` und `subcat` folgen der Nomenklatur des Common Reporting Formats +(CRF) nach [KSG Anlage 1](https://www.gesetze-im-internet.de/ksg/anlage_1.html). +[Grafik hierzu](https://expertenrat-klima.de/content/uploads/2023/05/ERK2023_Pruefbericht-Emissionsdaten-des-Jahres-2022.pdf) +(Abb. 2 auf S. 30). + +## Disaggregation + +Anhand unterschiedlicher Kriterien und Datenquellen wurde näherungsweise von den +vorliegenden Emissionen für Sachsen-Anhalt für 1990 und 2019 auf die Region ABW +disaggregiert. Je Sektor sind hier die gewählten +**energiebestimmenden Größen (EnbG)** angegeben, sowie die Herangehensweise zur +jeweiligen Berechnung. + +### Sektor Energiewirtschaft (CRF 1.A.1 + 1.B) + +Aus der Liste der +[Emissionshandelspflichtigen Anlagen](https://www.dehst.de/SharedDocs/downloads/DE/anlagenlisten/2013-2020/2020.pdf?__blob=publicationFile&v=3) +wurden jene Daten zu Anlagen extrahiert, welche sich in Sachsen-Anhalt befinden +und als Bezeichnung "Energieumwandlung >= 50 MW FWL" oder "Energieumwandlung +20–50 MW FWL" (Haupttätigkeit nach TEHG) aufweisen. +Die Summe der angegebenen Emissionen (t CO2 Äq) jener Anlagen, welche in der +Region ABW liegen, wurde in Relation zu der Summe der Emissionen aus den Anlagen +in Gesamt-ST gesetzt. 
Dieser Anteil wurde auf die im THG-Bericht angegebene +Emissionsmenge im Sektor "Energiewirtschaft (1.A.1)" sowie "Prozessemissionen +(1.B)" angelegt und so für ABW näherungsweise disaggregiert. + +Hinweise: + +- Aufgrund mangelnder Daten wurde für das Jahr 1990 auf die neuesten verfügbaren + Daten (2005-2007) aus der Anlagenliste zurückgegriffen. +- Energiewirtschaftlich relevante Anlagen unter 20 MW FWL sind in der + Anlagenliste nicht erfasst und konnten somit nicht berücksichtigt werden. + +Quellen: + +- [Emissionshandelspflichtige Anlagen in Deutschland 2020 (Stand 03.05.2021)](https://www.dehst.de/SharedDocs/downloads/DE/anlagenlisten/2013-2020/2020.pdf?__blob=publicationFile&v=3) +- [Treibhausgasemissionen in Sachsen-Anhalt 2018 (Stand 12.05.2021)](https://lau.sachsen-anhalt.de/fileadmin/Bibliothek/Politik_und_Verwaltung/MLU/LAU/Wir_ueber_uns/Publikationen/Fachberichte/Dateien/THG_Bericht_2018.pdf) + +#### CRF 1.A.1 + +Energiewirtschaft (Umwandlungsbereich): umfasst die öffentliche Elektrizitäts- +und Wärmeversorgung sowie Raffinerien. + +EnbG: Emissionen aus europäischem Emissionshandel + +#### CRF 1.B + +Diffuse Emissionen aus Brennstoffen: Diese Kategorie beinhaltet flüchtige +Emissionen aus der Gewinnung, Verarbeitung und Verteilung von Brennstoffen. Die +wichtigsten Quellen sind die Verteilung von Erdgas, aber auch Emissionen aus +Förderung und Abfackelung, die Extraktion und Umwandlung von Braunkohle, +Emissionen aus der Raffination von Erdöl sowie Emissionen aus der Lagerung und +Verteilung von Mineralölprodukten. + +EnbG: Emissionen aus europäischem Emissionshandel + +### Sektor Industrie (CRF 1.A.2) + +Dieser Sektor umfasst sämtliche energiebedingten Emissionen durch verarbeitendes +Gewerbe. + +Zur Disaggregierung wurde der Energieverbrauch der Industriebetriebe in ABW mit +dem Gesamtenergieverbrauch aller Industriebetriebe in Sachsen-Anhalt in Relation +gesetzt. Dabei wurde eine Differenzierung hinsichtlich der +Energieträgerzusammensetzung von ABW im Vergleich zu ST durchgeführt und anhand +von Emissionsfaktoren berechnet. + +EnbG: Energieverbrauch nach Energieträgern + +Quellen: + +- [Energieverbrauch der Industriebetriebe in Sachsen-Anhalt nach ausgewählten Energieträgern und Kreisen](https://statistik.sachsen-anhalt.de/fileadmin/Bibliothek/Landesaemter/StaLa/startseite/Themen/Energie/Tabellen/Energieverwendung/Energieverbrauch_nach_Kreisen_ab_dem_Jahr_2010.xlsx) +- [Emissionsfaktor für Stromerzeugung (UBA)](https://www.umweltbundesamt.de/sites/default/files/medien/479/bilder/dateien/entwicklung_der_spezifischen_emissionen_des_deutschen_strommix_1990-2020_und_erste_schaetzungen_2021.pdf) +- [BISKO Bilanzierungs-Systematik Kommunal (Aktualisierung 11/2019)](https://www.ifeu.de/fileadmin/uploads/BISKO_Methodenpapier_kurz_ifeu_Nov19.pdf) + +### Sektor Prozessemissionen (CRF 2) + +Dieser Sektor umfasst sämtliche Emissionen, welche durch Industrieprozesse +anfallen. Dies sind Emissionen aus: Herstellung mineralischer Produkte, +chemischer Industrie, Herstellung von Metallen, übrigen Prozessen und +Produktverwendungen (CRF 2.A-H). +Zur Disaggregierung wurde erneut die +[Liste der Emissionshandelspflichtigen Anlagen](https://www.dehst.de/SharedDocs/downloads/DE/anlagenlisten/2013-2020/2020.pdf?__blob=publicationFile&v=3) +herangezogen. Anders als im Sektor Energiewirtschaft (s.o.) wurde jedoch der +Anteil aller Anlagen, welche nicht der Energiewirtschaft zugerechnet werden, zur +Bestimmung des Anteils von ABW an ST gewählt. 
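+
+Eine minimale Code-Skizze zur Bestimmung eines solchen Anteils aus der
+Anlagenliste (Datei- und Spaltennamen sind hier Annahmen und müssen an die
+tatsächliche Tabelle angepasst werden):
+
+```
+import pandas as pd
+
+# Anlagenliste mit Landkreis, Haupttätigkeit und Emissionen (t CO2-Äq); Spaltennamen hypothetisch
+plants = pd.read_csv("dehst_anlagenliste_2020.csv")
+st_plants = plants[plants["bundesland"] == "Sachsen-Anhalt"]
+
+# Nicht der Energiewirtschaft zugerechnete Anlagen (für die Prozessemissionen, CRF 2)
+non_energy = st_plants[~st_plants["haupttaetigkeit"].str.contains("Energieumwandlung")]
+
+# Anteil ABW (Landkreise Anhalt-Bitterfeld, Wittenberg und Stadt Dessau-Roßlau) an ST
+abw = ["Anhalt-Bitterfeld", "Wittenberg", "Dessau-Roßlau"]
+share_abw = (
+    non_energy.loc[non_energy["landkreis"].isin(abw), "emissionen_t"].sum()
+    / non_energy["emissionen_t"].sum()
+)
+
+# Anteil auf die Sektoremissionen aus dem THG-Bericht anwenden
+emissions_st = 1000.0  # Platzhalter: Wert für ST aus dem THG-Bericht, kt CO2-Äq
+emissions_abw = share_abw * emissions_st
+```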
+
+EnbG: Emissionen aus europäischem Emissionshandel
+
+### Sektor Verkehr (CRF 1.A.3)
+
+Dieser Sektor umfasst Emissionen aus dem Straßenverkehr, dem zivilen
+Luftverkehr, aus dem Schiffsverkehr, verbrennungsbedingte Emissionen aus dem
+Schienenverkehr sowie Emissionen des übrigen Verkehrs und weitere Quellen zur
+Bereitstellung der im Verkehr verbrauchten Energie. Die Verbrennung von
+Mineralölprodukten im Straßenverkehr spielt die größte Rolle und macht weit über
+90 % der sektoralen Emissionen aus. Daher wird zur Disaggregation der
+motorisierte Straßenverkehr über zugelassene Kraftfahrzeuge mit
+durchschnittlichen Fahrleistungen und spezifischen Emissionen pro Kilometer und
+Fahrzeugklasse herangezogen.
+
+Hierfür wird zunächst aus
+[Verkehr in Kilometern (VK) Zeitreihe Jahre 2014 - 2022](https://www.kba.de/DE/Statistik/Kraftverkehr/VerkehrKilometer/vk_inlaenderfahrleistung/vk_inlaenderfahrleistung_node.html;jsessionid=DD419FD0604C0BCC72A9E4533BB0319F.live21324)
+und
+[Umweltfreundlich mobil! Ein ökologischer Verkehrsartenvergleich für den Personen- und Güterverkehr in Deutschland](https://www.umweltbundesamt.de/sites/default/files/medien/5750/publikationen/2021_fb_umweltfreundlich_mobil_bf.pdf)
+ein durchschnittlicher Emissionswert pro Jahr und Fahrzeugklasse ermittelt.
+Dieser wird mit den zugelassenen Fahrzeugen der entsprechenden Fahrzeugklassen
+aus
+[Kraftfahrzeugbestand nach Kraftfahrzeugarten - Stichtag 01.01. - regionale Tiefe: Kreise und krfr. Städte (bis 01.01.2019)](https://www-genesis.destatis.de/genesis//online?operation=table&code=46251-0001&bypass=true&levelindex=0&levelid=1691405772899#abreadcrumb)
+einerseits für ganz Sachsen-Anhalt und andererseits für ABW multipliziert. Daraus
+wird ein Verhältnis der Verkehrsemissionen in ABW zu ST gewonnen.
+
+Hinweise:
+
+- Die Datenlage zu den zugelassenen Fahrzeugen, gefahrenen Kilometern und
+  Emissionen pro km ist nicht spezifisch für 1990, sondern nur für einzelne
+  Jahre der frühen 1990er verfügbar. Daher ist der Emissionswert für 1990 mit
+  einer höheren Unsicherheit behaftet.
+
+EnbG:
+
+- Zugelassene Kraftfahrzeuge
+- Durchschnittliche Fahrleistung und spez. CO2-Emission pro km und
+  Fahrzeugklasse
+
+Quellen:
+
+- [Kraftfahrzeugbestand nach Kraftfahrzeugarten - Stichtag 01.01. - regionale Tiefe: Kreise und krfr. Städte (bis 01.01.2019)](https://www-genesis.destatis.de/genesis//online?operation=table&code=46251-0001&bypass=true&levelindex=0&levelid=1691405772899#abreadcrumb)
+- [Umweltfreundlich mobil! Ein ökologischer Verkehrsartenvergleich für den Personen- und Güterverkehr in Deutschland](https://www.umweltbundesamt.de/sites/default/files/medien/5750/publikationen/2021_fb_umweltfreundlich_mobil_bf.pdf)
+- [Verkehr in Kilometern (VK) Zeitreihe Jahre 2014 - 2022](https://www.kba.de/DE/Statistik/Kraftverkehr/VerkehrKilometer/vk_inlaenderfahrleistung/vk_inlaenderfahrleistung_node.html;jsessionid=DD419FD0604C0BCC72A9E4533BB0319F.live21324)
+
+### Sektor Sonstige Energie (insbes. Gebäude) (CRF 1.A.4 + 1.A.5)
+
+Dieser Sektor umfasst den durch Energieumwandlung nicht bereits abgedeckten
+Energiebedarf. Das sind vor allem kleine Einzelfeuerungsanlagen bis hin zu
+immissionsschutzrechtlich genehmigungsbedürftigen Anlagen mit einer
+Nennwärmeleistung von mehreren Megawatt. Zur Disaggregation wurde daher der
+Wärmebedarf von ABW im Verhältnis zum Wärmebedarf von ganz Sachsen-Anhalt
+gewählt.
+Der Wärmebedarf umfasst Raumwärme, Warmwasser sowie Kochen und wird aus
+Daten des Pipeline-Datensatzes
+[demand_heat_region](../../datasets/demand_heat_region/dataset.md) generiert.
+
+Ergebnis: 17,46 % des Bedarfs in Sachsen-Anhalt entfällt auf ABW.
+
+Code:
+```
+import geopandas as gpd
+
+# Sektor HH
+heat_hh_dist_states = gpd.read_file("demand_heat_zonal_stats-res-bkg_vg250_federal_states.gpkg")
+heat_hh_demand_st = float(heat_hh_dist_states.loc[heat_hh_dist_states.nuts == "DEE"].heat_demand)
+heat_hh_demand_abw = gpd.read_file("demand_heat_zonal_stats-res-bkg_vg250_muns_region.gpkg").heat_demand.sum()
+
+# Sektor GHD
+heat_cts_dist_states = gpd.read_file("demand_heat_zonal_stats-ser-bkg_vg250_federal_states.gpkg")
+heat_cts_demand_st = float(heat_cts_dist_states.loc[heat_cts_dist_states.nuts == "DEE"].heat_demand)
+heat_cts_demand_abw = gpd.read_file("demand_heat_zonal_stats-ser-bkg_vg250_muns_region.gpkg").heat_demand.sum()
+
+# Anteil ABW an ST
+heat_share = (heat_hh_demand_abw + heat_cts_demand_abw) / (heat_hh_demand_st + heat_cts_demand_st)
+```
+
+EnbG: Wärmebedarf aus Energiesystem
+
+### Sektor Landwirtschaft (CRF 3)
+
+Der Sektor umfasst Emissionen aus der Viehwirtschaft und der Bewirtschaftung von
+Böden. Daher werden zunächst die Emissionsunterkategorien 3.A-J der
+Viehwirtschaft oder der Bewirtschaftung von Böden zugeordnet. Anschließend
+werden diese getrennt nach den Viehbeständen bzw. der landwirtschaftlich
+genutzten Fläche disaggregiert.
+
+#### CRF 3.A - Landwirtschaft – Fermentation
+
+Emissionen durch Fermentation (CRF 3.A) entstehen vorrangig durch
+Verdauungsprozesse in der Viehwirtschaft. Deswegen wird der Anteil ABWs an
+diesen Emissionen durch die Viehbestände abgeschätzt.
+
+Hinweis:
+
+- Die Viehbestände für 1990 sind nicht bekannt, es wird stattdessen auf die
+  Viehbestände von 1996 zurückgegriffen.
+
+EnbG: Viehbestände
+
+Quelle:
+
+- [Viehbestand der landwirtschaftlichen Betriebe in Großvieheinheiten (GV) nach Jahren und Kreisen](https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/land-und-forstwirtschaft-fischerei/tabellen-viehwirtschaft-und-tierische-erzeugnisse#c234218)
+
+#### CRF 3.B-J
+
+In den Unterkategorien 3.C-J ist eine Proportionalität der Emissionen und der
+landwirtschaftlich genutzten Fläche zu erwarten. Unterkategorie 3.B
+"Wirtschaftsdüngerausbringung (ohne Gärreste)" ist allerdings ein Grenzfall, da
+Wirtschaftsdünger aus Abfällen der Tierhaltung produziert wird und bereits
+hierbei Treibhausgase entstehen, dieser aber nicht vor Ort eingesetzt werden
+muss, sondern auf beliebigen landwirtschaftlichen Flächen eingesetzt werden kann.
+Daher wird hier auch diese Unterkategorie der Landnutzung zugeordnet.
+
+Hinweis:
+
+- Die Flächennutzungsdaten gehen nicht bis 1990 zurück, ändern sich über die
+  Jahre aber nur marginal, sodass hier nur von geringen Abweichungen auszugehen
+  ist.
+
+EnbG: Landwirtschaftlich genutzte Fläche
+
+Quelle:
+
+- [Flaeche_nach_Kultuarten_nach_Jahren_und_Kreisen](https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/land-und-forstwirtschaft-fischerei/tabellen-bodennutzung-und-anbau)
+
+### Sektor Abfall und Abwasser (CRF 5)
+
+Dieser Sektor besteht vor allem aus Emissionen aus Abfalldeponien, welche bei
+der Zersetzung organischer Materialien in Deponien entstehen. Es wird
+angenommen, dass der Abfall aus Produktionsprozessen gegenüber den Abfällen aus
+Konsum vernachlässigbar ist, weswegen eine Disaggregation auf Grundlage der
+Bevölkerung von ABW vorgenommen wird.
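+
+Analog dazu skizzenhaft die Anteilsbildung über die Bevölkerung (Datei- und
+Spaltennamen hypothetisch, Werte aus der unten genannten GENESIS-Tabelle):
+
+```
+import pandas as pd
+
+# Bevölkerung je Gemeinde in Sachsen-Anhalt; Spalte "region" kennzeichnet ABW-Gemeinden
+pop = pd.read_csv("bevoelkerung_gemeinden_st.csv")
+pop_share_abw = pop.loc[pop["region"] == "ABW", "einwohner"].sum() / pop["einwohner"].sum()
+
+# Anteil auf die Emissionen des Sektors Abfall und Abwasser (CRF 5) in ST anwenden
+emissions_st_crf5 = 100.0  # Platzhalter: Wert aus dem THG-Bericht, kt CO2-Äq
+emissions_abw_crf5 = pop_share_abw * emissions_st_crf5
+```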
+ +EnbG: Bevölkerung + +Quelle: + +- [Bevölkerung nach Geschlecht in den Gemeinden](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=12411-0001&bypass=true&levelindex=0&levelid=1691507280245#abreadcrumb) diff --git a/digipipe/store/raw/emissions/metadata.json b/digipipe/store/raw/emissions/metadata.json new file mode 100644 index 00000000..9afd4d47 --- /dev/null +++ b/digipipe/store/raw/emissions/metadata.json @@ -0,0 +1,6 @@ +{ + "Daten Sachsen-Anhalt": "https://lau.sachsen-anhalt.de/fileadmin/Bibliothek/Politik_und_Verwaltung/MLU/LAU/Wir_ueber_uns/Publikationen/Fachberichte/Dateien/221014_THG-Bericht.pdf", + "Datensätze Desaggregation": { + "Industrie": "" + } +} diff --git a/digipipe/store/raw/eurostat_lau/data/.gitkeep b/digipipe/store/raw/eurostat_lau/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/eurostat_lau/dataset.md b/digipipe/store/raw/eurostat_lau/dataset.md new file mode 100644 index 00000000..8e6f679d --- /dev/null +++ b/digipipe/store/raw/eurostat_lau/dataset.md @@ -0,0 +1,5 @@ +# Lokale Verwaltungseinheiten + +Lokale Verwaltungseinheiten (LAUs) von Eurostat, mit NUTS kompatibel. Diese LAU +sind die Bausteine der NUTS und umfassen die Gemeinden und Kommunen der +Europäischen Union. diff --git a/digipipe/store/raw/eurostat_lau/metadata.json b/digipipe/store/raw/eurostat_lau/metadata.json new file mode 100644 index 00000000..d29a7266 --- /dev/null +++ b/digipipe/store/raw/eurostat_lau/metadata.json @@ -0,0 +1,111 @@ +{ + "name": "eurostat_lau", + "title": "Lokale Verwaltungseinheiten", + "id": "eurostat_lau", + "description": "Lokale Verwaltungseinheiten (LAUs) von Eurostat, mit NUTS kompatibel. Diese LAU sind die Bausteine der NUTS und umfassen die Gemeinden und Kommunen der Europäischen Union", + "language": [ + "en-GB" + ], + "subject": null, + "keywords": [ + "Verwaltungseinheiten", + "NUTS", + "LAU" + ], + "publicationDate": "2022-12-15", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "NUTS-3" + }, + "temporal": { + "referenceDate": "2022-06-30", + "timeseries": null + }, + "sources": [ + { + "title": "Lokale Verwaltungseinheiten", + "description": "Lokale Verwaltungseinheiten (LAUs) von Eurostat, mit NUTS kompatibel. 
Diese LAU sind die Bausteine der NUTS und umfassen die Gemeinden und Kommunen der Europäischen Union", + "path": "https://ec.europa.eu/eurostat/de/web/nuts/local-administrative-units", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© eurostat, 2023" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© eurostat, 2023" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-25", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/osm/data/.gitkeep b/digipipe/store/raw/osm/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/osm/dataset.md b/digipipe/store/raw/osm/dataset.md new file mode 100644 index 00000000..e5179f87 --- /dev/null +++ b/digipipe/store/raw/osm/dataset.md @@ -0,0 +1,7 @@ +# OpenStreetMap + +OpenStreetMap Datenauszug Deutschland. 
+ +Quelle: https://download.geofabrik.de/europe/germany-230101.osm.pbf + +Ist nicht Teil des Eingangsdaten-Packages - manueller Download erforderlich. diff --git a/digipipe/store/raw/osm/metadata.json b/digipipe/store/raw/osm/metadata.json new file mode 100644 index 00000000..aa93741c --- /dev/null +++ b/digipipe/store/raw/osm/metadata.json @@ -0,0 +1,111 @@ +{ + "name": "openstreetmap", + "title": "", + "id": "openstreetmap", + "description": "OpenStreetMap extract", + "language": [ + "de-DE", + "en-GB" + ], + "subject": [], + "keywords": [ + "openstreetmap", + "osm" + ], + "publicationDate": "2023-06-30", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "" + }, + "temporal": { + "referenceDate": "2023-06-30", + "timeseries": [] + }, + "sources": [ + { + "title": "OpenStreetMap Data Extracts (Geofabrik)", + "description": "Full data extract of OpenStreetMap data", + "path": "https://download.geofabrik.de/europe/germany-230101.osm.pbf", + "licenses": [ + { + "name": "ODbL-1.0", + "title": "Open Data Commons Open Database License 1.0", + "path": "https://opendatacommons.org/licenses/odbl/1.0/", + "instruction": "You are free: To Share, To Create, To Adapt; As long as you: Attribute, Share-Alike, Keep open!", + "attribution": "© OpenStreetMap contributors" + } + ] + } + ], + "licenses": [ + { + "name": "ODbL-1.0", + "title": "Open Data Commons Open Database License 1.0", + "path": "https://opendatacommons.org/licenses/odbl/1.0/", + "instruction": "You are free: To Share, To Create, To Adapt; As long as you: Attribute, Share-Alike, Keep open!", + "attribution": "© OpenStreetMap contributors" + } + ], + "contributors": [ + { + "title": "nesnoj", + "email": "jonathan.amme@rl-institut.de", + "date": "2023-06-30", + "object": "metadata", + "comment": "Create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "oemetadata_v1.5.1", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/regiostat/data/.gitkeep b/digipipe/store/raw/regiostat/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/regiostat/dataset.md b/digipipe/store/raw/regiostat/dataset.md new file mode 100644 index 00000000..89b6be58 --- /dev/null +++ b/digipipe/store/raw/regiostat/dataset.md @@ -0,0 +1,80 @@ +# Regionalstatistik (GENESIS) + +Enthält folgende Datensätze der statistischen Ämter des Bundes und der Länder: + +## Energieverwendung der Betriebe im Verarbeitenden Gewerbe (43531-01-02-4) + +Jahreserhebung ü. die Energieverwendung der Betriebe im verarbeitendem Gewerbe. + +Der Datensatz umfasst: + +- Betriebe des Verarbeitenden Gewerbes sowie des Bergbaus +und der Gewinnung von Steinen und Erden von Unternehmen des +Produzierenden Gewerbes mit im Allgemeinen 20 und mehr +Beschäftigten. +- Betriebe des Verarbeitenden Gewerbes sowie des Bergbaus +und der Gewinnung von Steinen und Erden mit im Allgemeinen +20 und mehr Beschäftigten von Unternehmen der übrigen +Wirtschaftsbereiche. +Die Berichterstattung schließt Verarbeitende Betriebe des +Handwerks ein. +Bei 7 Wirtschaftszweigen gilt eine Abschneidegrenze von 10 +Beschäftigten. Die Merkmalswerte beziehen sich auf den +gesamten Betrieb, schließen damit die nicht produzierenden +Betriebsteile mit ein. +Maßgebend für die Zuordnung ist ab 2008 die „Klassifikation +der Wirtschaftszweige, Ausgabe 2008 (WZ 2008)“, und zwar +die Abschnitte B und C. + +- Datei: `43531-01-02-4.xlsx` +- Stand: 2021 + +## Betriebe, tätige Personen, Bruttoentgelte (42111-01-04-5) + +Jahreserhebung ü. Betriebe, tätige Personen und Bruttoentgelte der Betriebe im +verarbeitendem Gewerbe. + +Der Datensatz umfasst: + +- Sämtliche Betriebe des Wirtschaftsbereiches Verarbeitendes +Gewerbe sowie Bergbau und Gewinnung von Steinen und Erden, +wenn diese Betriebe zu Unternehmen des Bereiches +Verarbeitendes Gewerbe sowie Bergbau und Gewinnung von +Steinen und Erden gehören und in diesen Unternehmen +mindestens 20 Personen tätig sind; +- die Betriebe des Wirtschaftsbereiches Verarbeitendes +Gewerbe sowie Bergbau und Gewinnung von Steinen und Erden +mit mindestens 20 tätigen Personen, sofern diese Betriebe +zu Unternehmen gehören, deren wirtschaftlicher Schwerpunkt +außerhalb des Bereiches Verarbeitendes Gewerbe sowie +Bergbau und Gewinnung von Steinen und Erden liegt. 
+Bei 7 kleinbetrieblich strukturierten Branchen gilt eine +untere Erfassungsgrenze von 10 tätigen Personen. +Die Auswahl erfolgt jeweils nach dem Beschäftigtenstand Ende +September des Vorjahres. Die ausgewiesene Beschäftigtenzahl +betrifft dagegen die von Ende September des Berichtsjahres. +Die Merkmalswerte beziehen sich auf den gesamten Betrieb, +schließen damit die nicht produzierenden Betriebsteile mit +ein. +Maßgebend für die Zuordnung ist ab 2009 die „Klassifikation +der Wirtschaftszweige, Ausgabe 2008 (WZ 2008)“, und zwar +die Abschnitte B und C. + +- Datei: `42111-01-04-5.xlsx` +- Stand: 30.09.2021 + +## Gebäude mit Wohnraum nach Heizungsart (31211-04-01-5-B) + +Zensus 2011: Gebäude mit Wohnraum nach Heizungsart + +- Datei: `31211-04-01-5-B.xlsx` +- Stand: 09.05.2011 + +## Gebäude mit Wohnraum nach Heizungsart (31231-02-01-5) + +Bestand an Wohngebäuden und Wohnungen in Wohn- und Nichtwohngebäuden - +Fortschreibung auf Basis der endgültigen Ergebnisse der Gebäude- und +Wohnungszählung 2011 (Zensus 2011). + +- Datei: `31231-02-01-5.xlsx` +- Stand: 31.12.2021 diff --git a/digipipe/store/raw/regiostat/metadata.json b/digipipe/store/raw/regiostat/metadata.json new file mode 100644 index 00000000..aa6b8315 --- /dev/null +++ b/digipipe/store/raw/regiostat/metadata.json @@ -0,0 +1,130 @@ +{ + "name": "regiostat", + "title": "Regionalstatistik (GENESIS)", + "id": "regiostat", + "description": "Energieverwendung der Betriebe im Verarbeitenden Gewerbe (43531-01-02-4), Betriebe, tätige Personen, Bruttoentgelte (42111-01-04-5), Gebäude mit Wohnraum nach Heizungsart (31211-04-01-5-B), Gebäude mit Wohnraum nach Heizungsart (31231-02-01-5)", + "language": [ + "de-DE" + ], + "subject": null, + + "keywords": [ + "Regionalstatistik", + "Energieverwendung", + "verarbeitendes Gewerbe", + "tätige Personen", + "Bruttoentgelte", + "Gebäude", + "Heizungsart" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2021-01-01", + "timeseries": null + }, + "sources": [ + { + "title": "Regionalstatistik (GENESIS)", + "description": "Energieverwendung der Betriebe im Verarbeitenden Gewerbe (43531-01-02-4), Betriebe, tätige Personen, Bruttoentgelte (42111-01-04-5), Gebäude mit Wohnraum nach Heizungsart (31211-04-01-5-B), Gebäude mit Wohnraum nach Heizungsart (31231-02-01-5)", + "path": ["https://www.regionalstatistik.de/genesis//online?operation=table&code=43531-01-02-4", "https://www.regionalstatistik.de/genesis//online?operation=table&code=42111-01-04-5"], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and 
independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Statistische Ämter des Bundes und der Länder, 2023" + } + ] + }, + { + "title": null, + "description": null, + "path": null, + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/renewables.ninja_feedin/data/.gitkeep b/digipipe/store/raw/renewables.ninja_feedin/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/renewables.ninja_feedin/dataset.md b/digipipe/store/raw/renewables.ninja_feedin/dataset.md new file mode 100644 index 00000000..bfd272c3 --- /dev/null +++ b/digipipe/store/raw/renewables.ninja_feedin/dataset.md @@ -0,0 +1,221 @@ +# EE-Einspeisezeitreihen + +Einspeisezeitreihen für Erneuerbare Energien, normiert auf 1 MW bzw. 1 p.u. +Als Wetterjahr wird 2011 verwendet, siehe +[Szenarien](../../../../docs/sections/scenarios.md). + +## Windenergie + +Stündlich aufgelöste Zeitreihe der Windenergie Einspeisung über 1 Jahr auf Basis +von [MaStR](../bnetza_mastr/dataset.md) und +[renewables.ninja](http://renewables.ninja). +Auf einen Auflösung auf Gemeindeebene wird verzichtet, da die Differenz der +Produktion der Gemeinden nach renewables.ninja <5 % beträgt. + +### Windenergieanlage (2022) + +Für renewables.ninja sind Position (lat, lon), Nennleistung (capacity), +Nabenhöhe und Turbinentyp erforderlich. 
+
+#### Position
+
+Hierfür wird aus den Zentroiden der Gemeinden ein räumlicher Mittelwert
+anhand des Datensatzes
+[bkg_vg250_muns_region](../../datasets/bkg_vg250_muns_region/dataset.md)
+(`bkg_vg250_muns_region.gpkg`) gebildet:
+
+```
+import os.path
+
+import geopandas as gpd
+
+def get_position(gdf):
+    # Zentroide der Gemeinden bilden und auf WGS84 (EPSG:4326) projizieren
+    df = gpd.read_file(gdf)
+    points_of_muns = df["geometry"].centroid
+    points_of_muns_crs = points_of_muns.to_crs(4326)
+    # Mittelwert der Breiten- und Längengrade als [lat, lon]
+    point_df = [
+        points_of_muns_crs.y.sum() / len(points_of_muns),
+        points_of_muns_crs.x.sum() / len(points_of_muns)
+    ]
+    return point_df
+
+data_folder = os.path.join("your_data_folder")
+muns_gpkg = os.path.join(data_folder, "bkg_vg250_muns_region.gpkg")
+center_position = get_position(muns_gpkg)
+```
+
+#### Nennleistung
+
+Wird auf 1 MW gesetzt/normiert.
+
+#### Nabenhöhe
+
+Aus dem Datensatz
+[bnetza_mastr_wind_region](../../datasets/bnetza_mastr_wind_region/dataset.md)
+(`bnetza_mastr_wind_agg_abw.gpkg`) wird ein Mittelwert von 100 m abgeleitet.
+
+```
+import geopandas as gpd
+
+df = gpd.read_file("bnetza_mastr_wind_agg_abw.gpkg")
+height = df[["hub_height"]].mean()
+```
+
+#### Turbinentyp
+
+Annahme: Innerhalb eines Herstellers sind Leistungskurven sehr ähnlich.
+Daher werden die zwei größten Hersteller mit ihrem jeweils häufigsten
+Turbinentyp ausgewählt - diese sind Enercon und Vestas mit ca. 70 % bzw.
+ca. 30 %.
+
+```
+import geopandas as gpd
+
+df = gpd.read_file("bnetza_mastr_wind_agg_abw.gpkg")
+manufacturers = df[
+    ["manufacturer_name", "status"]
+].groupby("manufacturer_name").count().sort_values(
+    by="status", ascending=False
+)
+```
+
+Häufigste Turbinentypen sind *Enercon E-70* und *Vestas V80*. Daher werden
+*Enercon E70 2000* und *Vestas V80 2000* in renewables.ninja ausgewählt.
+
+```
+man_1 = manufacturers.index[0]
+man_2 = manufacturers.index[1]
+
+type_1 = df[
+    ["manufacturer_name", "type_name", "status"]
+].where(df["manufacturer_name"] == man_1).groupby(
+    "type_name").count().sort_values(by="status", ascending=False)
+
+type_2 = df[
+    ["manufacturer_name", "type_name", "status"]
+].where(df["manufacturer_name"] == man_2).groupby(
+    "type_name").count().sort_values(by="status", ascending=False)
+```
+
+### Raw Data von [renewables.ninja](http://renewables.ninja) API
+
+Es werden zwei Zeitreihen für die oben beschriebenen Vergleichsanlagen berechnet:
+
+```
+import json
+
+import pandas as pd
+import requests
+
+def change_wpt(position, capacity, height, turbine):
+    # Default-Parameter der Wind-API, einzelne Werte werden unten überschrieben
+    args = {
+        'lat': 51.8000,  # 51.5000-52.0000
+        'lon': 12.2000,  # 11.8000-13.1500
+        'date_from': '2011-01-01',
+        'date_to': '2011-12-31',
+        'capacity': 1000.0,
+        'height': 100,
+        'turbine': 'Vestas V164 7000',
+        'format': 'json',
+        'local_time': 'true',
+        'raw': 'false',
+    }
+
+    args['capacity'] = capacity
+    args['height'] = height
+    args['lat'] = position[0]
+    args['lon'] = position[1]
+    args['turbine'] = turbine
+
+    return args
+
+def get_df(args):
+    token = 'Please get your own'
+    api_base = 'https://www.renewables.ninja/api/'
+
+    s = requests.session()
+    # Send token header with each request
+    s.headers = {'Authorization': 'Token ' + token}
+
+    url = api_base + 'data/wind'
+
+    r = s.get(url, params=args)
+
+    parsed_response = json.loads(r.text)
+    df = pd.read_json(
+        json.dumps(parsed_response['data']), orient='index')
+    metadata = parsed_response['metadata']
+    return df
+
+# position und df stammen aus den obigen Code-Blöcken (Zentroid-Mittelwert bzw.
+# MaStR-Anlagen), die Turbinentypen aus der Auswertung der häufigsten Typen
+position = center_position
+enercon = 'Enercon E70 2000'
+vestas = 'Vestas V80 2000'
+
+enercon_production = get_df(change_wpt(
+    position,
+    capacity=1000,  # normiert auf 1 MW
+    height=float(df["hub_height"].mean()),
+    turbine=enercon)
+)
+
+vestas_production = get_df(change_wpt(
+    position,
+    capacity=1000,  # normiert auf 1 MW
+    height=float(df["hub_height"].mean()),
+    turbine=vestas)
+)
+```
+
+### Gewichtung und Skalierung der Zeitreihen
+
+Um die Charakteristika der beiden o.g. Anlagentypen zu berücksichtigen, erfolgt
+eine gewichtete Summierung der Zeitreihen anhand der berechneten Häufigkeit.
+
+### Zukunftsszenarien
+
+Analog zu dem oben beschriebenen Vorgehen wird eine separate Zeitreihe für
+zukünftige WEA berechnet. Hierbei wird eine Enercon E126 6500 mit einer
+Nabenhöhe von 159 m angenommen
+([PV- und Windflächenrechner](https://zenodo.org/record/6794558)).
+
+Da die Zeitreihe sich nur marginal von der obigen Status-quo-Zeitreihe
+unterscheidet, wird letztere sowohl für den Status quo als auch die
+Zukunftsszenarien verwendet.
+
+- Einspeisezeitreihe: `wind_feedin_timeseries.csv`
+
+## Freiflächen-Photovoltaik
+
+### PV-Anlage (2022)
+
+Stündlich aufgelöste Zeitreihe der Photovoltaikeinspeisung über 1 Jahr auf Basis
+von [MaStR](../bnetza_mastr/dataset.md) und
+[renewables.ninja](http://renewables.ninja).
+Wie bei der Windeinspeisung wird auf eine Auflösung auf Gemeindeebene aufgrund
+geringer regionaler Abweichungen verzichtet.
+
+Für die Generierung der Zeitreihe über
+[renewables.ninja](http://renewables.ninja)
+werden eine Position (lat, lon), Nennleistung (capacity), Verluste (system_loss),
+Nachführung (tracking), Neigung (tilt) und der Azimutwinkel (azim) benötigt.
+
+Als Position wird analog zur Windenergieanlage der räumliche Mittelwert
+verwendet. Laut MaStR werden lediglich 13 Anlagen nachgeführt (0,01 % der
+Kapazität), die Nachführung wird daher vernachlässigt. Die Neigung ist aus MaStR
+nicht bekannt, es dominieren jedoch Anlagen auf Freiflächen sowie Flachdächern
+im landwirtschaftlichen Kontext. Nach
+[Ariadne Szenarienreport](https://ariadneprojekt.de/media/2022/02/Ariadne_Szenarienreport_Oktober2021_corr0222_lowres.pdf)
+wird diese mit 30° angenommen.
+Die Nennleistung wird auf 1 MW gesetzt/normiert.
+
+### Zukunftsszenarien
+
+Die Status-quo-Zeitreihe wird sowohl für den Status quo als auch die
+Zukunftsszenarien verwendet.
+
+- Einspeisezeitreihe: `pv_feedin_timeseries.csv`
+
+## Solarthermie
+
+- Einspeisezeitreihe: `st_feedin_timeseries.csv` (Kopie von
+  PV-Einspeisezeitreihe)
+
+## Laufwasserkraft
+
+Hier wird eine konstante Einspeisung angenommen.
+
+- Einspeisezeitreihe: `ror_feedin_timeseries.csv`
diff --git a/digipipe/store/raw/renewables.ninja_feedin/metadata.json b/digipipe/store/raw/renewables.ninja_feedin/metadata.json
new file mode 100644
index 00000000..fed94b1b
--- /dev/null
+++ b/digipipe/store/raw/renewables.ninja_feedin/metadata.json
@@ -0,0 +1,119 @@
+{
+  "name": "renewables.ninja_feedin",
+  "title": "EE-Einspeisezeitreihen",
+  "id": "renewables.ninja_feedin",
+  "description": "Einspeisezeitreihen für Erneuerbare Energien, normiert auf 1 MW bzw. 1 p.u.
Als Wetterjahr wird 2011 verwendet", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Erneuerbare", + "Energien", + "Einspeisezeitreihen", + "renewables.ninja" + ], + "publicationDate": "2016-09-21", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2023-04-14", + "timeseries": [ + { + "start": "2011-01-01T00:00+01", + "end": "2011-12-31T23:00+01", + "resolution": "1 h", + "alignment": "left" + } + ] + }, + "sources": [ + { + "title": "renewables.ninja_feedin", + "description": "Einspeisezeitreihen für Erneuerbare Energien, normiert auf 1 MW bzw. 1 p.u. Als Wetterjahr wird 2011 verwendet", + "path": "hhttps://www.renewables.ninja/", + "licenses": [ + { + "name": "CC BY-NC 4.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://creativecommons.org/licenses/by-nc/4.0/", + "instruction": "you are free to copy, redistribute and adapt them for non-commercial purposes, provided you give appropriate credit. Note that the data is made available as-is and without warranty. We cannot guarantee its accuracy, and accept no responsibility for any liability arising from its use. You are advised to examine the quality of the data for your intended purposes, and to consult the publications linked on this page.", + "attribution": "© www.renewables.ninja, 2023" + } + ] + } + ], + "licenses": [ + { + "name": "CC BY-NC 4.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://creativecommons.org/licenses/by-nc/4.0/", + "instruction": "you are free to copy, redistribute and adapt them for non-commercial purposes, provided you give appropriate credit. Note that the data is made available as-is and without warranty. We cannot guarantee its accuracy, and accept no responsibility for any liability arising from its use. You are advised to examine the quality of the data for your intended purposes, and to consult the publications linked on this page.", + "attribution": "© www.renewables.ninja, 2023" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } diff --git a/digipipe/store/raw/rli_pv_wfr/data/.gitkeep b/digipipe/store/raw/rli_pv_wfr/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/rli_pv_wfr/dataset.md b/digipipe/store/raw/rli_pv_wfr/dataset.md new file mode 100644 index 00000000..36a125e6 --- /dev/null +++ b/digipipe/store/raw/rli_pv_wfr/dataset.md @@ -0,0 +1,14 @@ +# Geodaten PV- und Windflächenrechner + +Geodaten aus dem [PV- und Windflächenrechner](https://www.agora-energiewende.de/service/pv-und-windflaechenrechner/). + +Mehr Informationen: + +- [Begleitdokument](https://zenodo.org/record/6794558) +- [Geodaten Potenzialflächen](https://zenodo.org/record/6728382) + +Enthält: + +- Geodaten +- Metadaten +- App-Datapackage diff --git a/digipipe/store/raw/rli_pv_wfr/metadata.json b/digipipe/store/raw/rli_pv_wfr/metadata.json new file mode 100644 index 00000000..410f25c2 --- /dev/null +++ b/digipipe/store/raw/rli_pv_wfr/metadata.json @@ -0,0 +1,119 @@ +{ + "name": "rli_pv_wfr", + "title": "Geodaten PV- und Windflächenrechner", + "id": "rli_pv_wfr", + "description": "Geodaten aus dem PV- und Windflächenrechner", + "language": [ + "de-DE" + ], + "subject": null, + + "keywords": [ + "Geodaten", + "PV-Flächenrechner", + "Windflächenrechner", + "Potentialflächen" + ], + "publicationDate": "2022-06-05", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-06-05", + "timeseries": null + }, + "sources": [ + { + "title": "Geodaten PV- und Windflächenrechner", + "description": "Geodaten aus dem PV- und Windflächenrechner", + "path": "https://zenodo.org/record/6728382", + "licenses": null + }, + { + "title": null, + "description": null, + "path": null, + "licenses":[ + { + "name": "CC BY-NC 4.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": 
"https://creativecommons.org/licenses/by-nc/4.0/", + "instruction": "you are free to copy, redistribute and adapt them for non-commercial purposes, provided you give appropriate credit. Note that the data is made available as-is and without warranty. We cannot guarantee its accuracy, and accept no responsibility for any liability arising from its use. You are advised to examine the quality of the data for your intended purposes, and to consult the publications linked on this page.", + "attribution": "© Reiner Lemoine INstitut, 2022" + } + ] + } + ], + "licenses": [ + { + "name": "CC BY-NC 4.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://creativecommons.org/licenses/by-nc/4.0/", + "instruction": "you are free to copy, redistribute and adapt them for non-commercial purposes, provided you give appropriate credit. Note that the data is made available as-is and without warranty. We cannot guarantee its accuracy, and accept no responsibility for any liability arising from its use. You are advised to examine the quality of the data for your intended purposes, and to consult the publications linked on this page.", + "attribution": "© Reiner Lemoine INstitut, 2022" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/rpg_abw_pv_roof_potential/data/.gitkeep b/digipipe/store/raw/rpg_abw_pv_roof_potential/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/rpg_abw_pv_roof_potential/dataset.md b/digipipe/store/raw/rpg_abw_pv_roof_potential/dataset.md new file mode 100644 index 00000000..4ed0119a --- /dev/null +++ b/digipipe/store/raw/rpg_abw_pv_roof_potential/dataset.md @@ -0,0 +1,19 @@ +# Dachflächenpotenzial PV-Aufdachanlagen in ABW + +Abschätzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in +Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft. 
+ +Dafür wurden auf Basis des +[Digitalen Oberflächenmodells (DOM2)](https://www.lvermgeo.sachsen-anhalt.de/de/dom2-landesweit.html) +Schattenberechnungen durchgeführt. Anhand des +[LoD2 3D-Gebäudemodells](https://www.lvermgeo.sachsen-anhalt.de/de/download_lod2.html) +wurden für verschiedene Dachausrichtungen (nord, ost, süd, west, flach) die +installierbare Leistung bestimmt und mittels der Globalstrahlung und typischer +technischer Parameter für jedes Gebäude und jede Dachflächenorientierung +potenzielle Erträge berechnet. + +Quellen: + +- [Hauptseite](https://www.planungsregion-abw.de/geodaten/) +- [Geodaten](https://gis-entwicklung2.planungsregion-abw.de/geoserver/wfs?SERVICE=WFS&REQUEST=GetCapabilities) +- [Anwendung](https://ris.planungsregion-abw.de/mapbender/application/pv_dachflaechenpot_rpg_abw) diff --git a/digipipe/store/raw/rpg_abw_pv_roof_potential/metadata.json b/digipipe/store/raw/rpg_abw_pv_roof_potential/metadata.json new file mode 100644 index 00000000..621e632c --- /dev/null +++ b/digipipe/store/raw/rpg_abw_pv_roof_potential/metadata.json @@ -0,0 +1,112 @@ +{ + "name": "rpg_abw_pv_roof_potential", + "title": "Dachflächenpotenzial PV-Aufdachanlagen in ABW", + "id": "rpg_abw_pv_roof_potential", + "description": "Abschätzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft.", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "PV", + "Photovoltaic", + "Flächenpotential", + "Aufdachanlagen" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "ABW", + "extent": "ABW", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-06-10", + "timeseries": null + }, + "sources": [ + { + "title": "Dachflächenpotenzial PV-Aufdachanlagen in ABW", + "description": "Abschätzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft.", + "path": "https://www.planungsregion-abw.de/geodaten/", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Regionale Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg / 2022" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, 
processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Regionale Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg / 2022" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/rpg_abw_regional_plan/data/.gitkeep b/digipipe/store/raw/rpg_abw_regional_plan/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/rpg_abw_regional_plan/dataset.md b/digipipe/store/raw/rpg_abw_regional_plan/dataset.md new file mode 100644 index 00000000..663538b7 --- /dev/null +++ b/digipipe/store/raw/rpg_abw_regional_plan/dataset.md @@ -0,0 +1,42 @@ +# Regionalplan Anhalt-Bitterfeld-Wittenberg + +Geodatensätze aus Teilplänen Wind 2018 und 2027 der Regionalen +Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg. + +## Sachlicher Teilplan Wind 2018 + +Geodaten aus rechtskräftigem +[Sachlichen Teilplan Wind 2018](https://www.planungsregion-abw.de/regionalplanung/teilplan-windenergie/teilplan-2018/). + +> Im Sachlichen Teilplan "Nutzung der Windenergie in der Planungsregion +> Anhalt-Bitterfeld-Wittenberg" vom 30.05.2018 werden 22 Vorranggebiete für die +> Nutzung der Windenergie mit der Wirkung von Eignungsgebieten festgelegt. Sie +> dienen der raumordnerischen Steuerung der Errichtung von raumbedeutsamen +> Windenergieanlagen in Konzentrationszonen. +> +> Die oberste Landesentwicklungsbehörde hat am 01.08.2018 die Genehmigung +> erteilt. Mit Bekanntmachung der Genehmigung tritt der Sachliche Teilplan in +> Kraft. 
+ +Dateien: + +- Vorrang-/Eignungsgebiete: `stp_2018_vreg.gpkg` + ([Quelle](https://gis.planungsregion-abw.de/geoserver/stp_wind2018/ows?SERVICE=WFS&REQUEST=GetCapabilities)) + +## Sachlicher Teilplan Wind 2027 + +Geodaten aus Planentwurf des +[Sachlichen Teilplan Wind 2027](https://www.planungsregion-abw.de/regionalplanung/teilplan-windenergie/teilplan-2027/). + +> Die Regionalversammlung hat am 03.03.2023 beschlossen, den Sachlichen +> Teilplan "Windenergie 2027 in der Planungsregion Anhalt-Bitterfeld-Wittenberg" +> aufzustellen und mit der Bekanntgabe der Allgemeinen Planungsabsicht die +> beabsichtigten Auswahlkriterien und mögliche Gebietskulisse der Vorranggebiete +> für die Nutzung der Windenergie bzw. für Repowering von Windenergieanlagen +> vorzustellen. + +Dateien: + +- Suchräume: `stp_2027_suchraum.gpkg` (Quelle: RPG ABW) +- Planabsicht Vorranggebiete: `stp_2027_ideen_vr.gpkg` (Quelle: RPG ABW) +- Planabsicht Repoweringgebiete: `stp_2027_ideen_repower.gpkg` (Quelle: RPG ABW) diff --git a/digipipe/store/raw/rpg_abw_regional_plan/metadata.json b/digipipe/store/raw/rpg_abw_regional_plan/metadata.json new file mode 100644 index 00000000..585f2deb --- /dev/null +++ b/digipipe/store/raw/rpg_abw_regional_plan/metadata.json @@ -0,0 +1,111 @@ +{ + "name": "rpg_abw_regional_plan", + "title": "Regionalplan Anhalt-Bitterfeld-Wittenberg", + "id": "rpg_abw_regional_plan", + "description": "Geodatensätze aus Teilplänen Wind 2018 und 2027 der Regionalen Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg.", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Geodatensätze", + "Teilpläne", + "PLanungsgemeinschaft" + ], + "publicationDate": "2018-05-30", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "ABW", + "extent": "ABW", + "resolution": null + }, + "temporal": { + "referenceDate": "2023-03-03", + "timeseries": null + }, + "sources": [ + { + "title": "Regionalplan Anhalt-Bitterfeld-Wittenberg", + "description": "Geodatensätze aus Teilplänen Wind 2018 und 2027 der Regionalen Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg.", + "path": ["https://www.planungsregion-abw.de/regionalplanung/teilplan-windenergie/teilplan-2018/", "https://www.planungsregion-abw.de/regionalplanung/teilplan-windenergie/teilplan-2027/"], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Regionale Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg / Jahr" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – 
attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Regionale Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg / Jahr" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/seenergies_peta5/data/.gitkeep b/digipipe/store/raw/seenergies_peta5/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/seenergies_peta5/dataset.md b/digipipe/store/raw/seenergies_peta5/dataset.md new file mode 100644 index 00000000..24118e8e --- /dev/null +++ b/digipipe/store/raw/seenergies_peta5/dataset.md @@ -0,0 +1,26 @@ +# sEEnergies Pan-European Thermal Atlas 5.2 (Peta5) + +Wärmebedarf für Europa 2015 in GJ (1ha Auflösung) für + +- Haushalte: Raumwärme und Warmwasser +- GHD: Raumwärme, Warmwasser und Prozesswärme + +Die Daten können auf der +[Projektseite](https://s-eenergies-open-data-euf.hub.arcgis.com) +eingesehen werden. 
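Ergänzend zu den wget-Aufrufen in den folgenden Abschnitten eine kleine Python-Skizze, die beide Archive herunterlädt (rein illustrativ, kein Bestandteil der Pipeline; Ziel-Dateinamen wie unten angenommen):

```python
# Skizze: Download der beiden Peta5-Archive (entspricht den wget-Aufrufen unten).
from urllib.request import urlretrieve

ARCHIVE = {
    "Peta5_0_1_HD_res.zip": "https://arcgis.com/sharing/rest/content/items/d7d18b63250240a49eb81db972aa573e/data",
    "Peta5_0_1_HD_ser.zip": "https://arcgis.com/sharing/rest/content/items/52ff5e02111142459ed5c2fe3d80b3a0/data",
}

for dateiname, url in ARCHIVE.items():
    urlretrieve(url, dateiname)  # speichert das Archiv im aktuellen Verzeichnis
    print(f"{dateiname} heruntergeladen")
```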
+ +## Haushalte + +Abgerufen mittels + +```commandline +wget -O Peta5_0_1_HD_res.zip https://arcgis.com/sharing/rest/content/items/d7d18b63250240a49eb81db972aa573e/data +``` + +## GHD und Industrie + +Abgerufen mittels + +```commandline +wget -O Peta5_0_1_HD_ser.zip https://arcgis.com/sharing/rest/content/items/52ff5e02111142459ed5c2fe3d80b3a0/data +``` diff --git a/digipipe/store/raw/seenergies_peta5/metadata.json b/digipipe/store/raw/seenergies_peta5/metadata.json new file mode 100644 index 00000000..508362a4 --- /dev/null +++ b/digipipe/store/raw/seenergies_peta5/metadata.json @@ -0,0 +1,112 @@ +{ + "name": "seenergies_peta5", + "title": "sEEnergies Pan-European Thermal Atlas 5.2 (Peta5)", + "id": "seenergies_peta5", + "description": "Wärmebedarf für Europa 2015 in GJ (1ha Auflösung)", + "language": [ + "en-GB" + ], + "subject": null, + "keywords": [ + "European", + "Photovoltaic", + "Flächenpotential", + "Aufdachanlagen" + ], + "publicationDate": "2022-01-01", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Europe", + "extent": "Europe", + "resolution": "1 ha" + }, + "temporal": { + "referenceDate": "2022-01-01", + "timeseries": null + }, + "sources": [ + { + "title": "sEEnergies Pan-European Thermal Atlas 5.2 (Peta5)", + "description": "Wärmebedarf für Europa 2015 in GJ (1ha Auflösung)", + "path": "https://www.seenergies.eu/peta5/", + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": "The data provided is indiative and for research purpose only", + "attribution": "© Flensburg, Halmstad and Aalborg Universities 2022" + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": "The data provided is indiative and for research purpose only", + "attribution": "© Flensburg, Halmstad and Aalborg Universities 2022" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/stala_st_energy/data/.gitkeep b/digipipe/store/raw/stala_st_energy/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/stala_st_energy/dataset.md b/digipipe/store/raw/stala_st_energy/dataset.md new file mode 100644 index 00000000..3f7a236e --- /dev/null +++ b/digipipe/store/raw/stala_st_energy/dataset.md @@ -0,0 +1,10 @@ +# Energiedaten Sachsen-Anhalt + +Datensätze zur Energie- und Wasserversorgung des Statistischen Landesamtes +Sachsen-Anhalt. + +## Daten + +Stromverbrauch der Industriebetriebe nach Kreisen 2003-2021 in MWh + +- [Quelle](https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/energie-und-wasserversorgung/tabellen-energieverwendung#c206986) diff --git a/digipipe/store/raw/stala_st_energy/metadata.json b/digipipe/store/raw/stala_st_energy/metadata.json new file mode 100644 index 00000000..a912d129 --- /dev/null +++ b/digipipe/store/raw/stala_st_energy/metadata.json @@ -0,0 +1,118 @@ +{ + "name": "stala_st_energy", + "title": "Energiedaten Sachsen-Anhalt", + "id": "stala_st_energy", + "description": "Datensätze zur Energie- und Wasserversorgung des Statistischen Landesamtes Sachsen-Anhalt.", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Energiedaten", + "Energieversorgung", + "Wasserversorgung" + ], + "publicationDate": "2022-01-01", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Sachsen-Anhalt", + "extent": "Sachsen-Anhalt", + "resolution": "NUTS-3" + }, + "temporal": { + "referenceDate": "2022-01-01", + "timeseries": [ + { + "start": "2003-01-01T00:00+01", + "end": "2021-12-31T23:00+01", + "resolution": "1 a", + "alignment": "left" + } + ] + }, + "sources": [ + { + "title": "Energiedaten Sachsen-Anhalt", + "description": "Datensätze zur Energie- und Wasserversorgung des Statistischen Landesamtes Sachsen-Anhalt.", + "path": 
"https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/energie-und-wasserversorgung/tabellen-energieverwendung#c206986", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Statistisches Landesamt Sachsen-Anhalt, Halle (Saale)." + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© Statistisches Landesamt Sachsen-Anhalt, Halle (Saale)." + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/stala_st_pop_prog/data/.gitkeep b/digipipe/store/raw/stala_st_pop_prog/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/stala_st_pop_prog/dataset.md b/digipipe/store/raw/stala_st_pop_prog/dataset.md new file mode 100644 index 00000000..78cb55a7 --- /dev/null +++ b/digipipe/store/raw/stala_st_pop_prog/dataset.md @@ -0,0 +1,4 @@ +# Bevölkerungsprognose Sachsen-Anhalt + +Bevölkerungsprognose je Gemeinde bis 2035 des Statistischen Landesamtes +Sachsen-Anhalt. Stand: 2021 diff --git a/digipipe/store/raw/stala_st_pop_prog/metadata.json b/digipipe/store/raw/stala_st_pop_prog/metadata.json new file mode 100644 index 00000000..017ab676 --- /dev/null +++ b/digipipe/store/raw/stala_st_pop_prog/metadata.json @@ -0,0 +1,109 @@ +{ + "name": "stala_st_pop_prog", + "title": "Regionalisierte Bevölkerungsprognose", + "id": "stala_st_pop_prog", + "description": "Prognostizierter Bevölkerungsstand in den Gemeinden, kreisfreien Städten und Landkreisen nach Prognosejahr und Geschlecht", + "language": [ + "de-DE" + ], + "subject": [], + "keywords": [ + "Bevölkerungsprognose", + "population" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Sachsen-Anhalt", + "extent": "Sachsen-Anhalt", + "resolution": "" + }, + "temporal": { + "referenceDate": null, + "timeseries": [ + { + "start": "2019", + "end": "2035", + "resolution": "1 year", + "alignment": null, + "aggregationType": "sum" + } + ] + }, + "sources": [ + { + "title": "1_Internettabelle_7RBP_nach_Prognosejahr_Geschlecht_alle_Ebenen", + "description": "Prognostizierter Bevölkerungsstand in den Gemeinden, kreisfreien Städten und Landkreisen nach Prognosejahr und Geschlecht", + "path": 
"statistik.sachsen-anhalt.de/themen/bevoelkerung-mikrozensus-freiwillige-haushaltserhebungen/bevoelkerung/bevoelkerungsprognose-und-haushalteprognose/#c312231", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany – attribution – Version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "© 2023 Landesportal Sachsen-Anhalt " + } + ] + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": [], + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } +}, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/raw/technology_data/data/.gitkeep b/digipipe/store/raw/technology_data/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/digipipe/store/raw/technology_data/dataset.md b/digipipe/store/raw/technology_data/dataset.md new file mode 100644 index 00000000..fc663b27 --- /dev/null +++ b/digipipe/store/raw/technology_data/dataset.md @@ -0,0 +1,1410 @@ +# Technologiedaten + +## Jahresvolllaststunden + +Anhand typischer heutiger und prognostizierter Werte für Sachsen-Anhalt werden +folgende Jahresvolllaststunden angenommen: + +| Technologie | Jahr | Volllaststunden | Quelle(n) für Annahme | Anmerkung | +|-----------------|------|----------------:|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------| +| Windenergie | 2022 | 1800 | 
[foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/wind/auswahl/811-durchschnittliche_ja/#goto_811) | | +| | 2045 | 2300 | [PV- und Windflächenrechner](https://zenodo.org/record/6794558) | | +| Freiflächen-PV | 2022 | 980 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/solar/auswahl/813-durchschnittliche_ja/#goto_813), [ISE](https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/aktuelle-fakten-zur-photovoltaik-in-deutschland.pdf) | | +| | 2045 | 980 | [PV- und Windflächenrechner](https://zenodo.org/record/6794558), [Ariadne Szenarienreport](https://ariadneprojekt.de/media/2022/02/Ariadne_Szenarienreport_Oktober2021_corr0222_lowres.pdf) | | +| Aufdach-PV | 2022 | 910 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/solar/auswahl/813-durchschnittliche_ja/#goto_813), [ISE](https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/aktuelle-fakten-zur-photovoltaik-in-deutschland.pdf) | | +| | 2045 | 910 | [Ariadne Szenarienreport](https://ariadneprojekt.de/media/2022/02/Ariadne_Szenarienreport_Oktober2021_corr0222_lowres.pdf) | | +| Laufwasserkraft | 2022 | 3800 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/wasser/auswahl/840-durchschnittliche_ja/#goto_840) | | +| | 2045 | 3800 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/wasser/auswahl/840-durchschnittliche_ja/#goto_840) | | +| Bioenergie | 2022 | 6000 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/bioenergie/auswahl/814-durchschnittliche_ja/#goto_814), [ISE](https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/DE2018_ISE_Studie_Stromgestehungskosten_Erneuerbare_Energien.pdf) | Bioenergie-Stromerzeugung (ohne
biogenen Teil des Abfalls) | +| | | | | | + +Datei: `technology_data.json` --> `full_load_hours` + +TBD: Generalisieren - automatische Generierung anhand von Global Wind Atlas / +Global Solar Atlas. + +## Leistungsdichte + +Installierbare Leistung pro Fläche / spezifischer Flächenbedarf: + +- Windenergie: 21 MW/km² +- PV-Freiflächenanlagen: 100 MW/km² +- PV-Aufdachanlagen: 140 MW/km² +- Solarthermie: ? MW/km² + +Quelle: [PV- und Windflächenrechner](https://zenodo.org/record/6794558) + +Datei: `technology_data.json` --> `power_density` + +## Nennleistung Windenergieanlage + +Als Zukunftsanlage für 2045 wird eine Enercon E126 6500 (6,5 MW) angenommen. +Diese wird für die Berechnung der Anlagenanzahl in den Ergebnissen +verwendet. + +Datei: `technology_data.json` --> `nominal_power_per_unit` + +## Batterien + +- Kleinbatterien/Heimspeicher: Nennkapazität je installierter PV-Peakleistung + und Speichernennleistung je installierter Speichernennkapazität aus + [bnetza_mastr](../../raw/bnetza_mastr/dataset.md) und + [HTW](https://solar.htw-berlin.de/wp-content/uploads/HTW-Stromspeicher-Inspektion-2023.pdf). +- Großbatterien: Speichernennleistung je installierter Speichernennkapazität + aus [bnetza_mastr](../../raw/bnetza_mastr/dataset.md). + +Datei: `technology_data.json` --> `batteries` + +## Warmwasserspeicher + +- Kleinwärmespeicher (dezentral): Speichernennleistung je installierter + Speichernennkapazität aus + [DEA](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage) +- Großwärmespeicher (Fernwärme): Speichernennleistung je installierter + Speichernennkapazität aus + [DEA](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage) + +Datei: `technology_data.json` --> `hot_water_storages` + +## Kosten und Wirkungsgrade + +Datei: `raw_costs_efficiencies.csv` + +#### Allgemein + +Preise werden aus den Technologie Datenblättern der Danish Energy +Agency ([1], [2], [3], [4]) entnommen. +Abweichungen werden gesondert genannt. + +alle Preise werden auf Euro im Jahr 2020 (dis-)kontiert und damit +inflationsbereinigt. + +Für Quellen +[1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and), +[2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants), +[3](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage), +[4](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-renewable-fuels) +ist das meist die Umrechnung von 2015 zu 2020. Dafür folgende Formel verwendet: + +``` +P_(2020) = P_(2015)*f_(infl) +f_(infl) = (1+i_(2015))*(1+i_(2016))...*(1+i_(2019)) +f_(infl) = 1,005 * 1,005 * 1.015 * 1,018 * 1,014 = 1,0582 +``` + +[8](https://de.statista.com/themen/112/inflation/#topicOverview) + +Werte für 2045 werden durch lineare Extrapolation ermittelt. + +#### biogas_upgrading plant + +Quelle: [4] "82 Biogas, upgrading" + +Aufbereitung von Biogas zu Bio-SNG + +#### biogas bpchp_central + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +"06 Gas engines, biogas" + +Backpressure Combined heat and power (bpchp) modelliert BHKWs + +thermal effiency = electrical_effiency / (c_b+c_v) ( +laut [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +S. 
390)
+
+#### biogas bpchp_decentral
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"06 Gas engines, biogas"
+
+Identische Werte zu biogas bpchp_central. Split fürs Energiesystem, aber
+eingesetzte Technologie identisch.
+
+#### biogas_plant
+
+Quelle [4](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-renewable-fuels):
+"81 Biogas Plant, Basic conf."
+
+Stellt Biogas bereit, welches in KWK (biogas bpchp_central, biogas
+bpchp_decentral) genutzt werden kann.
+
+#### boiler_central
+
+Quelle [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+"44 Natural Gas DH Only"
+
+#### boiler_decentral
+
+Quelle [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants):
+"202 Gas boiler, ex single", "202 Gas boiler, ex apart", "202 Gas boiler, new
+single", "202 Gas boiler, new apart"
+
+Es wird für jedes Szenario jeder Wert aus 4 Komponenten zusammengesetzt.
+
+Diese sind die Kombinationen aus:
+
+- Altbau-Neubau
+- Einfamilienhaus-Mehrfamilienhaus
+
+Diese Komponenten werden durch Faktoren gewichtet zusammengefasst.
+
+Für 2020:
+
+- Verhältnis von Altbau-Neubau
+  aus [7](https://de.statista.com/statistik/daten/studie/202207/umfrage/struktur-des-wohnungsbaus-nach-art-der-bauleistung-in-deutschland/)
+- Verhältnis von Einfamilienhaus-Mehrfamilienhaus im Neubau
+  aus [6](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb),
+  verbaute Gasheizungen aggregiert
+- Verhältnis von Einfamilienhaus-Mehrfamilienhaus im Altbau wird als 0.7 / 0.3
+  angenommen
+
+Für 2045:
+
+- Verhältnis von Altbau-Neubau
+  aus [7](https://de.statista.com/statistik/daten/studie/202207/umfrage/struktur-des-wohnungsbaus-nach-art-der-bauleistung-in-deutschland/)
+- Verhältnis von Einfamilienhaus-Mehrfamilienhaus im Neubau
+  aus [6](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb),
+  verbaute Gasheizungen in 2020
+- Verhältnis von Einfamilienhaus-Mehrfamilienhaus im Altbau wird als 0.7 / 0.3
+  angenommen
+
+Volle Berechnungen siehe "boiler_small_script.py" im Code Anhang
+
+#### ch4 bpchp_central
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"06 Gas engines, natural gas"
+
+Backpressure Combined heat and power (bpchp) modelliert BHKWs
+
+thermal efficiency = electrical_efficiency / (c_b+c_v) (
+laut [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+S. 390)
+
+#### ch4 bpchp_decentral
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"06 Gas engines, natural gas"
+
+Identische Werte zu ch4 bpchp_central. Split fürs Energiesystem, aber
+eingesetzte Technologie identisch.
+
+#### ch4 extchp_central
+
+Quellen: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"05 Gas turb. CC, steam extract., Large", [14] S. 20-21
+
+#### ch4 extchp_decentral
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"05 Gas turb. CC, steam extract., Large"
+
+[14] S. 20-21
+
+Identisch wie ch4 extchp_central
+
+#### gt
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"04 Gas turb. simple cycle, L"
+
+Gasturbine, offener Prozess
+
+#### heatpump_central
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"40 Comp. hp, airsource 10 MW"
+
+Wärmepumpentechnologie (Luft-Wasser-WP) aus Langfristigkeitsszenarien
+
+#### heatpump_decentral
+
+Quellen: [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants)
+"207 HP air-water,ex single", "207 HP air-water,ex apart", "207 HP air-water,new single",
+"207 HP air-water,new apart", "207 HP ground-water,ex single", "207 HP ground-water,ex apart",
+"207 HP ground-water,new single", "207 HP ground-water,new apart",
+[5], [6](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb)
+
+Es wird für jedes Szenario jeder Wert aus 8 Komponenten zusammengesetzt.
+Diese sind die Kombinationen aus:
+
+- Sole-Umwelt
+- Einfamilienhaus-Mehrfamilienhaus (fast alle WP in Einfamilienhäusern!)
+- Altbau-Neubau
+
+Es wird das gemittelte Verhältnis deutschlandweit der letzten 20 Jahre
+angenommen (BBSR; Bundesamt für Bauwesen und Raumordnung).
+
+Für 2020 wurden folgende Annahmen für das allgemeine Verhältnis zwischen den
+Möglichkeiten getroffen:
+
+- Sole-Umwelt sind die aggregierten Absatzzahlen aus [5]
+- Einfamilienhaus-Mehrfamilienhaus
+  aus [6](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb)
+- Altbau-Neubau
+  aus [7](https://de.statista.com/statistik/daten/studie/202207/umfrage/struktur-des-wohnungsbaus-nach-art-der-bauleistung-in-deutschland/)
+
+Damit wurden für 2045 folgende Annahmen für das allgemeine Verhältnis zwischen
+den Möglichkeiten getroffen:
+
+- Sole-Umwelt = 0.87/0.13 (Das sind die Absatzzahlen aus 2022 aus der
+  Branchenstudie)
+- Einfamilienhaus-Mehrfamilienhaus = 0.7 / 0.3 (Das ist eine freie Annahme, die
+  eine fortschreitende Verbreitung in Mehrfamilienhäusern annimmt)
+- Altbau-Neubau = 0.699 / 0.301 (das gemittelte Verhältnis deutschlandweit der
+  letzten 20 Jahre)
+
+Die Faktoren in 2045 sind daher:
+
+- Altbau_Umwelt_EFH = 0.4256
+- Altbau_Umwelt_MFH = 0.1824
+- Altbau_Sole_EFH = 0.0636
+- Altbau_Sole_MFH = 0.0272
+- Neubau_Umwelt_EFH = 0.1833
+- Neubau_Umwelt_MFH = 0.0785
+- Neubau_Sole_EFH = 0.0273
+- Neubau_Sole_MFH = 0.0117
+
+Berechnung siehe "heatpump_small_script.py" im Code Anhang sowie die
+Überschlagsrechnung weiter unten.
+
+#### large_scale_battery
+
+Quellen: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"180 Lithium Ion Battery", Cebulla [9] S. 181
+
+Die Berechnung von storage_fixom_cost wurde aus UMAS/Oemof_B3 übernommen, ohne
+dass eine Quelle dieser Berechnung gefunden wurde.
+
+storage_fixom_cost = 0.005 * storage_capacity_cost_overnight
+
+Große Differenzen zwischen Windnode und UMAS, UMAS Methodik übernommen
+
+#### pth_central
+
+Quellen: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"41 Electric Boilers, small", "41 Electric Boilers, large"
+
+Es wurde ein Mittelwert aus den Electric Boilers small und large gebildet, um
+relevante Größen in ABW abzubilden.
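Zur Veranschaulichung der oben unter heatpump_decentral genannten Faktoren für 2045 eine kleine Überschlagsrechnung (reine Skizze, kein Bestandteil der Pipeline; die Ergebnisse stimmen bis auf Rundung mit der Liste oben überein):

```python
# Skizze: Gewichtungsfaktoren 2045 für heatpump_decentral als Produkt der drei
# oben genannten Verhältnisse (Altbau-Neubau, Sole-Umwelt, EFH-MFH).
from itertools import product

anteile = {
    "Altbau": 0.699, "Neubau": 0.301,  # gemitteltes Verhältnis der letzten 20 Jahre (BBSR)
    "Umwelt": 0.87, "Sole": 0.13,      # Absatzzahlen 2022 aus der Branchenstudie [5]
    "EFH": 0.7, "MFH": 0.3,            # freie Annahme (s. o.)
}

faktoren = {
    f"{bau}_{quelle}_{haus}": anteile[bau] * anteile[quelle] * anteile[haus]
    for bau, quelle, haus in product(
        ["Altbau", "Neubau"], ["Umwelt", "Sole"], ["EFH", "MFH"]
    )
}

for name, wert in faktoren.items():
    print(f"{name} = {wert:.4f}")  # z. B. Altbau_Umwelt_EFH = 0.4257 (Liste oben: 0.4256)
```

Diese Faktoren dienen anschließend als Gewichte bei der Aggregation der DEA-Datenblattwerte (vgl. "heatpump_small_script.py" im Code Anhang).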
+
+#### pth_decentral
+
+Quellen: [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants):
+"216 Electric heating,new single", "216 Electric heating,new apart"
+
+Annahmen zu Gebäudebestand siehe heatpump_decentral, nur ohne Kombination mit
+Altbau, da power to heat in Altbauten vernachlässigbar selten (und wenn, dann in
+anderen Technologien wie Nachtspeicherheizungen) vorkommt.
+
+Berechnungen siehe "pth_decentral_script" im Code Anhang
+
+#### small_scale_battery
+
+Quellen: [15](https://www.zhb-flensburg.de/fileadmin/content/spezial-einrichtungen/zhb/dokumente/dissertationen/fluri/fluri-2019-wirtschaftlichkeit-dez-stromspeicher.pdf),
+[17] S. 3
+
+- capacity_cost_overnight: [15] S. 41
+- effiency, lost_rate, lifetime: [15] S. 91
+
+#### storage heat_central
+
+Quelle [3](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage):
+"141 Large hot water tank"
+
+capacity_cost_overnight und fixom_cost ignoriert, da
+storage_capacity_cost_overnight und storage_fixom_cost einen Wert haben
+
+#### storage heat_decentral
+
+Quelle [3](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage):
+"141 Large hot water tank"
+
+capacity_cost_overnight und fixom_cost ignoriert, da
+storage_capacity_cost_overnight und storage_fixom_cost einen Wert haben
+
+Große Differenzen zwischen UMAS und Windnode, UMAS Methodik übernommen
+
+#### hydro ror
+
+Quellen: [16]
+
+- fixom_cost: S. 78
+- capacity_cost_overnight: S. 75
+- lifetime: S. 72
+
+#### lignite oven
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+"206 Wood stove, single, ex tank"
+
+Der Kohleofen ist eine Komponente, die für die Abbildung des Ist-Zustandes
+relevant ist.
+Die Kohleheizung wird durch gesetzliche Regulierung nicht mehr neu verbaut
+werden können, wodurch die Komponente für die Optimierung nicht relevant ist.
+Da auch die Datenlage für die Kohleheizung sehr schlecht ist, werden die Daten
+approximiert.
+
+Keine direkten Werte vorhanden, daher Modellierung anhand der wood stove Werte.
+
+efficiency:
+
+Die Differenz der Energie zwischen Holz und Kohle liegt im Heizwert des
+Brennstoffs. Daher wird die Effizienz der wood stove mit dem Faktor des
+Verhältnisses der Heizwerte multipliziert.
+Die Daten für Heizwerte von
+BMWK [11](https://www.bmwk.de/Redaktion/DE/Artikel/Energie/energiedaten-gesamtausgabe.html)
+und [12](https://books.google.de/books?id=n0fVYjrHAlwC&pg=PA58#v=onepage&q&f=false)
+ergeben einen Faktor von 4/3.
+
+fixom_cost:
+
+Diese bestehen größtenteils aus Brennstoffkosten. Die Änderung gegenüber wood
+stove ergibt sich aus dem Heizwert (gewonnene Energie pro kg) und der
+Preisdifferenz pro Kilogramm.
+
+Preise aus brikett-rekord.com [13]
+
+lifetime:
+
+identisch wie wood stove
+
+marginal_cost: identisch wie wood stove
+
+Aus den Annahmen folgt, dass die Investkosten ignoriert werden können.
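Die unter "Allgemein" beschriebene Inflationsbereinigung auf 2020 sowie die Ermittlung der 2045er-Werte lassen sich kompakt skizzieren (Annahme: lineare Interpolation zwischen den DEA-Stützjahren 2040 und 2050, wie in den Skripten im Code Anhang; reine Skizze):

```python
# Skizze: Inflationsfaktor 2015 -> 2020 (vgl. Formel unter "Allgemein") und
# lineare Interpolation auf 2045 analog zu linear_interpolate_2045 im Code Anhang.

def inflationsfaktor(raten=(0.005, 0.005, 0.015, 0.018, 0.014)) -> float:
    """f_infl aus den Jahresinflationsraten 2015-2019 nach [8]."""
    f = 1.0
    for i in raten:
        f *= 1.0 + i
    return f


def preis_2020(preis_2015: float) -> float:
    """P_2020 = P_2015 * f_infl."""
    return preis_2015 * inflationsfaktor()


def linear_interpolate_2045(wert_2040: float, wert_2050: float) -> float:
    """Wert für 2045 als lineare Interpolation zwischen 2040 und 2050."""
    return wert_2040 + (wert_2050 - wert_2040) / (2050 - 2040) * (2045 - 2040)


print(round(inflationsfaktor(), 4))          # 1.0582
print(preis_2020(1000.0))                    # ca. 1058.2
print(linear_interpolate_2045(100.0, 80.0))  # 90.0
```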
+ +#### pv_ground + +Quelle [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and): " +22 Utility-scale PV", Vergleich [10] + +marginal_cost = 0, da in Quellen nicht vorhanden + +Kosten +aus [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +im Bereich von [10] + +#### pv_rooftop + +Quelle [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and): " +22 PV commercial&industrial rooftop", "22 PV residential", Vergleich [10] + +gewichteter Mittelwert zwischen kommerziellen und Wohnhaus PV. + +Gewichtung anhand openMaStR Daten aus Pipeline + +``` +import geopandas as gpd +import os.path + +data_folder = os.path.join("/ROAD/TO/DATA") +data = "bnetza_mastr_pv_roof_region.gpkg" + +df = gpd.read_file(os.path.join(data_folder, data)) + +sum = df[["usage_sector", "status"]].groupby("usage_sector").count().sum() +industrial = (df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Industrie"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Sonstige"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Landwirtschaft"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc[ + "Gewerbe, Handel und Dienstleistungen"][0]) \ + / sum +residental = (df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Öffentliches Gebäude"][0] + + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Haushalt"][0]) / sum +return [industrial, residental] +``` + +ergibt 25 % industrial und 75% Haushalte. + +marginal_cost = 0, da in Quellen nicht vorhanden + +Kosten +aus [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +im Bereich von [10] + +#### thermalcollector_central + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and +" +46 Solar District Heating" + +#### thermalcollector_decentral + +Quelle: [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants +" +215 Solar heating,ex single", "215 Solar heating,ex apart", "215 Solar +heating,new single", "215 Solar heating,new +apart" + +Annahmen zu Gebäudebestand siehe heatpump_decentral. + +Berechnungen siehe "thermalcollector_decentral_script" im Code Anhang + +#### wind onshore + +Quelle [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and): " +20 Onshore turbines", +Vergleich [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html) + +EE Kosten durchweg kleiner als in Windnode in 2020 + +Windnode bezieht sich auf Frauenhofer ISE aus 2018, Vorgängerstudie +zu [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html) + +Frauenhofer (S. 
+ +11) [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html) + CAPEX-Range höher als + DEA [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) + in 2020 + +1400000-2000000 [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html) +zu +1190000 [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +€/MW + +keine Aussagen in +Frauenhofer [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html) +über 2045 + +wir wählen DEA als Quelle für die Vergleichbarkeit, da Vergleichbarkeit in der +Optimierung der Modellierung Vorrang hat + +#### wood extchp_central + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) " +09a Wood Chips, Medium" + +[14] S. 20-21 + +#### wood extchp_decentral + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) " +09a Wood Chips, Medium" + +[14] S. 20-21 + +identisch zu wood extchp_central + +#### wood oven + +Quelle: [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants), " +204 Biomass auto,ex single", "204 Biomass auto,new single", "204 Biomass auto,ex +apart", "204 Biomass auto,new apart" + +Annahmen zu Gebäudebestand siehe heatpump_decentral. + +Berechnungen siehe "wood_oven_script" im Code Anhang + +#### Quellen + +[1] Danish Energy Agency (2016): "Technology Data - Energy Plants for +Electricity and District heating generation", +Version 13, +von https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and + +[2] Danish Energy Agency (2016): "Technology Data for heating installations", +Version 4, +von https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants + +[3] Danish Energy Agency (2018): "Technology Data – Energy storage", Version 7, +von https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage + +[4] Danish Energy Agency (2017): "Technology Data – Renewable fuels", Versoin 9, +von https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-renewable-fuels + +[5] Karl-Heinz Backhaus (Vaillant), Dr. Hendrik Ehrhardt (Stiebel Eltron), Sven +Kersten (NIBE), Steffen +Moser (EnBW), Frank Richert (Wolf), Ingo Rieger (Bosch), Egbert Tippelt ( +Viessmann), André Jacob +(BWP), Johanna Otting (BWP), Björn Schreinermacher (BWP)(2023): "Branchenstudie +2023: Marktentwicklung – Prognose +–Handlungsempfehlungen", Bundesverband Wärmepumpe (BWP) e. V. 
+ +[6] Statistisches Landesamt Sachsen-Anhalt: "GENESIS-Tabelle: 31121-0006, +Statistik der Baufertigstellungen", +von https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb, +Stand: 11.04.2023 + +[7] Statista Research Department(2021): "Struktur des Wohnungsbaus nach Neubau +und Sanierung in Deutschland in den +Jahren 2001 bis 2020", +von https://de.statista.com/statistik/daten/studie/202207/umfrage/struktur-des-wohnungsbaus-nach-art-der-bauleistung-in-deutschland/, +Stand: 03.04.2023 12:26:20 + +[8] Statista: "Daten und Fakten zur Inflation und den Verbraucherpreisen" , +von https://de.statista.com/themen/112/inflation/#topicOverview , Stand: +29.03.2023 + +[9] Cebulla, Felix (2017): "Storage demand in highly renewable energy scenarios +for Europe", OPUS - Online Publikationen +der Universität Stuttgart, von https://elib.uni-stuttgart.de/handle/11682/9778 + +[10] Frauenhofer ISE (2019): "Stromgestehungskosten erneuerbare Energien", +von https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html + +[11] BMWK (2021): "Energiedaten" +von https://www.bmwk.de/Redaktion/DE/Artikel/Energie/energiedaten-gesamtausgabe.html + +[12] Michael Herrmann, Jürgen Weber: Öfen und Kamine: Raumheizungen fachgerecht +planen und bauen. Beuth Verlag, 201, +von https://books.google.de/books?id=n0fVYjrHAlwC&pg=PA58#v=onepage&q&f=false + +[13] www.brikett-rekord.com: "Energiekostenvergleich", +von https://www.brikett-rekord.com/de/heizwertvergleich-rekord-briketts.html, +letzter Abruf 8.5.2023 + +[14] WindNode: Modell, Methodik, Daten, ABW; +von: RLI +letzer Abruf 8.8.2023 + +[15] Fluri, Verena: "Wirtschaftlichkeit von zukunftsfähigen Geschäftsmodellen +dezentraler Stromspeicher" +von https://www.zhb-flensburg.de/fileadmin/content/spezial-einrichtungen/zhb/dokumente/dissertationen/fluri/fluri-2019-wirtschaftlichkeit-dez-stromspeicher.pdf, +letzter Abruf 8.8.2023 + +[16] Schröder, Andreas; Kunz, Friedrich; Meiss, Jan; Mendelevitch, Roman; +Hirschhausen, Christian von: "Current and Prospective Costs of Electricity +Generation until 2050" +von https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf, +letzter Abruf 8.8.2023 + +[17] Prüggler, Wolfgang (2019): "HEIMSPEICHERSYSTEME UND ZENTRALE +BATTERIESPEICHER – KRITISCHE FAKTOREN DER WIRTSCHAFTLICHKEIT" +von https://ens.dk/sites/ens.dk/files/Analyser/technology_data_catalogue_for_energy_storage.pdf, +letzter Abruf 8.8.2023 + +### Code Anhang + +wood_oven_script.py + +``` +import pandas as pd +import os.path + + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. 
Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau = 1 - neubau + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis 2020 + single_new = 693 / 763 + multiple_new = (763 - 693) / 763 + + # Einfamilinehaus-Mehrfamilienhaus im Altbau Annahme: + single_faktor = 0.7 + multiple_faktor = 0.3 + + single_new_faktor = neubau * single_new + multiple_new_faktor = neubau * multiple_new + single_old_faktor = altbau * single_faktor + multiple_old_faktor = altbau * multiple_faktor + + single_old = single_old_faktor * dic["single_old_price"] + multiple_old = multiple_old_faktor * dic["multiple_old_price"] + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_old + multiple_old + single_new + multiple_new + + return preis + + +# Daten aus DEA: +# einlesen von Daten +data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +#datensheets +single_old = pd.read_excel(data, "204 Biomass auto,ex single", skiprows=4, nrows=33) +multiple_old = pd.read_excel(data, "204 Biomass auto,ex apart", skiprows=4, nrows=33) +single_new = pd.read_excel(data, "204 Biomass auto,new single", skiprows=4, nrows=33) +multiple_new = pd.read_excel(data, "204 Biomass auto,new apart", skiprows=4, nrows=33) + +dic_capacity_cost_overnight_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[19,5]*1000)/(single_old.iat[0,5]/1000), (single_old.iat[19,6]*1000)/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[19,5]*1000)/(multiple_old.iat[0,5]/1000), (multiple_old.iat[19,6]*1000)/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[19,5]*1000)/(single_new.iat[0,5]/1000), (single_new.iat[19,6]*1000)/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[19,5]*1000)/(multiple_new.iat[0,5]/1000), (multiple_new.iat[19,6]*1000)/(multiple_new.iat[0,6]/1000)), +} + +dic_effiency_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[3,5], single_old.iat[3,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[3,5], multiple_old.iat[3,6]) , + "single_new_price": linear_interpolate_2045(single_new.iat[3,5], single_new.iat[3,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[3,5], multiple_new.iat[3,6]) +} + +dic_fixom_cost_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[24,5])/(single_old.iat[0,5]/1000), (single_old.iat[24,6])/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[24,5])/(multiple_old.iat[0,5]/1000), (multiple_old.iat[24,6])/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[24,5])/(single_new.iat[0,5]/1000), (single_new.iat[24,5])/(single_new.iat[0,5]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[24,5])/(multiple_new.iat[0,5]/1000), (multiple_new.iat[24,6])/(multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[5,5], single_old.iat[5,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[5,5], multiple_old.iat[5,6]), + "single_new_price": linear_interpolate_2045(single_new.iat[5,5], single_new.iat[5,6]), + 
"multiple_new_price": linear_interpolate_2045(multiple_new.iat[5,5], multiple_new.iat[5,6]), +} + +dic_marginal_cost_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[23,2] / 1000, single_old.iat[23,2] / 1000), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[23,2] / 1000, multiple_old.iat[23,2] / 1000), + "single_new_price": linear_interpolate_2045(single_new.iat[23,2] / 1000,single_new.iat[23,2] ), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[23,2] / 1000, multiple_new.iat[23,2] / 1000), +} + +dic_2045 = [dic_capacity_cost_overnight_2045,dic_effiency_2045, dic_fixom_cost_2045, dic_lifetime_2045, dic_marginal_cost_2045] +val_2045 = [] + +# Berechnungen +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)) + +print(val_2045) +``` + +thermal_collector_small_script.py + +``` +import pandas as pd +import os.path + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau = 1 - neubau + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis 2020 + single_new = 693 / 763 + multiple_new = (763 - 693) / 763 + + # Einfamilinehaus-Mehrfamilienhaus im Altbau Annahme: + single_faktor = 0.7 + multiple_faktor = 0.3 + + single_new_faktor = neubau * single_new + multiple_new_faktor = neubau * multiple_new + single_old_faktor = altbau * single_faktor + multiple_old_faktor = altbau * multiple_faktor + + single_old = single_old_faktor * dic["single_old_price"] + multiple_old = multiple_old_faktor * dic["multiple_old_price"] + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_old + multiple_old + single_new + multiple_new + + return preis + + +# Daten aus DEA: +# einlesen von Daten +data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +#datensheets +single_old = pd.read_excel(data, "215 Solar heating,ex single", skiprows=4, nrows=33) +multiple_old = pd.read_excel(data, "215 Solar heating,ex apart", skiprows=4, nrows=33) +single_new = pd.read_excel(data, "215 Solar heating,new single", skiprows=4, nrows=33) +multiple_new = pd.read_excel(data, "215 Solar heating,new apart", skiprows=4, nrows=33) + +dic_capacity_cost_overnight_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[19,5]*1000)/(single_old.iat[0,5]/1000), (single_old.iat[19,6]*1000)/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[19,5]*1000)/(multiple_old.iat[0,5]/1000), (multiple_old.iat[19,6]*1000)/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[19,5]*1000)/(single_new.iat[0,5]/1000), (single_new.iat[19,6]*1000)/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[19,5]*1000)/(multiple_new.iat[0,5]/1000), (multiple_new.iat[19,6]*1000)/(multiple_new.iat[0,6]/1000)), +} + +dic_fixom_cost_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[24,5])/(single_old.iat[0,5]/1000), (single_old.iat[24,6])/(single_old.iat[0,6]/1000)), + "multiple_old_price": 
linear_interpolate_2045((multiple_old.iat[24,5])/(multiple_old.iat[0,5]/1000), (multiple_old.iat[24,6])/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[24,5])/(single_new.iat[0,5]/1000), (single_new.iat[24,5])/(single_new.iat[0,5]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[24,5])/(multiple_new.iat[0,5]/1000), (multiple_new.iat[24,6])/(multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[5,5], single_old.iat[5,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[5,5], multiple_old.iat[5,6]), + "single_new_price": linear_interpolate_2045(single_new.iat[5,5], single_new.iat[5,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[5,5], multiple_new.iat[5,6]), +} + +dic_2045 = [dic_capacity_cost_overnight_2045, dic_fixom_cost_2045, dic_lifetime_2045] +val_2045 = [] + +# Berechnungen +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)) + +print(val_2045) +``` + +pv_rooftop_script.py: + +``` +import pandas as pd +import geopandas as gpd +import os.path + +#trennt residential and industrial rooftop PV nach Nennleistung +def get_proprtion_residential_industrtial(df): + sum = df[["usage_sector", "status"]].groupby("usage_sector").count().sum() + industrial = (df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Industrie"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Sonstige"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Landwirtschaft"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc[ + "Gewerbe, Handel und Dienstleistungen"][0]) \ + / sum + residental = (df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Öffentliches Gebäude"][0] + + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Haushalt"][0]) / sum + return [industrial, residental] + + +def get_qgis_df(GeoDataFrame): + gdf = gpd.read_file(GeoDataFrame, where="geometry_approximated='0'") + gdf.where(gdf["status"] == "In Betrieb").to_file("bnetza_mastr_pv_roof_region_filtered.gpkg") + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + +def get_agg_price_2045(dic, proportion): + # getting faktoren + industrial_factor = proportion[0][0] + residential_factor = proportion[1][0] + + residential = residential_factor * dic["residential_price"] + industrial = industrial_factor * dic["industrial_price"] + + preis = residential + industrial + + return preis + + +data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data = ["bnetza_mastr_pv_ground_region.gpkg", "bnetza_mastr_pv_roof_region.gpkg"] + +df = gpd.read_file(os.path.join(data_folder, data[1])) + +# Daten aus DEA: +# einlesen von Daten +data_folder_sheets = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data_sheets = os.path.join(data_folder_sheets, "technology_data_for_el_and_dh.xlsx") + +#datensheets +residential = pd.read_excel(data_sheets, "22 Rooftop PV residential", skiprows=4, nrows=42) +industrial = pd.read_excel(data_sheets, "22 Rooftop PV comm.&industrial", skiprows=4, nrows=42) + +proportion = get_proprtion_residential_industrtial(df) + +dic_capacity_cost_overnight_2045 = { + "residential_price": linear_interpolate_2045(residential.iat[10,5], residential.iat[10,6])*1000000, + "industrial_price": 
linear_interpolate_2045(industrial.iat[10,5], industrial.iat[10,6])*1000000 +} +dic_fixom_cost_2045 = { + "residential_price": linear_interpolate_2045(residential.iat[18,5], residential.iat[18,6]), + "industrial_price": linear_interpolate_2045(industrial.iat[18,5], industrial.iat[18,6]), +} + +dic_lifetime_2045 = { + "residential_price": linear_interpolate_2045(residential.iat[3,5], residential.iat[3,6]), + "industrial_price": linear_interpolate_2045(industrial.iat[3,5], industrial.iat[3,6]), +} + +dic_2045 = [dic_capacity_cost_overnight_2045, dic_fixom_cost_2045, dic_lifetime_2045] +val_2045 = [] + +# Berechnungen +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic, proportion)) + +print(dic_capacity_cost_overnight_2045, dic_fixom_cost_2045, dic_lifetime_2045) +print(proportion[0][0]) +print(val_2045) +``` + +pth_decentral_script.py + +``` +import pandas as pd +import os.path + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis 2020 + single_new = 693 / 763 + multiple_new = (763 - 693) / 763 + + single_new_faktor = single_new + multiple_new_faktor = multiple_new + + + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_new + multiple_new + + return preis + + +# Daten aus DEA: +# einlesen von Daten +data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +#datensheets +single_new = pd.read_excel(data, "216 Electric heating,new single", skiprows=4, nrows=33) +multiple_new = pd.read_excel(data, "216 Electric heating,new apart", skiprows=4, nrows=33) + +dic_capacity_cost_overnight_2045 = { + "single_new_price": linear_interpolate_2045((single_new.iat[19,5]*1000)/(single_new.iat[0,5]/1000), (single_new.iat[19,6]*1000)/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[19,5]*1000)/(multiple_new.iat[0,5]/1000), (multiple_new.iat[19,6]*1000)/(multiple_new.iat[0,6]/1000)), +} +dic_effiency_2045 = { + "single_new_price": linear_interpolate_2045(single_new.iat[3,5], single_new.iat[3,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[3,5], multiple_new.iat[3,6]) +} +dic_fixom_cost_2045 = { + "single_new_price": linear_interpolate_2045((single_new.iat[24,5])/(single_new.iat[0,5]/1000), (single_new.iat[24,6])/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[24,5])/(multiple_new.iat[0,5]/1000), (multiple_new.iat[24,6])/(multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "single_new_price": linear_interpolate_2045(single_new.iat[5,5], single_new.iat[5,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[5,5], multiple_new.iat[5,6]), +} +dic_marginal_cost_2045 = { + "single_new_price": linear_interpolate_2045(single_new.iat[23,2] / 1000, single_new.iat[23,2] / 1000), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[23,2] / 1000, multiple_new.iat[23,2] / 1000), +} + +dic_2045 = [dic_capacity_cost_overnight_2045, dic_effiency_2045, 
dic_fixom_cost_2045, dic_lifetime_2045, dic_marginal_cost_2045] +val_2045 = [] + +print(dic_fixom_cost_2045) +# Berechnungen +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)) + +print(val_2045) +``` + +heatpump_small_script.py: + +``` +import pandas as pd +import os.path + +def get_faktoren_new(df): + + hp_agg = {"single_erdwaerme": 0, "multiple_erdwaerme": 0, "single_umweltwaerme": 0, "multiple_umweltwaerme": 0,} + + for row in df.itertuples(): + bereich = row[1].split(",")[2] + energie = row[1].split(",")[3] + try: + count_insg = int(row[1].split(",")[4]) + count_single = int(row[1].split(",")[5]) + except: + ValueError + + if bereich == "Sachsen-Anhalt": + if energie == "Geothermie": + hp_agg["single_erdwaerme"] += count_single + hp_agg["multiple_erdwaerme"] += (count_insg - count_single) + elif energie == "Umweltthermie (Luft / Wasser)": + hp_agg["single_umweltwaerme"] += count_single + hp_agg["multiple_umweltwaerme"] += (count_insg - count_single) + else: + continue + + else: + continue + + hp_agg_sum = sum(hp_agg.values()) + air_single_new = hp_agg["single_umweltwaerme"] / hp_agg_sum + air_multiple_new = hp_agg["multiple_umweltwaerme"] / hp_agg_sum + ground_single_new = hp_agg["single_erdwaerme"] / hp_agg_sum + ground_multiple_new = hp_agg["multiple_erdwaerme"] / hp_agg_sum + + return air_single_new, air_multiple_new, ground_single_new, ground_multiple_new + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2020(dic): + # nach BWP: Absatz von 2010-2020 -> bildet Bestand mit Kosten von Neubau ab + wp_neubau_abs = 52500+52500+45000+45000+37500+37500+37500+37500+30000+30000+30000 + wp_altbau_abs = 67500+37500+37500+37500+30000+22500+22500+22500+30000+30000+22500 + wp_gesamt_abs = wp_altbau_abs + wp_neubau_abs + wp_neubau = wp_neubau_abs / wp_gesamt_abs + wp_geo = 333333 / (333333+750000) + wp_umwelt = 750000 / (333333+750000) #Umwelt = Luft und Wasser + + # Verhältnisse WP Alt und Neubau in ST nach destatis + # Daten einlesen + data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") + hp = os.path.join(data_folder, "2023_04_11_ST_thermische_Primärenergie_neubau_2010-2020.csv") + df = pd.read_csv(hp, encoding="ISO8859-1", delimiter=";", skiprows=range(0, 10), nrows=2150) + + faktoren_new = get_faktoren_new(df) + + air_water_single_new_faktor = wp_neubau * faktoren_new[0] + air_water_multiple_new_faktor = wp_neubau * faktoren_new[1] + ground_water_single_new_fakotr = wp_neubau * faktoren_new[2] + ground_water_multiple_new_faktor = wp_neubau * faktoren_new[3] + + # wp altbau: + altbau_air = wp_umwelt - (air_water_single_new_faktor + air_water_multiple_new_faktor) + altbau_ground = wp_geo - (ground_water_single_new_fakotr + ground_water_multiple_new_faktor) + + # keine Daten, daher wie neubau angenommen (es gibt keinen Grund zu glauben, dass im Mehrfamilien-Altbau mehr WP verbaut werden) + single_faktor = faktoren_new[0] + faktoren_new[2] # ca 0.95 + multiple_faktor = faktoren_new[1] + faktoren_new[3] # ca 0.05 + + air_water_single_old_faktor = altbau_air * single_faktor + air_water_multiple_old_faktor = altbau_air * multiple_faktor + ground_water_single_old_fakotr = altbau_ground * single_faktor + ground_water_multiple_old_faktor = altbau_ground * multiple_faktor + + + air_water_single_old = air_water_single_old_faktor * dic["air_water_single_old_price"] + air_water_multiple_old = air_water_multiple_old_faktor * 
dic["air_water_multiple_old_price"] + ground_water_single_old = ground_water_single_old_fakotr * dic["ground_water_single_old_price"] + ground_water_multiple_old = ground_water_multiple_old_faktor * dic["ground_water_multiple_old_price"] + + air_water_single_new = air_water_single_new_faktor * dic["air_water_single_new_price"] + air_water_multiple_new = air_water_multiple_new_faktor * dic["air_water_multiple_new_price"] + ground_water_single_new = ground_water_single_new_fakotr * dic["ground_water_single_new_price"] + ground_water_multiple_new = ground_water_multiple_new_faktor * dic["ground_water_multiple_new_price"] + + altbau_kosten = air_water_single_old + air_water_multiple_old + ground_water_single_old + ground_water_multiple_old + neubau_kosten = air_water_single_new + air_water_multiple_new + ground_water_single_new + ground_water_multiple_new + + preis = altbau_kosten + neubau_kosten + + faktoren = [air_water_single_old_faktor, + air_water_multiple_old_faktor, + ground_water_single_old_fakotr, + ground_water_multiple_old_faktor, + air_water_single_new_faktor, + air_water_multiple_new_faktor, + ground_water_single_new_fakotr, + ground_water_multiple_new_faktor, + ] + + return preis, faktoren + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. Deutschland + neubau_allg_prozent = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau_allg_prozent = 1 - neubau_allg_prozent + + # Sole/Luft nach Absatz 2022 laut BWP + ground = 0.13 + air = 0.87 + + # Einfamilienhaus/Mehrfamilienhaus + single = 0.7 + multiple = 0.3 + + + # Faktoren + air_water_single_old_faktor = altbau_allg_prozent * air * single + air_water_multiple_old_faktor = altbau_allg_prozent * air * multiple + ground_water_single_old_fakotr = altbau_allg_prozent * ground * single + ground_water_multiple_old_faktor = altbau_allg_prozent * ground * multiple + air_water_single_new_faktor = neubau_allg_prozent * air * single + air_water_multiple_new_faktor = neubau_allg_prozent * air * multiple + ground_water_single_new_fakotr = neubau_allg_prozent * ground * single + ground_water_multiple_new_faktor = neubau_allg_prozent * ground * multiple + + air_water_single_old = air_water_single_old_faktor * dic["air_water_single_old_price"] + air_water_multiple_old = air_water_multiple_old_faktor * dic["air_water_multiple_old_price"] + ground_water_single_old = ground_water_single_old_fakotr * dic["ground_water_single_old_price"] + ground_water_multiple_old = ground_water_multiple_old_faktor * dic["ground_water_multiple_old_price"] + air_water_single_new = air_water_single_new_faktor * dic["air_water_single_new_price"] + air_water_multiple_new = air_water_multiple_new_faktor * dic["air_water_multiple_new_price"] + ground_water_single_new = ground_water_single_new_fakotr * dic["ground_water_single_new_price"] + ground_water_multiple_new = ground_water_multiple_new_faktor * dic["ground_water_multiple_new_price"] + + altbau_kosten = air_water_single_old + air_water_multiple_old + ground_water_single_old + ground_water_multiple_old + neubau_kosten = air_water_single_new + air_water_multiple_new + ground_water_single_new + ground_water_multiple_new + + preis = altbau_kosten + neubau_kosten + + faktoren = [air_water_single_old_faktor, + air_water_multiple_old_faktor, + ground_water_single_old_fakotr, + ground_water_multiple_old_faktor, + air_water_single_new_faktor, + air_water_multiple_new_faktor, + 
ground_water_single_new_fakotr, + ground_water_multiple_new_faktor, + ] + + return preis, faktoren + + +# Daten aus DEA: +# einlesen von Daten +data_folder = os.path.join("/home/local/RL-INSTITUT/aaron.schilling/Dokumente/Projekte/Digipipe") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +#datensheets +air_water_single_old = pd.read_excel(data, "207 HP air-water,ex single", skiprows=4, nrows=33) +air_water_multiple_old = pd.read_excel(data, "207 HP air-water,ex apart", skiprows=4, nrows=33) +ground_water_single_old = pd.read_excel(data, "207 HP air-water,new single", skiprows=4, nrows=33) +ground_water_multiple_old = pd.read_excel(data, "207 HP air-water,new apart", skiprows=4, nrows=33) +air_water_single_new = pd.read_excel(data, "207 HP ground-water,ex single", skiprows=4, nrows=33) +air_water_multiple_new = pd.read_excel(data, "207 HP ground-water,ex apart", skiprows=4, nrows=33) +ground_water_single_new = pd.read_excel(data, "207 HP ground-water,new single", skiprows=4, nrows=33) +ground_water_multiple_new = pd.read_excel(data, "207 HP ground-water,new apart", skiprows=4, nrows=33) + +dic_capacity_cost_overnight_2020 = { + "air_water_single_old_price": air_water_single_old.iat[19,2]*1000/(air_water_single_old.iat[0,2]/1000), + "air_water_multiple_old_price": air_water_multiple_old.iat[19,2]*1000/(air_water_multiple_old.iat[0,2]/1000), + "ground_water_single_old_price": ground_water_single_old.iat[19,2]*1000/(ground_water_single_old.iat[0,2]/1000), + "ground_water_multiple_old_price": ground_water_multiple_old.iat[19,2]*1000/(ground_water_multiple_old.iat[0,2]/1000), + "air_water_single_new_price": air_water_single_new.iat[19,2]*1000/(air_water_single_new.iat[0,2]/1000), + "air_water_multiple_new_price": air_water_multiple_new.iat[19,2]*1000/(air_water_multiple_new.iat[0,2]/1000), + "ground_water_single_new_price": ground_water_single_new.iat[19,2]*1000/(ground_water_single_new.iat[0,2]/1000), + "ground_water_multiple_new_price": ground_water_multiple_new.iat[19,2]*1000/(ground_water_multiple_new.iat[0,2]/1000), +} +dic_fixom_cost_2020 = { + "air_water_single_old_price": air_water_single_old.iat[24,2]/(air_water_single_old.iat[0,2]/1000), + "air_water_multiple_old_price": air_water_multiple_old.iat[24,2]/(air_water_multiple_old.iat[0,2]/1000), + "ground_water_single_old_price": ground_water_single_old.iat[24,2]/(ground_water_single_old.iat[0,2]/1000), + "ground_water_multiple_old_price": ground_water_multiple_old.iat[24,2]/(ground_water_multiple_old.iat[0,2]/1000), + "air_water_single_new_price": air_water_single_new.iat[24,2]/(air_water_single_new.iat[0,2]/1000), + "air_water_multiple_new_price": air_water_multiple_new.iat[24,2]/(air_water_multiple_new.iat[0,2]/1000), + "ground_water_single_new_price": ground_water_single_new.iat[24,2]/(ground_water_single_new.iat[0,2]/1000), + "ground_water_multiple_new_price": ground_water_multiple_new.iat[24,2]/(ground_water_multiple_new.iat[0,2]/1000), +} +dic_lifetime_2020 = { + "air_water_single_old_price": air_water_single_old.iat[5,2], + "air_water_multiple_old_price": air_water_multiple_old.iat[5,2], + "ground_water_single_old_price": ground_water_single_old.iat[5,2], + "ground_water_multiple_old_price": ground_water_multiple_old.iat[5,2], + "air_water_single_new_price": air_water_single_new.iat[5,2], + "air_water_multiple_new_price": air_water_multiple_new.iat[5,2], + "ground_water_single_new_price": ground_water_single_new.iat[5,2], + "ground_water_multiple_new_price": 
ground_water_multiple_new.iat[5,2], +} +dic_marginal_cost_2020 = { + "air_water_single_old_price": air_water_single_old.iat[23,2] / 1000, + "air_water_multiple_old_price": air_water_multiple_old.iat[23,2] / 1000, + "ground_water_single_old_price": ground_water_single_old.iat[23,2] / 1000, + "ground_water_multiple_old_price": ground_water_multiple_old.iat[23,2] / 1000, + "air_water_single_new_price": air_water_single_new.iat[23,2] / 1000, + "air_water_multiple_new_price": air_water_multiple_new.iat[23,2] / 1000, + "ground_water_single_new_price": ground_water_single_new.iat[23,2] / 1000, + "ground_water_multiple_new_price": ground_water_multiple_new.iat[23,2] / 1000, +} +dic_capacity_cost_overnight_2045 = { + "air_water_single_old_price": linear_interpolate_2045(air_water_single_old.iat[19,5]*1000/(air_water_single_old.iat[0,5]/1000), air_water_single_old.iat[19,6]*1000/(air_water_single_old.iat[0,6]/1000)), + "air_water_multiple_old_price": linear_interpolate_2045(air_water_multiple_old.iat[19,5]*1000/(air_water_multiple_old.iat[0,5]/1000), air_water_multiple_old.iat[19,6]*1000/(air_water_multiple_old.iat[0,6]/1000)), + "ground_water_single_old_price": linear_interpolate_2045(ground_water_single_old.iat[19,5]*1000/(ground_water_single_old.iat[0,5]/1000),ground_water_single_old.iat[19,6]*1000/(ground_water_single_old.iat[0,6]/1000)), + "ground_water_multiple_old_price": linear_interpolate_2045(ground_water_multiple_old.iat[19,5]*1000/(ground_water_multiple_old.iat[0,5]/1000), ground_water_multiple_old.iat[19,6]*1000/(ground_water_multiple_old.iat[0,6]/1000)), + "air_water_single_new_price": linear_interpolate_2045(air_water_single_new.iat[19,5]*1000/(air_water_single_new.iat[0,5]/1000), air_water_single_new.iat[19,6]*1000/(air_water_single_new.iat[0,6]/1000)), + "air_water_multiple_new_price": linear_interpolate_2045(air_water_multiple_new.iat[19,5]*1000/(air_water_multiple_new.iat[0,5]/1000),air_water_multiple_new.iat[19,6]*1000/(air_water_multiple_new.iat[0,6]/1000)), + "ground_water_single_new_price": linear_interpolate_2045(ground_water_single_new.iat[19,5]*1000/(ground_water_single_new.iat[0,5]/1000), ground_water_single_new.iat[19,6]*1000/(ground_water_single_new.iat[0,6]/1000)), + "ground_water_multiple_new_price": linear_interpolate_2045(ground_water_multiple_new.iat[19,5]*1000/(ground_water_multiple_new.iat[0,5]/1000), ground_water_multiple_new.iat[19,6]*1000/(ground_water_multiple_new.iat[0,6]/1000)), +} +dic_fixom_cost_2045 = { + "air_water_single_old_price": linear_interpolate_2045(air_water_single_old.iat[24,5]/(air_water_single_old.iat[0,5]/1000), air_water_single_old.iat[24,6]/(air_water_single_old.iat[0,6]/1000)), + "air_water_multiple_old_price": linear_interpolate_2045(air_water_multiple_old.iat[24,5]/(air_water_multiple_old.iat[0,5]/1000), air_water_multiple_old.iat[24,6]/(air_water_multiple_old.iat[0,6]/1000)), + "ground_water_single_old_price": linear_interpolate_2045(ground_water_single_old.iat[24,5]/(ground_water_single_old.iat[0,5]/1000), ground_water_single_old.iat[24,6]/(ground_water_single_old.iat[0,6]/1000)), + "ground_water_multiple_old_price": linear_interpolate_2045(ground_water_multiple_old.iat[24,5]/(ground_water_multiple_old.iat[0,5]/1000), ground_water_multiple_old.iat[24,6]/(ground_water_multiple_old.iat[0,6]/1000)), + "air_water_single_new_price": linear_interpolate_2045(air_water_single_new.iat[24,5]/(air_water_single_new.iat[0,5]/1000), air_water_single_new.iat[24,6]/(air_water_single_new.iat[0,6]/1000)), + "air_water_multiple_new_price": 
linear_interpolate_2045(air_water_multiple_new.iat[24,5]/(air_water_multiple_new.iat[0,5]/1000), air_water_multiple_new.iat[24,6]/(air_water_multiple_new.iat[0,6]/1000)), + "ground_water_single_new_price": linear_interpolate_2045(ground_water_single_new.iat[24,5]/(ground_water_single_new.iat[0,5]/1000), ground_water_single_new.iat[24,6]/(ground_water_single_new.iat[0,6]/1000)), + "ground_water_multiple_new_price": linear_interpolate_2045(ground_water_multiple_new.iat[24,5]/(ground_water_multiple_new.iat[0,5]/1000), ground_water_multiple_new.iat[24,6]/(ground_water_multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "air_water_single_old_price": linear_interpolate_2045(air_water_single_old.iat[5,5], air_water_single_old.iat[5,6]), + "air_water_multiple_old_price": linear_interpolate_2045(air_water_multiple_old.iat[5,5], air_water_multiple_old.iat[5,6]), + "ground_water_single_old_price": linear_interpolate_2045(ground_water_single_old.iat[5,5], ground_water_single_old.iat[5,6]), + "ground_water_multiple_old_price": linear_interpolate_2045(ground_water_multiple_old.iat[5,5], ground_water_multiple_old.iat[5,6]), + "air_water_single_new_price": linear_interpolate_2045(air_water_single_new.iat[5,5], air_water_single_new.iat[5,6]), + "air_water_multiple_new_price": linear_interpolate_2045(air_water_multiple_new.iat[5,5], air_water_multiple_new.iat[5,6]), + "ground_water_single_new_price": linear_interpolate_2045(ground_water_single_new.iat[5,5], ground_water_single_new.iat[5,6]), + "ground_water_multiple_new_price": linear_interpolate_2045(ground_water_multiple_new.iat[5,5], ground_water_multiple_new.iat[5,6]), +} +dic_marginal_cost_2045 = { + "air_water_single_old_price": linear_interpolate_2045(air_water_single_old.iat[23,5] / 1000, air_water_single_old.iat[23,6] / 1000), + "air_water_multiple_old_price": linear_interpolate_2045(air_water_multiple_old.iat[23,5] / 1000, air_water_multiple_old.iat[23,6] / 1000), + "ground_water_single_old_price": linear_interpolate_2045(ground_water_single_old.iat[23,5] / 1000, ground_water_single_old.iat[23,6] / 1000), + "ground_water_multiple_old_price": linear_interpolate_2045(ground_water_multiple_old.iat[23,5] / 1000, ground_water_multiple_old.iat[23,6] / 1000), + "air_water_single_new_price": linear_interpolate_2045(air_water_single_new.iat[23,5] / 1000, air_water_single_new.iat[23,6] / 1000), + "air_water_multiple_new_price": linear_interpolate_2045(air_water_multiple_new.iat[23,5] / 1000, air_water_multiple_new.iat[23,6] / 1000), + "ground_water_single_new_price": linear_interpolate_2045(ground_water_single_new.iat[23,5] / 1000, ground_water_single_new.iat[23,6] / 1000), + "ground_water_multiple_new_price": linear_interpolate_2045(ground_water_multiple_new.iat[23,5] / 1000, ground_water_multiple_new.iat[23,6] / 1000), +} + +dic_2020 = [dic_capacity_cost_overnight_2020, dic_fixom_cost_2020, dic_lifetime_2020, dic_marginal_cost_2020] +dic_2045 = [dic_capacity_cost_overnight_2045, dic_fixom_cost_2045, dic_lifetime_2045, dic_marginal_cost_2045] + +val_2020 = [] +val_2045 = [] +faktoren_2020 = get_agg_price_2020(dic_fixom_cost_2045)[1] +faktoren_2045 = get_agg_price_2045(dic_fixom_cost_2045)[1] + +# Berechnungen +for dic in dic_2020: + val_2020.append(get_agg_price_2020(dic)[0]) + +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)[0]) + +print(val_2020, val_2045) +print(faktoren_2020, faktoren_2045) +``` + +boiler_small_script.py: + +``` +import pandas as pd +import os.path + +def get_faktoren_new(df): + + gas_agg = {"single": 0, 
"multiple": 0} + + for row in df.itertuples(): + bereich = row[1].split(",")[2] + energie = row[1].split(",")[3] + try: + count_insg = int(row[1].split(",")[4]) + count_single = int(row[1].split(",")[5]) + except: + ValueError + + if bereich == "Sachsen-Anhalt": + if energie == "Gas": + gas_agg["single"] += count_single + gas_agg["multiple"] += (count_insg - count_single) + else: + continue + + else: + continue + + gas_agg_sum = sum(gas_agg.values()) + single_new = gas_agg["single"] / gas_agg_sum + multiple_new = gas_agg["multiple"] / gas_agg_sum + + return single_new, multiple_new, + + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2020(dic): + # Neubau und Sanierungen allg nach BMI f. Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau = 1 - neubau + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis + # Daten einlesen + data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") + hp = os.path.join(data_folder, "2023_04_11_ST_thermische_Primärenergie_neubau_2010-2020.csv") + df = pd.read_csv(hp, encoding="ISO8859-1", delimiter=";", skiprows=range(0, 10), nrows=2150) + + faktoren_new = get_faktoren_new(df) + + # Einfamilinehaus-Mehrfamilienhaus im Altbau Annahme: + single_faktor = 0.7 + multiple_faktor = 0.3 + + single_new_faktor = neubau * faktoren_new[0] + multiple_new_faktor = neubau * faktoren_new[1] + single_old_faktor = altbau * single_faktor + multiple_old_faktor = altbau * multiple_faktor + + single_old = single_old_faktor * dic["single_old_price"] + multiple_old = multiple_old_faktor * dic["multiple_old_price"] + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_old + multiple_old + single_new + multiple_new + + + return preis + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. 
Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau = 1 - neubau + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis 2020 + gas_single_new = 693 / 763 + gas_multiple_new = (763 - 693) / 763 + + # Einfamilinehaus-Mehrfamilienhaus im Altbau Annahme: + single_faktor = 0.7 + multiple_faktor = 0.3 + + single_new_faktor = neubau * gas_single_new + multiple_new_faktor = neubau * gas_multiple_new + single_old_faktor = altbau * single_faktor + multiple_old_faktor = altbau * multiple_faktor + + single_old = single_old_faktor * dic["single_old_price"] + multiple_old = multiple_old_faktor * dic["multiple_old_price"] + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_old + multiple_old + single_new + multiple_new + + return preis + + +# Daten aus DEA: +# einlesen von Daten +data_folder = os.path.join("/ROAD/TO/DATA") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +#datensheets +single_old = pd.read_excel(data, "202 Gas boiler, ex single", skiprows=4, nrows=33) +multiple_old = pd.read_excel(data, "202 Gas boiler, ex apart", skiprows=4, nrows=33) +single_new = pd.read_excel(data, "202 Gas boiler, new single", skiprows=4, nrows=33) +multiple_new = pd.read_excel(data, "202 Gas boiler, new apart", skiprows=4, nrows=33) + + +dic_capacity_cost_overnight_2020 = { + "single_old_price": (single_old.iat[19,2]*1000)/(single_old.iat[0,2]/1000), + "multiple_old_price": (multiple_old.iat[19,2]*100)/(multiple_old.iat[0,2]/1000), + "single_new_price": (single_new.iat[19,2]*1000)/(single_new.iat[0,2]/1000), + "multiple_new_price": (multiple_new.iat[19,2]*1000)/(multiple_new.iat[0,2]/1000), +} +dic_effiency_2020 = { + "single_old_price": single_old.iat[3,2], + "multiple_old_price": multiple_old.iat[3,2], + "single_new_price": single_new.iat[3,2], + "multiple_new_price": multiple_new.iat[3,2], +} +dic_fixom_cost_2020 = { + "single_old_price": single_old.iat[24,2]/(single_old.iat[0,2]/1000), + "multiple_old_price": multiple_old.iat[24,2]/(multiple_old.iat[0,2]/1000), + "single_new_price": single_new.iat[24,2]/(single_new.iat[0,2]/1000), + "multiple_new_price": multiple_new.iat[24,2]/(multiple_new.iat[0,2]/1000), +} +dic_lifetime_2020 = { + "single_old_price": single_old.iat[5,2], + "multiple_old_price": multiple_old.iat[5,2], + "single_new_price": single_new.iat[5,2], + "multiple_new_price": multiple_new.iat[5,2], +} +dic_marginal_cost_2020 = { + "single_old_price": single_old.iat[23,2] / 1000, + "multiple_old_price": multiple_old.iat[23,2] / 1000, + "single_new_price": single_new.iat[23,2] / 1000, + "multiple_new_price": multiple_new.iat[23,2] / 1000, +} + +dic_capacity_cost_overnight_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[19,5]*1000)/(single_old.iat[0,5]/1000), (single_old.iat[19,6]*1000)/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[19,5]*1000)/(multiple_old.iat[0,5]/1000), (multiple_old.iat[19,6]*1000)/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[19,5]*1000)/(single_new.iat[0,5]/1000), (single_new.iat[19,6]*1000)/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[19,5]*1000)/(multiple_new.iat[0,5]/1000), (multiple_new.iat[19,6]*1000)/(multiple_new.iat[0,6]/1000)), +} + 
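+# Note (an assumption inferred from this script, not from the DEA source):
+# column index 2 of the sheets read above holds the 2020 values, while columns
+# 5 and 6 hold the 2040 and 2050 values between which linear_interpolate_2045()
+# interpolates, e.g. linear_interpolate_2045(100, 80) == 90.0 for the year 2045.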
+dic_effiency_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[3,5], single_old.iat[3,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[3,5], multiple_old.iat[3,6]) , + "single_new_price": linear_interpolate_2045(single_new.iat[3,5], single_new.iat[3,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[3,5], multiple_new.iat[3,6]) +} + +dic_fixom_cost_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[24,5])/(single_old.iat[0,5]/1000), (single_old.iat[24,6])/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[24,5])/(multiple_old.iat[0,5]/1000), (multiple_old.iat[24,6])/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[24,5])/(single_new.iat[0,5]/1000), (single_new.iat[24,5])/(single_new.iat[0,5]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[24,5])/(multiple_new.iat[0,5]/1000), (multiple_new.iat[24,6])/(multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[5,5], single_old.iat[5,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[5,5], multiple_old.iat[5,6]), + "single_new_price": linear_interpolate_2045(single_new.iat[5,5], single_new.iat[5,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[5,5], multiple_new.iat[5,6]), +} + +dic_marginal_cost_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[23,2] / 1000, single_old.iat[23,2] / 1000), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[23,2] / 1000, multiple_old.iat[23,2] / 1000), + "single_new_price": linear_interpolate_2045(single_new.iat[23,2] / 1000,single_new.iat[23,2] ), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[23,2] / 1000, multiple_new.iat[23,2] / 1000), +} + +dic_2020 = [dic_capacity_cost_overnight_2020, dic_effiency_2020, dic_fixom_cost_2020, dic_lifetime_2020, dic_marginal_cost_2020] +dic_2045 = [dic_capacity_cost_overnight_2045,dic_effiency_2045, dic_fixom_cost_2045, dic_lifetime_2045, dic_marginal_cost_2045] +val_2020 = [] +val_2045 = [] + +# Berechnungen +for dic in dic_2020: + val_2020.append(get_agg_price_2020(dic)) + +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)) + +print(val_2020, val_2045) + +``` diff --git a/digipipe/store/raw/technology_data/metadata.json b/digipipe/store/raw/technology_data/metadata.json new file mode 100644 index 00000000..15a40264 --- /dev/null +++ b/digipipe/store/raw/technology_data/metadata.json @@ -0,0 +1,114 @@ +{ + "name": "technology_data", + "title": "Technologiedaten", + "id": "technology_data", + "description": "Jahresvollaststunden, Leistungsdichte, Nennleistung, Kosten und Effizienzen von energieumwandlungs Technologien", + "language": [ + "de-DE", + "en-GB" + ], + "subject": [], + "keywords": [ + "technologiedaten", + "Jahresvollaststunden", + "Nennleistung", + "Kosten", + "Effizienzen" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": 
"https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Europe", + "extent": "Europe", + "resolution": "" + }, + "temporal": { + "referenceDate": null, + "timeseries" : null + }, + "sources": [ + { + "title": "Föderal Erneuerbar", + "description": "Jahresvollaststunden, Leistungsdichte, Nennleistung, Kosten und Effizienzen von energieumwandlungs Technologien", + "path": "https://www.foederal-erneuerbar.de", + "licenses": null + }, + { + "title": "PV- und Windflächenrechner", + "description": "Der Photovoltaik- und Windflächenrechner - Methoden und Daten", + "path": "https://zenodo.org/record/6794558", + "licenses": null + }, + { + "title": "Ariadne Szenarienreport", + "description": "Ariadne Szenarienreport", + "path": "https://ariadneprojekt.de/media/2022/02/Ariadne_Szenarienreport_Oktober2021_corr0222_lowres.pdf", + "licenses": null + }, + { + "title": "Technologiedaten", + "description": "Kosten und Effizienzen von Energieumwandlungtechnologien", + "path": "https://ens.dk/en/our-services/projections-and-models/technology-data", + "licenses": null + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": [], + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } +}, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } +} diff --git a/digipipe/store/utils.py b/digipipe/store/utils.py new file mode 100644 index 00000000..b6296fac --- /dev/null +++ b/digipipe/store/utils.py @@ -0,0 +1,172 @@ +""" +Helper functions for datasets +""" + +import inspect +import os +from pathlib import Path + +import geopandas as gpd +import pandas as pd + +from digipipe import store + + +def get_abs_store_root_path(): + """Get absolute path to data store + + Returns + ------- + PosixPath + Path to data store + """ + return Path(os.path.dirname(inspect.getfile(store))) + + +def get_abs_dataset_path(category, name, data_dir=False): + """Get absolute path to a dataset + + Parameters + ---------- + category : str + Category in data store, one of + ["raw", "preprocessed", "dataset", "appdata"] + name : str + Name of dataset (subdir) + data_dir : bool + If True, the subdir "data" where data lives is 
added to the path + + Returns + ------- + PosixPath + Path to dataset + """ + if category not in ["raw", "preprocessed", "datasets", "appdata"]: + raise ValueError(f"Category '{category}' not found.") + p = Path(get_abs_store_root_path()) / category / name + if data_dir is True: + p = p / "data" + return p + + +def create_tag_string_osmium(taglist): + """Create tag string required by osmium for extraction of OSM data. + + Parameters + ---------- + taglist : list of lists + List of tags, format: [[key_1, value_1], ... [key_n, value_n]] + + Returns + ------- + str + String with tags + """ + tag_string = "" + for k, v in taglist: + tag_string += "=".join([k, str(v)]) + " " + tag_string = tag_string[:-1] + return tag_string + + +def create_tag_string_ogr(taglist): + """Create tag string required by ogr2ogr for extraction of OSM data. + + Parameters + ---------- + taglist : list of lists + List of tags, format: [[key_1, value_1], ... [key_n, value_n]] + + Returns + ------- + dict + Tags (key: "tags") and filter conditions (key: "conditions") + """ + tag_conditions = ( + '-where "' + + " OR ".join( + [ + "=".join(['\\"' + tag + '\\"', '\\"' + str(val) + '\\"']) + for tag, val in taglist + ] + ) + + '"' + ) + tags = ",".join([tag for tag, _ in taglist]) + return {"tags": tags, "conditions": tag_conditions} + + +PATH_TO_REGION_MUNICIPALITIES_GPKG = ( + get_abs_dataset_path("datasets", "bkg_vg250_muns_region", data_dir=True) + / "bkg_vg250_muns_region.gpkg" +) +PATH_TO_REGION_DISTRICTS_GPKG = ( + get_abs_dataset_path( + "datasets", "bkg_vg250_districts_region", data_dir=True + ) + / "bkg_vg250_districts_region.gpkg" +) + + +def get_names_from_nuts(gpkg: Path, regions_nuts: list) -> list: + """ + Extract region names from a GeoPackage file for specified NUTS codes. + + This function reads a GeoPackage file using GeoPandas, filters the + data to select rows where the 'nuts' column matches the provided NUTS + codes, and returns a list of region names from the 'name' column + + Parameters + ---------- + gpkg : pathlib.Path + A GeoPackage representing geographical data file. + regions_nuts : list + A list of NUTS codes to filter the data by + + Returns + ------- + list + A list of region names corresponding to the provided NUTS codes + + """ + gdf = gpd.read_file(gpkg) + filtered_gdf = gdf[gdf["nuts"].isin(regions_nuts)] + regions_names = filtered_gdf["name"].tolist() + return regions_names + + +def df_merge_string_columns( + df: pd.DataFrame, source_delimiter: str = ";", target_delimiter: str = "; " +) -> pd.Series: + """ + Merge delimiter-separated strings in columns of DataFrame into new column + with unique values. Empty values are dropped. 
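+    For example (illustrative), a row with the column values "a;b" and "b;;c"
+    is merged into "a; b; c" (element order may vary, since sets are used
+    internally).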
+ + Parameters + ---------- + df : pd.DataFrame + Dataframe with columns to be merged + source_delimiter : str + Delimiter in original strings of all columns + target_delimiter : str + Desired delimiter in resulting Series + + Returns + ------- + pd.Series + Column with joined strings + """ + df_result = df.copy() + for col in df.columns: + df_result[col] = df[col].apply( + lambda f: "|".join( + [_ for _ in set(f.split(source_delimiter)) if _ != ""] + ) + ) + s = df_result.agg("|".join, axis=1) + + return s.apply( + lambda f: target_delimiter.join( + [_ for _ in set(f.split("|")) if _ != ""] + ) + ) diff --git a/digipipe/workflow/Snakefile b/digipipe/workflow/Snakefile new file mode 100644 index 00000000..5b8e7e5d --- /dev/null +++ b/digipipe/workflow/Snakefile @@ -0,0 +1,118 @@ +""" +Snakemake file for digipipe +""" + +from snakemake.utils import min_version + +min_version("6.0") + +from digipipe.scripts.config import load_dataset_configs +from digipipe.scripts.data_io import * +from digipipe.store.utils import get_abs_dataset_path, get_abs_store_root_path +from digipipe.config import GLOBAL_CONFIG + +config = GLOBAL_CONFIG +config.update(load_dataset_configs()) + +# Path to esys appdata +APPDATA_ESYS_PATH = get_abs_dataset_path("appdata", "esys") +DATASETS_ESYS_PATH = os.path.join( + get_abs_dataset_path("datasets", "esys_raw"), "data" +) + + +# Include store modules +include: "../store/preprocessed/module.smk" +include: "../store/datasets/module.smk" +include: "../store/appdata/module.smk" +# Include esys snakefiles +include: "../esys/Snakefile" + + +# ===== RULES ===== + + +rule all: + input: + app_datapackage=rules.appdata_datapackage_create_datapackage.output, + esys_appdata=rules.make_esys_appdata.output, + + +# run: +# print("CONFIG MAIN:") +# print(config) +# print(workflow.basedir) + + +rule clean: + """ + Remove all output and temporary files. + """ + params: + preprocessed=expand( + get_abs_dataset_path("preprocessed", "{name}", data_dir=True), + name=config.get("store")["preprocessed"].keys(), + ), + datasets=expand( + get_abs_dataset_path("datasets", "{name}", data_dir=True), + name=config.get("store")["datasets"].keys(), + ), + shell: + """ + for paths in "{params.preprocessed}" "{params.datasets}"; do + # Delete subdirs in dirs + for DIR_PATH in $paths; do + # Check if there are subdirs in the dir path + if [ "$(find "$DIR_PATH" -mindepth 1 -type d)" ]; then + # Remove all files and subdirectories + rm -rf "$DIR_PATH"/* + else + # If there are no subdirs, remove all files directly in DIR_PATH + rm -f "$DIR_PATH"/* + fi + done + done + # Check if there are subdirectories in appdata esys dir + if [ "$(find {APPDATA_ESYS_PATH} -mindepth 1 -type d)" ]; then + rm -r {APPDATA_ESYS_PATH}/* + fi + if [ "$(find {DATASETS_ESYS_PATH} -mindepth 1 -type d)" ]; then + rm -r {DATASETS_ESYS_PATH}/* + fi + echo "Removed all preprocessed data in directories: preprocessed, datasets and appdata." 
+ """ + + +rule download_raw_zip: + """ + Downloads a zip file containing the raw data from the cloud and stores it in 'store/temp' + """ + output: + raw_zip=get_abs_store_root_path() / "temp" / "raw.zip", + params: + url=config["global"]["input_data"]["download_url"], + run: + try: + download_file(params.url, output.raw_zip) + except Exception as e: + raise RuntimeError(f"Error downloading file from {params.url}: {e}") + + +rule update_raw: + """ + Extracts 'raw.zip' and copies the contained folders to 'store/raw' + """ + input: + raw_zip=get_abs_store_root_path() / "temp" / "raw.zip", + params: + temp_dir=get_abs_store_root_path() / "temp", + raw_dir=get_abs_store_root_path() / "raw", + temp_raw_dir=get_abs_store_root_path() / "temp" / "store" / "raw", + run: + try: + extract_zipfile(input.raw_zip, params.temp_dir) + copy_files(params.temp_raw_dir, params.raw_dir) + clean_folder(params.temp_dir) + except Exception as e: + clean_folder(params.temp_dir) + raise RuntimeError(f"Error updating raw data: {e}") diff --git a/digipipe/workflow/WORKFLOW.md b/digipipe/workflow/WORKFLOW.md new file mode 100644 index 00000000..2735ff23 --- /dev/null +++ b/digipipe/workflow/WORKFLOW.md @@ -0,0 +1,73 @@ +# Workflow + +## Download and update RAW dataset + +To run the pipeline, some input datasets are required. + +To download, extract and copy a current set of raw data into `store/raw`, type + + snakemake -j update_raw + +A zip file from a prespecified +[URL](https://wolke.rl-institut.de/s/aN2ccptGtFsFiDs/download) +is downloaded and unzipped to `store/temp/`. The raw data files are copied to +the corresponding folders in `store/raw/`. +A prompt asks if an already existing file should be updated. Confirm with "y" +or type "n" to skip. + +The following additional files must be downloaded manually: + +- [OpenStreetMap](https://download.geofabrik.de/europe/germany-230101.osm.pbf) + --> place in `store/raw/osm/data/` + +## Run + +To run the pipeline, go to Digipipe's root `digipipe/` or to +`digipipe/workflow/` and type + + snakemake -j NUMBER_OF_CPU_CORES + +where `NUMBER_OF_CPU_CORES` is the number of CPU cores to be used for the +pipeline execution. You can also perform a dry run (see what snakemake would do +without actually doing anything) by typing + + snakemake -n + +To clean all produced data, use + + snakemake -j1 clean + +This covers the processed data in the directories preprocessed, datasets and +appdata. + +## Pipeline visualization / DAG + +The entire pipeline can be visualized as a directed acyclic graph (DAG). +The following command creates the DAG as an SVG file in the current directory: + + snakemake --dag | dot -Tsvg > dag_rules_full.svg + +As the full graph is packed with information and therefore hard to grasp, +consider showing only certain parts by disabling some target files in the `all` +rule. Also, a simple rule graph (the one shown above) can be created and saved +in the current directory using + + snakemake --rulegraph | dot -Tsvg > dag_rules_simple.svg + +To create a graph in the current directory showing the file dependencies, type + + snakemake --filegraph | dot -Tsvg > dag_files.svg + +The graphs also provide information on the completed (solid lines) and pending +(dashed lines) processing steps. For further details see +[Snakemake CLI docs](https://snakemake.readthedocs.io/en/stable/executing/cli.html). + +## Snakefiles and config + +- The global workflow is defined in the main + [Snakefile](../workflow/Snakefile). 
+- It includes the module Snakefiles from the data store located at + - [store/preprocessed/module.smk](../store/preprocessed/module.smk) and + - [store/datasets/module.smk](../store/datasets/module.smk) +- In each of these modules, the rules as well as the config from the contained + datasets are imported. diff --git a/digipipe/workflow/utils.py b/digipipe/workflow/utils.py new file mode 100644 index 00000000..b4702385 --- /dev/null +++ b/digipipe/workflow/utils.py @@ -0,0 +1,23 @@ +""" +Helper functions for the workflow +""" + + +def search_data_workflows(): + """Search for snakefiles (*.smk) in data store but exclude templates. + + TODO: Used in outdated manual importing of snakemake files. + TODO: Remove, if modularization works out. + + Returns + ------- + list + Paths to Snakemake files + """ + # smk_files = [] + # for root, dirs, files in os.walk(get_abs_store_root_path()): + # for file in files: + # if file.endswith(".smk") and ".TEMPLATE" not in str(root): + # smk_files.append(Path(os.path.join(root, file))) + # return smk_files + raise NotImplementedError diff --git a/docs/datasets/appdata_datasets.md b/docs/datasets/appdata_datasets.md new file mode 100644 index 00000000..e8014915 --- /dev/null +++ b/docs/datasets/appdata_datasets.md @@ -0,0 +1,31 @@ +# 'Appdata' Datasets + +------------------------------ +## Datapackage für App + +Von der App benötigte Daten in JSON `datapackage_app.json`. + +Generelle Struktur: + +- `` + - `` (`scalars`, `sequences` oder `geodata`) + - `` + - `description`: Beschreibung + - `path`: Pfad zur Zieldatei im Datapackage + - `fields`: Felder-/Spaltendefinition + - `_source_path`: Pfad zur Datei im Quelldatensatz + - `dataset`: Name in `store/datasets/` + - `file`: Datei + +Kategorien bzw. Inhalt `resources`: + +- `base_data`: Basisdaten +- `production`: Energiebereitstellung +- `demand`: Energiebedarf +- `potential_areas`: Potenzialgebiete EE +- `emissions`: Emissionen +- `settings`: App-Settings +- `captions`: App-Captions + +**Dataset: `appdata/datapackage`** + diff --git a/docs/datasets/datasets_datasets.md b/docs/datasets/datasets_datasets.md new file mode 100644 index 00000000..ff920b83 --- /dev/null +++ b/docs/datasets/datasets_datasets.md @@ -0,0 +1,945 @@ +# 'Datasets' Datasets + +------------------------------ +## Technologiedaten + +Allgemeine Technologiedaten. + +Raw dataset: [technology_data](../../digipipe/store/raw/technology_data/dataset.md) + +**Dataset: `datasets/technology_data`** + + +------------------------------ +## Potenzialgebiete Windenergie + +Potenzialgebiete für die Errichtung von Windenergieanlagen, basierend auf den +Teilplänen Wind der Regionalen Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg +aus +[rpg_abw_regional_plan](../../digipipe/store/preprocessed/rpg_abw_regional_plan/dataset.md). + +Dateien: + +- STP Wind 2018 - Vorrang-/Eignungsgebiete: + `potentialarea_wind_stp_2018_vreg.gpkg` +- STP Wind 2027 - Planabsicht Vorranggebiete: + `potentialarea_wind_stp_2027_vr.gpkg` +- STP Wind 2027 - Planabsicht Repoweringgebiete: + `potentialarea_wind_stp_2027_repowering.gpkg` +- STP Wind 2027 - Suchraum Wald: + `potentialarea_wind_stp_2027_search_area_forest_area.gpkg` +- STP Wind 2027 - Suchraum Offenland: + `potentialarea_wind_stp_2027_search_area_open_area.gpkg` + +Die darin verwendeten Attributtexte werden in die Datei +`potentialarea_wind_attribute_captions.json` exportiert. + +Die Flächen werden mit den Gemeindegrenzen verschnitten und den Gemeinden +zugeordnet. 
Je Gemeinde und obigem Flächentyp/Datei wird eine Flächensumme (in +km²) berechnet, siehe `potentialarea_wind_area_stats_muns.csv`. Die Gemeinden +werden über den Schlüssel `municipality_id` (vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) +identifiziert. + +**Dataset: `datasets/potentialarea_wind_region`** + + +------------------------------ +## Speicheranlagen + +Speicheranlagen in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Es wird weiterhin geprüft, ob dem Speicher eine oder mehrere PV-Aufdachanlagen +zugeordnet sind, es wird die Anzahl und Summe der Nettonennleistung berechnet. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Weiterhin erfolgt eine Auswertung der installierten Gesamtleistung je Gemeinde: + +- Alle Speicher: `bnetza_mastr_storage_stats_muns.csv` +- Großspeicher (>=100 kWh): `bnetza_mastr_storage_large_stats_muns.csv` +- Kleinspeicher (<100 kWh): `bnetza_mastr_storage_small_stats_muns.csv` + +`bnetza_mastr_storage_pv_roof.json` enthält die spezifische Speicherkapazität +sowie spezifische Nennleistung der Speicher (bezogen auf die installierte +Leistung von PV-Aufdachanlagen), aggregiert für gesamte Region, für folgende +Randbedingungen: + +- Alle PV-Anlagen: `all_storages` +- PV-Anlagen mit 2..20 kWp sowie Batteriespeicher <20 kWh und <20 kW (kann in + [config.yml](../../digipipe/store/datasets/bnetza_mastr_storage_region/config.yml) unter `home_storages` konfiguriert werden): + `home_storages` + +**Dataset: `datasets/bnetza_mastr_storage_region`** + + +------------------------------ +## EE-Einspeisezeitreihen + +Einspeisezeitreihen für Erneuerbare Energien. Als Wetterjahr wird 2011 +verwendet, siehe [Szenarien](../../digipipe/store/../../docs/sections/scenarios.md). + +Raw dataset mit methodischer Beschreibung: +[renewables.ninja_feedin](../../digipipe/store/raw/renewables.ninja_feedin/dataset.md) + +### Einspeisezeitreihen + +Zeitreihe normiert auf Summe=1 für + +- Windenergie: `wind_feedin_timeseries.csv` +- Photovoltaik: `pv_feedin_timeseries.csv` +- Solarthermie: `st_feedin_timeseries.csv` +- Laufwasserkraft: `ror_feedin_timeseries.csv` + +**Dataset: `datasets/renewable_feedin`** + + +------------------------------ +## Emissionen + +Emissionen für Sachsen-Anhalt und die Region, aggregiert nach Sektoren der +CRF-Nomenklatur. + +Datei `emissions.json` enthält Chartdaten. 
+ +Raw dataset: [emissions](../../digipipe/store/raw/emissions/dataset.md) + +**Dataset: `datasets/emissions_region`** + + +------------------------------ +## Geo- oder Solarthermie-, Grubengas- und Klärschlamm-Anlagen + +Anlagen der Geo- oder Solarthermie, Grubengas und Klärschlamm in der Region +aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_gsgk_stats_muns.csv`. + +**Dataset: `datasets/bnetza_mastr_gsgk_region`** + + +------------------------------ +## Strombedarf + +Nettostrombedarfe und -zeitreihen für Haushalte, GHD und Industrie je Gemeinde. + +Die Berechnung der regionalen Prognosewerte je Verbrauchssektor erfolgt anhand +landesweiter Prognosen aus den +[BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md). + +### Haushalte + +- Jährlicher Strombedarf je Gemeinde in MWh aus + [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md), von Landkreis- auf + Gemeindeebene disaggregiert anhand von Bevölkerungsprognosen + ([STALA ST](../../digipipe/store/preprocessed/stala_st_pop_prog/dataset.md)). +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion des + Strombedarfs (ohne Wärmegewinnung) aus + [BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Hierbei wird das Szenario "TN-Strom" als Grundlage für den Status + quo verwendet und Werte für 2022 interpoliert. Die Zielwerte werden dem + Szenario "T45-Strom" entnommen. +- Gemittelte, normierte Strombedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md)-Daten von 2022, die + für alle Zielszenarien und Aggregationsebenen verwendet wird, da die Basis + SLP-Profile sind und Differenzen zwischen verschiedenen Jahren nur aufgrund + der Lage von Wochenenden und Feiertagen bestehen. Diese werden daher + vernachlässigt. + +### GHD + +- Jährlicher Strombedarf je Gemeinde in MWh aus + [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md), von Landkreis- auf + Gemeindeebene disaggregiert anhand von sozialversicherungspflichtig + Beschäftigten im Jahr 2022 + ([BA für Arbeit](../../digipipe/store/preprocessed/ba_employment/dataset.md)). +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion des + Strombedarfs (ohne Wärmegewinnung) aus + [BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Hierbei wird das Szenario "TN-Strom" als Grundlage für den Status + quo verwendet und Werte für 2022 interpoliert. 
Die Zielwerte werden dem + Szenario "T45-Strom" entnommen. +- Gemittelte, normierte Strombedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md)-Daten von 2022, die + für alle Zielszenarien und Aggregationsebenen verwendet wird. Basis bilden + sowohl SLP- als auch branchenspezifische Profile. Aufgrund der geringen + Differenzen zwischen den Landkreisen werden diese gemittelt. Differenzen + zwischen verschiedenen Jahren bestehen nur aufgrund der Lage von Wochenenden + und Feiertagen und werden daher vernachlässigt. + +### Industrie + +- Jährlicher Strombedarf je Gemeinde in MWh. Hierfür stehen 2 Datensätze zur + Verfügung - welcher verwendet wird, kann in der [Konfiguration](../../digipipe/store/datasets/demand_electricity_region/config.yml) + via `ind_electricity_demand_source` eingestellt werden: + - [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md): Werte für alle + Landkreise in Deutschland. + - [STALA ST](../../digipipe/store/preprocessed/stala_st_energy/dataset.md) (Standard): + Genauere Werte, jedoch nur für Sachsen-Anhalt verfügbar. +- Die Desaggregation von Landkreis- auf Gemeindeebene erfolgt anhand der + Beschäftigten im verarbeitenden Gewerbe im Jahr 2022 + ([Regionalstatistik](../../digipipe/store/preprocessed/regiostat/dataset.md)). +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion des + industriellen Gesamtenergiebedarfs aus + [BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Im Unterschied zu Haushalten und GHD liegen die Daten für den + Wärme- und Stromanteil nicht getrennt vor, sodass auf den + Gesamtenergiebedarf zurückgegriffen wird. + Es wird das Szenario "TN-Strom" als Grundlage für den Status quo verwendet und + Werte für 2022 interpoliert. Die Zielwerte werden dem Szenario "T45-Strom" + entnommen. +- Gemittelte, normierte Strombedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md)-Daten von 2022, die + für alle Zielszenarien und Aggregationsebenen verwendet wird. Basis bilden + sowohl SLP- als auch branchenspezifische Profile. Aufgrund der geringen + Differenzen zwischen den Landkreisen werden diese gemittelt. Differenzen + zwischen verschiedenen Jahren bestehen nur aufgrund der Lage von Wochenenden + und Feiertagen und werden daher vernachlässigt. + +**Dataset: `datasets/demand_electricity_region`** + + +------------------------------ +## Dachflächenpotenzial PV-Aufdachanlagen in ABW + +Abschätzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in +Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft aus Datensatz +[rpg_abw_pv_roof_potential](../../digipipe/store/raw/rpg_abw_pv_roof_potential/dataset.md). + +Die Gebäudezentroide werden mit den Gemeindegrenzen verschnitten und den +Gemeinden zugeordnet. +Ergebnisdaten: + +- Alle Gebäude: `potentialarea_pv_roof_area_stats_muns.csv` +- Alle nicht denkmalgeschützten Gebäude: + `potentialarea_pv_roof_wo_historic_area_stats_muns.csv` + +Des Weiteren wird je Gemeinde der relative Anteil der bereits installierten +Anlagenleistung an der theoretisch installierbaren Leistung (bei +100% Dachnutzung) berechnet. +Ergebnisdaten: + +- Alle Gebäude: `potentialarea_pv_roof_deployment_stats_muns.csv` +- Alle nicht denkmalgeschützten Gebäude: + `potentialarea_pv_roof_wo_historic_deployment_stats_muns.csv` + +Die Gemeinden werden über den Schlüssel `municipality_id` (vgl. 
+[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) +identifiziert. + +### Ausbauziele + +Es werden PV-Ausbauziele für die Region berechnet, indem die Bundesziele aus den +[BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) +i.H.v. 428 GW +([§4 EEG 2023](https://www.gesetze-im-internet.de/eeg_2014/__4.html): 400 GW) +anhand der Gebäudegrundflächen disaggregiert werden. Hierzu wird der Anteil der +Gebäudegrundflächen in der Region an der bundesweiten Gebäudegrundflächen +berechnet (s. Datensatz [osm_buildings](../../digipipe/store/datasets/osm_buildings/dataset.md)) und die +Ziele linear skaliert. Da in den o.g. Ausbauzielen nicht zwischen Freiflächen- +und Aufdach-PV unterschieden wird, wird ein Verhältnis von 50:50 angenommen, +d.h. bundesweit 214 GW auf Aufdach-PV entfallen. + +Der Anteil beträgt 0,62 % und das Leistungsziel damit 1327 MW, s. +`potentialarea_pv_roof_regionalized_targets.json`. + +**Dataset: `datasets/potentialarea_pv_roof_region`** + + +------------------------------ +## Biomasse-/Biogasanlagen + +Biomasse-/Biogasanlagen in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_biomass_stats_muns.csv`. + +**Dataset: `datasets/bnetza_mastr_biomass_region`** + + +------------------------------ +## Staat + +Staatsgrenze aus Geodaten der Verwaltungsgebiete extrahiert und nach Landmasse +gefiltert (Geofaktor 4 = "mit Struktur Land"). + +**Dataset: `datasets/bkg_vg250_state`** + + +------------------------------ +## Wärmepumpen COP + +Zeitreihe für die Leistungszahl / Coefficient of performance (COP) für +Wärmepumpen. Berücksichtigt werden Luftwärmepumpen (ASHP) und Erdwärmepumpen +(GSHP). Der COP wird mit Hilfe von Zeitreihen der Umgebungstemperatur (ASHP) +bzw. der Bodentemperatur (GSHP) für jeden Zeitschritt berechnet. + +Details zur Berechnungsmethodik können der Dokumentation von +[oemof.thermal](https://oemof-thermal.readthedocs.io/en/latest/compression_heat_pumps_and_chillers.html) +entnommen werden. + +Annahmen: + +- Vorlauftemperatur: 40 °C +- Gütegrad / Quality grade: 0.4 (nach + [VDE](https://www.energiedialog2050.info/wp-content/uploads/simple-file-list/VDE_ST_ETG_Warmemarkt_RZ-web.pdf)) +- Vereisungsverluste bei ASHP: 20 % bei <2 °C + +Daraus ergibt sich eine mittlere Jahresarbeitszahl (JAZ) von 3,3 für ASHP und +4,3 für GSHP, die mit typischen Werten für 2019 +([AEW](https://static.agora-energiewende.de/fileadmin/Projekte/2022/2022-04_DE_Scaling_up_heat_pumps/A-EW_273_Waermepumpen_WEB.pdf)) +übereinstimmen. 
Für das Zukunftsszenario wird ferner ein Effizienzgewinn durch +technische Weiterentwicklung von 25 % angenommen +[ewi](https://www.ewi.uni-koeln.de/cms/wp-content/uploads/2015/12/2014_06_24_ENDBER_P7570_Energiereferenzprognose-GESAMT-FIN-IA.pdf). + +Beide separat erstelle Zeitreihen werden anhand der heutigen Marktdurchdringung +gewichtet und in eine mittlere Zeitreihe für Wärmepumpen überführt. Im Jahr +2022 wurden 87 % ASHP und 13 % GSHP abgesetzt nach +[BWP](https://www.waermepumpe.de/fileadmin/user_upload/waermepumpe/05_Presse/01_Pressemitteilungen/BWP_Branchenstudie_2023_DRUCK.pdf), +über die letzten 10 Jahre beträgt das Verhältnis ca. 80:20. +Für 2045 wird daher ein Anteil von 80 % ASHP und 20 % GSHP angenommen. + +Verwendet Datensätze: + +- [dwd_temperature](../../digipipe/store/preprocessed/dwd_temperature/dataset.md) + +**Dataset: `datasets/heatpump_cop`** + + +------------------------------ +## Photovoltaik-Freiflächenanlagen + +Photovoltaik-Freiflächenanlagen in der Region aus MaStR-Registerdaten als +Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_pv_ground_stats_muns.csv`. + +### Datenkorrektur + +Einige Anlagen sind hinsichtlich Ihrer geografischen Lage oder Typs fehlerhaft. +Anhand des Datensatzes +[bnetza_mastr_correction_region](../../digipipe/store/raw/bnetza_mastr_correction_region/dataset.md) +wird für diese Anlagen eine Datenkorrektur vorgenommen. + +**Dataset: `datasets/bnetza_mastr_pv_ground_region`** + + +------------------------------ +## Landkreise + +Landkreise der Region aus Geodaten der Verwaltungsgebiete extrahiert und +nach Landmasse gefiltert (Geofaktor 4 = "mit Struktur Land"). + +**Dataset: `datasets/bkg_vg250_districts_region`** + + +------------------------------ +## Potenzialgebiete PV-Freiflächen + +### Potenzialflächen + +Potenzialgebiete für die Errichtung von PV-Freiflächenanlagen aus dem +[PV- und Windflächenrechner](https://www.agora-energiewende.de/service/pv-und-windflaechenrechner/) +(s. Datensatz [rli_pv_wfr](../../digipipe/store/raw/rli_pv_wfr/dataset.md)). + +Die Potenzialflächen bilden jene Flächen ab, die für die Nutzung durch +Freiflächen-Photovoltaikanlagen grundsätzlich zur Verfügung stehen. Sie +orientieren sich an der aktuellen Förderkulisse und wurden anhand des +Flächenumfangs sowie den verfügbaren Geodaten ausgewählt: Von den in §37 EEG +2021 definierten Flächen werden Flächen nach §37 Absatz 1 Nummer 2 Buchstaben c, +h und i berücksichtigt (für Details zur Methodik siehe +[methodisches Begleitdokument](https://zenodo.org/record/6794558) zum PV- und +Windflächenrechner). 
+ +Dateien: + +- Freiflächen-PV auf Acker- und Grünlandflächen mit geringer Bodengüte (Soil + Quality Rating (SQR) < 40): `potentialarea_pv_agriculture_lfa-off_region.gpkg` +- Potenzialflächen für Freiflächen-PV entlang von Bundesautobahnen und + Schienenwegen (500m-Streifen): `potentialarea_pv_road_railway_region.gpkg` + +### Statistische Auswertung + +Die Flächen werden mit den Gemeindegrenzen verschnitten und den Gemeinden +zugeordnet. Je Gemeinde und obigem Flächentyp/Datei wird eine Flächensumme (in +km²) berechnet, siehe `potentialarea_pv_ground_area_stats_muns.csv`. Die +Gemeinden werden über den Schlüssel `municipality_id` (vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) +identifiziert. + +Des Weiteren werden die Flächenanteile der verfügbaren Potenzialgebiete - deren +Nutzung nur eingeschränkt möglich ist (z.B. durch Naturschutzgebieten etc.) - +gegenüber den gesamten Potenzialgebiete (für die Parametrierung der Regler) nach +`potentialarea_pv_ground_area_shares.json` exportiert. + +### Ausbauziele + +Es werden PV-Ausbauziele für die Region berechnet, indem die Bundesziele aus den +[BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) +i.H.v. 428 GW +([§4 EEG 2023](https://www.gesetze-im-internet.de/eeg_2014/__4.html): 400 GW) +anhand der regional verfügbaren Potenzialflächen disaggregiert werden. Hierzu +wird der Anteil der Flächensumme der beiden o.g. Flächentypen an den bundesweit +verfügbaren Flächen (Datensatz [rli_pv_wfr](../../digipipe/store/raw/rli_pv_wfr/dataset.md)) +berechnet. Da in den o.g. Ausbauzielen nicht zwischen Freiflächen- und +Aufdach-PV unterschieden wird, wird ein Verhältnis von 50:50 angenommen, d.h. +bundesweit 214 GW auf Freiflächen-PV entfallen. + +Es ergeben sich folgende Flächen- und Leistungsanteile: + +Gesamt: 0.38 % (819 MW) + +- Entlang von BAB und Schienenwegen: 0.13 % (278 MW) +- Acker- und Grünlandflächen mit geringer Bodengüte: 0.25 % (541 MW) + +Ergebnisse in `potentialarea_pv_ground_regionalized_targets.json` + +**Dataset: `datasets/potentialarea_pv_ground_region`** + + +------------------------------ +## Sozialversicherungspflichtig Beschäftigte und Betriebe + +Gesamtanzahl sozialversicherungspflichtig Beschäftigte und Betriebsstätten +je Gemeinde für die Region. + +Raw datasets: +[ba_employment](../../digipipe/store/raw/ba_employment/dataset.md), +[regiostat](../../digipipe/store/raw/regiostat/dataset.md) + +**Dataset: `datasets/employment_region`** + + +------------------------------ +## Bevölkerungsentwicklung + +EinwohnerInnen je Gemeinde: Historische Daten und Prognosen + +### Historische Daten bis 2022 + +Statistisches Bundesamt (Raw dataset: +[destatis_gv](../../digipipe/store/raw/destatis_gv/dataset.md)) + +### Prognosen bis 2035 + +Statistisches Landesamt Sachsen-Anhalt (Raw dataset: +[stala_st_pop_prog](../../digipipe/store/raw/stala_st_pop_prog/dataset.md)). Deaktivieren +mittels entfernen der Zieljahre in [config.yml](../../digipipe/store/datasets/population_region/config.yml) im Abschnitt +`prognosis_fstate_munlevel`. + +Kann für andere Regionen auch durch DemandRegio (s.u.) ersetzt werden, die +tatsächliche regionale Auflösung wird dadurch reduziert. + +### Prognosen bis 2045 + +DemandRegio (Raw dataset: [demandregio](../../digipipe/store/raw/demandregio/dataset.md)) +basierend auf der +[14. 
koordinierten Bevölkerungsvorausberechnung](https://www.destatis.de/DE/Themen/Gesellschaft-Umwelt/Bevoelkerung/Bevoelkerungsvorausberechnung/aktualisierung-bevoelkerungsvorausberechnung.html) +der Statistischen Ämter von Bund und Ländern. Diese Daten liegen auf +Landkreisebene vor, daher erfolgt eine gleichmäßige Skalierung der +dazugehörigen Gemeinden auf den jeweiligen Prognosewert. + +Deaktivieren mittels entfernen der Zieljahre in [config.yml](../../digipipe/store/datasets/population_region/config.yml) im +Abschnitt `prognosis_germany_districtlevel`. + +### Extrapolation + +Über 2045 hinaus wird lineare Extrapolation auf Basis der letzten beiden +Prognosejahre unterstützt. Um diese zu aktivieren, müssen lediglich Zieljahre +in die [config.yml](../../digipipe/store/datasets/population_region/config.yml) im Abschnitt `extrapolation` eingetragen werden. + +**Dataset: `datasets/population_region`** + + +------------------------------ +## Windenergieanlagen + +Windenergieanlagen in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_wind_stats_muns.csv`. + +**Dataset: `datasets/bnetza_mastr_wind_region`** + + +------------------------------ +## Bundesländer + +Bundesländergrenzen aus Geodaten der Verwaltungsgebiete extrahiert und nach +Landmasse gefiltert (Geofaktor 4 = "mit Struktur Land"). + +**Dataset: `datasets/bkg_vg250_federal_states`** + + +------------------------------ +## Photovoltaik-Aufdachanlagen + +Photovoltaik-Aufdachanlagen in der Region aus MaStR-Registerdaten als +Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_pv_roof_stats_muns.csv`. 
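+
+Zur Veranschaulichung der Gemeindezuordnung und der statistischen Auswertung
+eine minimale Skizze mit GeoPandas (nicht die Pipeline-Implementierung;
+Datei- und Spaltennamen wie `capacity_net` sind hier nur beispielhaft
+angenommen):
+
+```
+import geopandas as gpd
+
+# Anlagen (Punkte) und Gemeindegrenzen laden (Dateinamen beispielhaft)
+units = gpd.read_file("bnetza_mastr_pv_roof_region.gpkg")
+muns = gpd.read_file("bkg_vg250_muns_region.gpkg")[["municipality_id", "geometry"]]
+
+# Räumlicher Join: jeder Anlage die Gemeinde zuordnen, in der sie liegt
+units = gpd.sjoin(units.to_crs(muns.crs), muns, how="left", predicate="within")
+
+# Statistische Auswertung: Anzahl Anlagen und installierte Leistung je Gemeinde
+stats = units.groupby("municipality_id").agg(
+    unit_count=("capacity_net", "size"),
+    capacity_net_sum=("capacity_net", "sum"),
+)
+stats.to_csv("bnetza_mastr_pv_roof_stats_muns.csv")
+```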
+ +### Datenkorrektur + +Einige Anlagen sind hinsichtlich Ihrer geografischen Lage oder Typs fehlerhaft. +Anhand des Datensatzes +[bnetza_mastr_correction_region](../../digipipe/store/raw/bnetza_mastr_correction_region/dataset.md) +wird für diese Anlagen eine Datenkorrektur vorgenommen. + +**Dataset: `datasets/bnetza_mastr_pv_roof_region`** + + +------------------------------ +## Wasserkraftanlagen + +Wasserkraftanlagen in der Region aus MaStR-Registerdaten als Geopackage. +Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in +Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert +übernommen, für Anlagen die keine Koordinaten aufweisen (üblicherweise <=30 +kW Nennleistung) erfolgt ein Geocoding anhand von PLZ und Ort, um eine +ungefähre Position bereit zu stellen. + +Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt, +der alle Anlagen mit approximierter Position je Position zusammenfasst und +jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung). + +Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut +`municipality_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) und +einem Landkreis (Attribut `district_id`, vgl. +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_districts_region/dataset.md)) +zugeordnet. + +Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in +`bnetza_mastr_hydro_stats_muns.csv`. + +**Dataset: `datasets/bnetza_mastr_hydro_region`** + + +------------------------------ +## Wärmebedarf + +Wärmebedarfe (Endenergie) Fernwärme und dezentrale Wärme sowie Wärmezeitreihen +für Haushalte, GHD und Industrie je Gemeinde. + +### Gesamtwärmebedarf + +Die Berechnung der regionalen Prognosewerte je Verbrauchssektor erfolgt anhand +landesweiter Prognosen aus den +[BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md). + +#### Haushalte + +- Jährlicher Wärmebedarf je Gemeinde in MWh: Bundeswert aus + [AG Energiebilanzen](../../digipipe/store/preprocessed/ageb_energy_balance/dataset.md) + 2021 für Raumwärme, Warmwasser und Prozesswärme, desaggregiert auf Gemeinden + mittels Wärmebedarfs-Rasterdaten aus 2015 (Wärmebedarfsdichte 1ha) aus + [Peta5](../../digipipe/store/raw/seenergies_peta5/dataset.md). + Anm.: Die Desaggregation könnte alternativ über Zensus "Gebäude mit Wohnraum + nach Heizungsart" (31231-02-01-5, s. + [regiostat](../../digipipe/store/raw/regiostat/dataset.md) erfolgen) +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion der + Gebäudewärmebedarfe aus + [BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Hierbei wird das Szenario "TN-Strom" als Grundlage für den Status + quo verwendet und Werte für 2022 interpoliert. Die Zielwerte werden dem + Szenario "T45-Strom" entnommen. +- Gemittelte, normierte Gasbedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md)-Daten von 2022 die + für alle Zielszenarien und Aggregationsebenen verwendet wird, da die Basis + SLP-Profile sind und Differenzen zwischen verschiedenen Jahren nur aufgrund + der Lage von Wochenenden und Feiertagen bestehen. Diese werden daher + vernachlässigt. 
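+
+Die oben beschriebene lineare Skalierung auf das Zieljahr lässt sich
+vereinfacht wie folgt skizzieren (alle Zahlenwerte sind frei gewählte
+Platzhalter, nicht die tatsächlichen Szenariowerte):
+
+```
+# Landeswerte aus dem Szenario "TN-Strom" (Platzhalter, z.B. in TWh)
+tn_strom = {2020: 100.0, 2030: 80.0}
+# Zielwert 2045 aus dem Szenario "T45-Strom" (Platzhalter)
+t45_strom_2045 = 55.0
+
+# Status quo 2022 durch lineare Interpolation zwischen den Stützjahren
+tn_2022 = tn_strom[2020] + (tn_strom[2030] - tn_strom[2020]) * (2022 - 2020) / (2030 - 2020)
+
+# Reduktionsfaktor auf Landesebene, angewendet auf einen Gemeindewert (MWh)
+reduction = t45_strom_2045 / tn_2022
+demand_mun_2022 = 12_345.0
+demand_mun_2045 = demand_mun_2022 * reduction
+```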
+ +#### GHD + +- Jährlicher Wärmebedarf je Gemeinde in MWh: Bundeswert aus + [AG Energiebilanzen](../../digipipe/store/preprocessed/ageb_energy_balance/dataset.md) + 2021 für Raumwärme, Warmwasser und Prozesswärme, desaggregiert auf Gemeinden + mittels Wärmebedarfs-Rasterdaten aus 2015 (Wärmebedarfsdichte 1ha) aus + [Peta5](../../digipipe/store/raw/seenergies_peta5/dataset.md) +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion der + Gebäudewärmebedarfe aus + [BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Hierbei wird das Szenario "TN-Strom" als Grundlage für den Status + quo verwendet und Werte für 2022 interpoliert. Die Zielwerte werden dem + Szenario "T45-Strom" entnommen. +- Gemittelte, normierte Gasbedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md)-Daten von 2022 die + für alle Zielszenarien und Aggregationsebenen verwendet wird, da die Basis + SLP-Profile sind und Differenzen zwischen verschiedenen Jahren nur aufgrund + der Lage von Wochenenden und Feiertagen bestehen. Diese werden daher + vernachlässigt. + +#### Industrie + +- Jährlicher Wärmebedarf je Gemeinde in MWh: Bundeswert aus + [AG Energiebilanzen](../../digipipe/store/preprocessed/ageb_energy_balance/dataset.md) + 2021 für Raumwärme, Warmwasser und Prozesswärme. Die Desaggregation auf + Landkreisebene erfolgt anhand des Gesamtenergiebedarfs im verarbeitenden + Gewerbe aus [Regionalstatistik](../../digipipe/store/preprocessed/regiostat/dataset.md). + Die anschließende Desaggregation auf Gemeindeebene wird mittels + Beschäftigtenzahlen im verarbeitenden Gewerbe in 2022 aus + [Regionalstatistik](../../digipipe/store/preprocessed/regiostat/dataset.md) vorgenommen. +- Prognosewerte für 2045 werden durch lineare Skalierung mittels Reduktion des + industriellen Gesamtenergiebedarfs aus + [BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) + berechnet. Im Unterschied zu Haushalten und GHD liegen die Daten für den + Wärme- und Stromanteil nicht getrennt vor, sodass auf den + Gesamtenergiebedarf zurückgegriffen wird. + Es wird das Szenario "TN-Strom" als Grundlage für den Status quo verwendet und + Werte für 2022 interpoliert. Die Zielwerte werden dem Szenario "T45-Strom" + entnommen. +- Gemittelte, normierte Gasbedarfszeitreihe (auf 1 MWh) aus + [DemandRegio](../../digipipe/store/preprocessed/demandregio/dataset.md)-Daten von 2022 die + für alle Zielszenarien und Aggregationsebenen verwendet wird, da die Basis + SLP-Profile sind und Differenzen zwischen verschiedenen Jahren nur aufgrund + der Lage von Wochenenden und Feiertagen bestehen. Diese werden daher + vernachlässigt. +- Es erfolgt keine Aufteilung des Wärmebedarfs auf unterschiedliche + Temperaturniveaus. + +### Dezentrale Wärme und Fernwärme + +Der Gesamtwärmebedarf wird auf dezentrale Heizsysteme und Fernwärme aufgeteilt. +Fernwärmenetze existieren in Dessau-Roßlau, Bitterfeld-Wolfen, Köthen und +Wittenberg. 
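+
+Eine minimale Skizze der Aufteilung je Gemeinde anhand sektorspezifischer
+Fernwärmeanteile (die tatsächlichen Anteile folgen in der Tabelle unten;
+Gemeindeschlüssel und Zahlenwerte sind hier nur Platzhalter):
+
+```
+import pandas as pd
+
+# Platzhalter: Gesamtwärmebedarf Haushalte je Gemeinde (MWh)
+demand_hh = pd.Series({"mun_a": 50_000.0, "mun_b": 120_000.0})
+
+# Platzhalter: Fernwärmeanteil je Gemeinde (0, falls kein Fernwärmenetz)
+dh_share = pd.Series({"mun_a": 0.0, "mun_b": 0.36})
+
+demand_hh_cen = demand_hh * dh_share        # Fernwärme (zentral)
+demand_hh_dec = demand_hh * (1 - dh_share)  # dezentrale Wärme
+```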
+ +Da keine Daten zum tatsächlichen Fernwärmebedarf vorliegen, werden Annahmen auf +Basis folgender Quellen getroffen: + +- [Zensus 2011: Gebäude nach Heizungsart](https://www.regionalstatistik.de/genesis//online?operation=table&code=31211-04-01-5-B) +- [BMWK Langfristszenarien: Wärmenachfrage in Wärmenetzen (HH&GHD) (2025)](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/54022/62a2667df6f8c176ff129f7ede944837) +- [STALA ST: Wärmebilanz der Industriebetriebe (2021)](https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/energie-und-wasserversorgung/tabellen-energieverwendung#c256237) +- [STALA ST: Energie- und Wasserversorgung](https://statistik.sachsen-anhalt.de/fileadmin/Bibliothek/Landesaemter/StaLa/startseite/Themen/Energie/Berichte/6E403_2020-A.pdf) +- [WindNODE](https://windnode-abw.readthedocs.io/en/latest/energy_system_model.html#district-heating) +- [Peta5: D5 1 District Heating Areas (2020)](https://s-eenergies-open-data-euf.hub.arcgis.com/datasets/b62b8ad79f0e4ae38f032ad6aadb91a0_0/) + +Annahmen zu Fernwärmeanteilen (Anteil der Endenergie aus Fernwärme an gesamter +Wärme-Endenergie) je Bedarfssektor: + +| Fernwärmenetz | Haushalte | GHD | Industrie | +|-------------------|----------:|-----:|----------:| +| Dessau-Roßlau | 0,36 | 0,36 | 0,19 | +| Bitterfeld-Wolfen | 0,11 | 0,11 | 0,21 | +| Köthen | 0,07 | 0,07 | 0,21 | +| Wittenberg | 0,15 | 0,15 | 0,01 | + +Die Fernwärmeanteile können in der [config.yml](../../digipipe/store/datasets/demand_heat_region/config.yml) im Abschnitt +`district_heating_share` für jeden Sektor separat angepasst werden. Es wird +vereinfachend angenommen, dass der Anteil an Fernwärme für alle +Szenarien/Zieljahre gleich bleibt. + +### Beheizungsstruktur + +Die Beheizungsstruktur für 2020 und 2045 wird den +[BMWK Langfristszenarien](../../digipipe/store/preprocessed/bmwk_long_term_scenarios/dataset.md) +entnommen (Gebäude: Haushalte und GHD Energiebedarf) und für 2022 interpoliert. +Hierbei wird nach Technologien für dezentrale sowie Fernwärme unterschieden. +Für die Biomasse wird der relative Energiebedarf mit Hilfe von Anteilen der +installierten Leistung von spezifischen Biomasse-Konversionsanlagen +[dbfz_biomasss_capacity_rel](../../digipipe/store/preprocessed/dbfz_biomass_capacity_rel/dataset.md) +je Technologie aufgelöst. Der Vereinfachung halber wird angenommen, dass die +relative installierte Leistung der relativen Energiemenge entspricht. 
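+
+Die Auflösung des Biomasse-Anteils auf einzelne Technologien anhand der
+relativen installierten Leistungen lässt sich etwa so skizzieren (Zahlen und
+Bezeichner sind frei gewählte Platzhalter):
+
+```
+# Platzhalter: Anteil Biomasse an der Beheizungsstruktur (dezentral)
+biomass_share = 0.18
+
+# Platzhalter: relative installierte Leistung je Energieträger/Technologie
+# (Summe = 1), vgl. dbfz_biomass_capacity_rel
+capacity_rel = {
+    "biogas_bpchp": 0.25,
+    "methane_bpchp": 0.10,
+    "wood_extchp": 0.10,
+    "wood_oven": 0.55,
+}
+
+# Annahme der Methodik: relative Leistung entspricht relativer Energiemenge
+heating_structure_biomass = {
+    tech: biomass_share * share for tech, share in capacity_rel.items()
+}
+```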
+
+### Ergebnisdaten
+
+- Haushalte: Wärmebedarf gesamt: `demand_hh_heat_demand.csv`
+- Haushalte: Wärmebedarf Fernwärme: `demand_hh_heat_demand_cen.csv`
+- Haushalte: Wärmebedarf dezentrale Wärme: `demand_hh_heat_demand_dec.csv`
+- Haushalte: Zeitreihen: `demand_hh_heat_timeseries.csv`
+
+- GHD: Wärmebedarf gesamt: `demand_cts_heat_demand.csv`
+- GHD: Wärmebedarf Fernwärme: `demand_cts_heat_demand_cen.csv`
+- GHD: Wärmebedarf dezentrale Wärme: `demand_cts_heat_demand_dec.csv`
+- GHD: Zeitreihen: `demand_cts_heat_timeseries.csv`
+
+- Industrie: Wärmebedarf gesamt: `demand_ind_heat_demand.csv`
+- Industrie: Wärmebedarf Fernwärme: `demand_ind_heat_demand_cen.csv`
+- Industrie: Wärmebedarf dezentrale Wärme: `demand_ind_heat_demand_dec.csv`
+- Industrie: Zeitreihen: `demand_ind_heat_timeseries.csv`
+
+- Beheizungsstruktur dezentral (informativ): `demand_heat_structure_dec.csv`
+- Beheizungsstruktur zentral (informativ): `demand_heat_structure_cen.csv`
+- Beheizungsstruktur dezentral für Weiterverwendung im Energiesystem:
+  `demand_heat_structure_esys_dec.csv`
+- Beheizungsstruktur Fernwärme für Weiterverwendung im Energiesystem:
+  `demand_heat_structure_esys_cen.csv`
+
+**Dataset: `datasets/demand_heat_region`**
+
+
+------------------------------
+## OpenStreetMap Gebäude
+
+OSM Gebäude aus [osm_filtered](../../digipipe/store/preprocessed/osm_filtered/dataset.md)
+mittels OGR extrahiert und nach Tags (s. [config.yml](../../digipipe/store/datasets/osm_buildings/config.yml)) gefiltert.
+
+Ziel ist die Ermittlung des regionalen Anteils der Gebäudegrundflächen an der
+gesamten Gebäudegrundfläche in Deutschland.
+
+Schritte:
+
+- Extraktion aller Gebäude in Deutschland --> `osm_buildings.gpkg`
+- Zentroide und Fläche je Gebäude erstellen --> `osm_buildings_centroids.gpkg`
+- Mit Region verschneiden --> `osm_buildings_centroids_region.gpkg`
+- Flächensumme berechnen --> `osm_buildings_ground_area_region.gpkg`,
+  `osm_buildings_ground_area_country.gpkg`
+- Regionalen Anteil berechnen --> `osm_buildings_ground_area_share_region.json`
+
+**Achtung:** Konvertierungs- und Extraktionsprozess benötigt ~15 GB
+Speicherplatz und kann viel Zeit in Anspruch nehmen.
+
+**Dataset: `datasets/osm_buildings`**
+
+
+------------------------------
+## Verbrennungskraftwerke
+
+Verbrennungskraftwerke in der Region aus MaStR-Registerdaten als Geopackage.
+Es werden alle Anlagen berücksichtigt, die in Betrieb sind oder sich in
+Planung befinden. Anlagen mit Geokoordinaten werden georeferenziert
+übernommen, für Anlagen, die keine Koordinaten aufweisen (üblicherweise <=30
+kW Nennleistung), erfolgt ein Geocoding anhand von PLZ und Ort, um eine
+ungefähre Position bereitzustellen.
+
+Neben einem anlagenscharfen Datensatz wird ein weiterer Datensatz erzeugt,
+der alle Anlagen mit approximierter Position je Position zusammenfasst und
+jeweils typische Kennwerte enthält (u.a. Anzahl Anlagen, Gesamtleistung).
+
+Jede Anlage wird anhand ihrer Lokation einer Gemeinde (Attribut
+`municipality_id`, vgl.
+[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md)) und
+einem Landkreis (Attribut `district_id`, vgl.
+[bkg_vg250_districts_region](../../digipipe/store/datasets/bkg_vg250_districts_region/dataset.md))
+zugeordnet.
+
+Zusätzlich erfolgt eine statistische Auswertung der installierten Leistung in
+`bnetza_mastr_combustion_stats_muns.csv`.
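+
+Der oben erwähnte aggregierte Datensatz (eine Zeile je approximierter
+Position mit Anzahl Anlagen und Gesamtleistung) lässt sich z.B. so
+skizzieren (Spaltennamen wie `geometry_approximated` und `capacity_net`
+sind Annahmen):
+
+```
+import geopandas as gpd
+
+units = gpd.read_file("bnetza_mastr_combustion_region.gpkg")
+
+# Nur Anlagen mit approximierter (geocodierter) Position betrachten
+geocoded = units[units["geometry_approximated"] == 1].copy()
+geocoded["pos"] = geocoded.geometry.to_wkt()
+
+# Je Position: Anzahl Anlagen und Gesamtleistung als typische Kennwerte
+agg = geocoded.groupby("pos").agg(
+    unit_count=("capacity_net", "size"),
+    capacity_net_sum=("capacity_net", "sum"),
+)
+```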
+ +**Dataset: `datasets/bnetza_mastr_combustion_region`** + + +------------------------------ +## Gemeinden + +Gemeinden der Region aus Geodaten der Verwaltungsgebiete extrahiert und +nach Landmasse gefiltert (Geofaktor 4 = "mit Struktur Land"). + +**Dataset: `datasets/bkg_vg250_muns_region`** + + +------------------------------ +## Bezeichner und Namen aus MaStR + +Bezeichner und Namen aus MaStR als Mapping -> + wobei CamelCase aus in Leerzeichen konvertiert +werden. + +**Dataset: `datasets/bnetza_mastr_captions`** + + +------------------------------ +## Geodaten PV- und Windflächenrechner + +Geodaten aus dem [PV- und Windflächenrechner](https://www.agora-energiewende.de/service/pv-und-windflaechenrechner/), +extrahiert, zu LAEA Europe (EPSG:3035) umprojiziert und auf die Regionsgrenzen +zugeschnitten. + +Preprocessed dataset: +[rli_pv_windflaechenrechner](../../digipipe/store/preprocessed/rli_pv_wfr/dataset.md) + +**Dataset: `datasets/rli_pv_wfr_region`** + + +------------------------------ +## Region + +Region aus Geodaten der Landkreise zusammengeführt. + +**Dataset: `datasets/bkg_vg250_region`** + + +------------------------------ +## Settings für App + +Einstellungen für die App. + +### Layerliste (rechtes Panel) + +- Konfiguration: [config.yml](https://github.com/rl-institut/digipipe/blob/main/digipipe/store/datasets/app_settings/config.yml) --> `map_panel_layer_list` +- Ergebnisfile: `map_panel_layer_list.json` +- Wird manuell in die App eingepflegt (s. + [map_config.py](https://github.com/rl-institut/digiplan/blob/main/digiplan/map/map_config.py)) + +### Settings panels + +Die im linken Panel aufgeführten Einstellelemente (Slider und Schalter) werden +hier parametriert. + +- Konfiguration des Templates: + [config.yml](https://github.com/rl-institut/digipipe/blob/main/digipipe/store/datasets/app_settings/config.yml) --> `panel_settings_templates` +- Parametrierung der Slider und Schalter: + [panels.py](https://github.com/rl-institut/digipipe/blob/main/digipipe/store/datasets/app_settings/scripts/panels.py) +- Ergebnisfiles: + - `energy_settings_panel.json` + - `heat_settings_panel.json` + - `traffic_settings_panel.json` +- Werden in die App eingelesen + +#### Parametrierung der Einstellelemente + +Für die Slider werden folgende Attribute gesetzt: +Minimum, Maximum, Schrittweite, Startwert, Status-quo-Wert, Zielwert 2045. +Diese werden wie folgt bestimmt (vgl. auch (i)-Tooltips an den Elementen): + +| **Technologie** | **Element id** | **Maximum** | **Startwert** | **Status-quo-Wert** | **Zielwert 2045** | +|-------------------------|----------------|-------------------------------------|---------------------|-------------------------------------------------|------------------------------------------------| +| Windenergie | `s_w_1` | Inst. Leistung in bestehenden VR/EG | Wie Status-quo-Wert | Inst. Leistung 2022 | Aus Flächenziel Sachsen-Anhalt (2,2 % in 2032) | +| | `s_w_3` | - | Wie Status-quo-Wert | On | - | +| | `s_w_4` | - | Wie Status-quo-Wert | Off | - | +| | `s_w_4_1` | - | Wie Status-quo-Wert | On | - | +| | `s_w_4_2` | - | Wie Status-quo-Wert | Off | - | +| | `s_w_5` | - | Wie Status-quo-Wert | Off | - | +| | `s_w_5_1` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| | `s_w_5_2` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| Freiflächen-PV | `s_pv_ff_1` | | Wie Status-quo-Wert | Inst. 
Leistung 2022 | Aus EEG 2023 und regionalen Potenzialen | +| | `s_pv_ff_3` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| | `s_pv_ff_4` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| Aufdach-PV | `s_pv_d_1` | | Wie Status-quo-Wert | Inst. Leistung 2022 | Aus EEG 2023 und regionalen Potenzialen | +| | `s_pv_d_3` | 100 % | Wie Status-quo-Wert | Theoret. Wert berechnet aus inst. Leistung 2022 | - | +| | `s_pv_d_4` | 100 % | Wie Status-quo-Wert | Aus MaStR | - | +| Wasserkraft | `s_h_1` | Inst. Leistung 2022 | Wie Status-quo-Wert | Inst. Leistung 2022 | Inst. Leistung 2022 | +| Stromverbrauch | `s_v_1` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `s_v_3` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `s_v_4` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `s_v_5` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| Batterie-Großspeicher | `s_s_g_1` | 50 % | Wie Status-quo-Wert | Aus inst. Kapazität und Einspeisung 2022 | - | +| | | | Wie Status-quo-Wert | | | +| WP dezentral | `w_d_wp_1` | 95 % | 50 % | Inst. Leistung 2022 aus BMWK Langfristszenarien | Wert 2045 aus BMWK Langfristszenarien | +| | `w_d_wp_3` | 95 % | 50 % | - | - | +| | `w_d_wp_4` | 95 % | 50 % | - | - | +| | `w_d_wp_5` | 95 % | 50 % | - | - | +| WP zentral | `w_z_wp_1` | 95 % | 50 % | Inst. Leistung 2022 aus BMWK Langfristszenarien | Wert 2045 aus BMWK Langfristszenarien | +| Wärmeverbrauch | `w_v_1` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `w_v_3` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `w_v_4` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| | `w_v_5` | 200 % | Wie Status-quo-Wert | Verbrauch 2022 (100 %) | Wert 2045 aus BMWK Langfristszenarien | +| Wärmespeicher dezentral | `w_d_s_1` | 200 % | 100 % | - | - | +| Wärmespeicher zentral | `w_z_s_1` | 200 % | 100 % | - | - | + +Die Maxima der Regler im Hauptpanel (`s_w_1`, `s_pv_ff_1` usw.) werden in der +App dynamisch aus den durch die UserInnen vorgenommenen Detaileinstellungen +(`s_w_3`, `s_pv_ff_1` usw.) berechnet. + +**Dataset: `datasets/app_settings`** + + +------------------------------ +## Captions + +Beschriftungen für WebApp. + +Dateien: + +- Felder: `captions_fields.json` + +**Dataset: `datasets/app_captions`** + diff --git a/docs/datasets/preprocessed_datasets.md b/docs/datasets/preprocessed_datasets.md new file mode 100644 index 00000000..e1a22d85 --- /dev/null +++ b/docs/datasets/preprocessed_datasets.md @@ -0,0 +1,243 @@ +# 'Preprocessed' Datasets + +------------------------------ +## Lokale Verwaltungseinheiten + +Lokale Verwaltungseinheiten (LAUs) von Eurostat, mit NUTS kompatibel. Diese +LAUs sind die Bausteine der NUTS und umfassen die Gemeinden und Kommunen der +Europäischen Union. + +Daten aus Excel extrahiert und in CSV exportiert. + +**Dataset: `preprocessed/eurostat_lau`** + + +------------------------------ +## Bevölkerungsprognose Sachsen-Anhalt + +Bevölkerungsprognose je Gemeinde bis 2035 des Statistischen Landesamtes +Sachsen-Anhalt, extrahiert und konvertiert. 
+ +Raw dataset: +[stala_st_pop_prog](../../digipipe/store/raw/stala_st_pop_prog/dataset.md) + +**Dataset: `preprocessed/stala_st_pop_prog`** + + +------------------------------ +## Dachflächenpotenzial PV-Aufdachanlagen in ABW + +Abschätzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in +Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft, reprojizert. + +Raw dataset: +[rpg_abw_pv_roof_potential](../../digipipe/store/raw/rpg_abw_pv_roof_potential/dataset.md) + +**Dataset: `preprocessed/rpg_abw_pv_roof_potential`** + + +------------------------------ +## Temperatur + +Stündliche Mittelwerte der Luft- und Erdbodentemperatur für die Region ABW, +Mittelwert für alle Gemeinden. + +Verwendet: [dwd_temperature](../../digipipe/store/raw/dwd_temperature/dataset.md) + +**Dataset: `preprocessed/dwd_temperature`** + + +------------------------------ +## BMWK Langfristszenarien + +Langfristszenarien des Bundesministerium für Wirtschaft und Klimaschutz, Daten +auf Landesebene, extrahiert. + +Raw dataset: +[bmwk_long_term_scenarios](../../digipipe/store/raw/bmwk_long_term_scenarios/dataset.md) + +**Dataset: `preprocessed/bmwk_long_term_scenarios`** + + +------------------------------ +## AGEB – Anwendungsbilanzen für die Endenergiesektoren 2011 bis 2021 + +Detaillierte Anwendungsbilanzen der Endenergiesektoren für 2020 und 2021 sowie +zusammenfassende Zeitreihen zum Endenergieverbrauch nach Energieträgern und +Anwendungszwecken für Jahre von 2011 bis 2021 der AG Energiebilanzen. + +Aus PDF extrahierte Tabellenwerte für Haushalte, GHD und Industrie. + +**Dataset: `preprocessed/ageb_energy_balance`** + + +------------------------------ +## sEEnergies Pan-European Thermal Atlas 5.2 (Peta5) + +Wärmebedarf (extrahiert) für Europa 2015 in GJ (1ha Auflösung) für + +- Haushalte: Raumwärme und Warmwasser +- GHD: Raumwärme, Warmwasser und Prozesswärme + +**Dataset: `preprocessed/seenergies_peta5`** + + +------------------------------ +## OpenStreetMap gefiltert + +OSM data nach bestimmten Tags (s. [config.yml](../../digipipe/store/preprocessed/osm_filtered/config.yml) --> `tags`) gefiltert, +zu LAEA Europe (EPSG:3035) umprojiziert und in ein Geopackage konvertiert. + +**Achtung:** Konvertierungs- und Extraktionsprozess benötigt ~50 GB +Speicherplatz und kann viel Zeit in Anspruch nehmen. + +**Dataset: `preprocessed/osm_filtered`** + + +------------------------------ +## Sozialversicherungspflichtig Beschäftigte und Betriebe + +Gemeindedaten der sozialversicherungspflichtig Beschäftigten am 30.06.2022 nach +Wohn- und Arbeitsort - Deutschland, Länder, Kreise und Gemeinden (Jahreszahlen) +der Bundesagentur für Arbeit. +Anzahl Beschäftigte und Betriebe extrahiert und in CSV konvertiert. + +**Dataset: `preprocessed/ba_employment`** + + +------------------------------ +## Bevölkerung + +Einwohnerzahl nach Gemeinden des Statistischen Bundesamts für die Jahre +2010, 2015, 2020, 2021, 2022. + +**Dataset: `preprocessed/destatis_gv`** + + +------------------------------ +## Regionalstatistik (GENESIS) + +Enthält Datensätze der statistischen Ämter des Bundes und der Länder aus +[regiostat](../../digipipe/store/raw/regiostat/dataset.md). + +### Energieverwendung der Betriebe im Verarbeitenden Gewerbe (43531-01-02-4) + +Jahreserhebung ü. die Energieverwendung der Betriebe im verarbeitendem Gewerbe. 
+ +Änderungen: + +- Dateiformat konvertiert +- Bundesland-, Kreis und Gemeindewerte extrahiert +- Energie in TWh konvertiert + +### Betriebe, tätige Personen, Bruttoentgelte (42111-01-04-5) + +Jahreserhebung ü. Betriebe, tätige Personen und Bruttoentgelte der Betriebe im +verarbeitendem Gewerbe. + +Änderungen: + +- Dateiformat konvertiert +- Bundesland-, Kreis und Gemeindewerte extrahiert + +**Dataset: `preprocessed/regiostat`** + + +------------------------------ +## Geodaten PV- und Windflächenrechner + +Geodaten aus dem [PV- und Windflächenrechner](https://www.agora-energiewende.de/service/pv-und-windflaechenrechner/), +extrahiert. + +Raw dataset: +[rli_pv_windflaechenrechner](../../digipipe/store/raw/rli_pv_wfr/dataset.md) + +**Dataset: `preprocessed/rli_pv_wfr`** + + +------------------------------ +## Regionalplan Anhalt-Bitterfeld-Wittenberg + +Vorverarbeitete Datensätze aus Teilplänen Wind der Regionalen +Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg aus +[rpg_abw_regional_plan](../../digipipe/store/raw/rpg_abw_regional_plan/dataset.md). + +In der [config.yml](../../digipipe/store/preprocessed/rpg_abw_regional_plan/config.yml) können Einstellungen vorgenommen werden. + +**Dataset: `preprocessed/rpg_abw_regional_plan`** + + +------------------------------ +## Erzeugungsanlagen aus Marktstammdatenregister + +Erzeugungsanlagen aus dem MaStR für ausgewählte Technologien. + +**Dataset: `preprocessed/bnetza_mastr`** + + +------------------------------ +## Energiedaten Sachsen-Anhalt + +Datensätze zur Energie- und Wasserversorgung des Statistischen Landesamtes +Sachsen-Anhalt, extrahiert und konvertiert. + +### Daten + +Stromverbrauch der Industriebetriebe nach Kreisen 2003-2021 in MWh + +- Datei: `power_demand_industry_st_districts.csv` + +Raw dataset: +[stala_st_energy](../../digipipe/store/raw/stala_st_energy/dataset.md) + +**Dataset: `preprocessed/stala_st_energy`** + +??? metadata "Metadata" + ```json + { + "Datenquellen": { + "Stromverbrauch der Industriebetriebe nach Kreisen": "https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/energie-und-wasserversorgung/tabellen-energieverwendung#c206986" + } + } + ``` + +------------------------------ +## DemandRegio + +Regionalisierte Bevölkerungsprognose sowie Strom-, Wärme und Gasbedarf auf +Landkreisebene, extrahiert. + +Enthält Jahresverbräuche und Zeitreihen für die Sektoren Haushalte, Gewerbe, +Handel, Dienstleistungen (GHD) und Industrie für mehrere Zieljahre. + +**Dataset: `preprocessed/demandregio`** + + +------------------------------ +## Anteile von Biomasse-Konversionsanlagen anhand installierter Leistung + +Berechnung der Anteile der installierten Leistung an der gesamten installierten +Leistung der Biomasse-Konversionsanlagen. + +Die installierten Leistungen werden +[dbfz_biomass_heat_capacities](../../digipipe/store/raw/dbfz_biomass_heat_capacities/dataset.md) +entnommen. Sie werden nach Energieträger (Biogas, Methan oder Holz) und +Technologie (BHKW (bpchp), Turbine mit Kondensationsentnahme (extchp) oder +Ofen (oven)) zusammengefasst. Anschließend wird der Anteil der installierten +Leistung an der gesamten installierten Leistung der Biomasse-Konversionsanlagen +berechnet. Der Einfachheit halber werden die Projektionen für 2050 dem Jahr +2045 und die für 2020 dem Jahr 2022 zugeordnet. Der Energieträger und die +Technologie (vgl. [dbfz_biomass_heat_capacities](../../digipipe/store/raw/dbfz_biomass_heat_capacities/dataset.md)) +werden in einer Spalte zusammengefasst. 
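+
+Die beschriebene Berechnung (Zusammenfassen von Energieträger und Technologie
+in einer Spalte, anschließende Normierung auf die gesamte installierte
+Leistung) als minimale pandas-Skizze mit Platzhalterdaten:
+
+```
+import pandas as pd
+
+# Platzhalter im Stil von dbfz_biomass_heat_capacities
+df = pd.DataFrame(
+    {
+        "carrier": ["biogas", "biogas", "wood", "wood", "methane"],
+        "tech": ["bpchp", "extchp", "oven", "extchp", "bpchp"],
+        "capacity": [120.0, 80.0, 300.0, 50.0, 40.0],
+    }
+)
+
+# Energieträger und Technologie in einer Spalte zusammenfassen
+df["carrier_tech"] = df["carrier"] + "_" + df["tech"]
+
+# Anteil an der gesamten installierten Leistung
+capacity_rel = df.groupby("carrier_tech")["capacity"].sum()
+capacity_rel = capacity_rel / capacity_rel.sum()
+```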
+ +**Dataset: `preprocessed/dbfz_biomass_capacity_rel`** + + +------------------------------ +## Administative areas of Germany + +Geodata of administrative areas (Verwaltungsgebiete 1:250 000) extracted, +reprojected to LAEA Europe(EPSG:3035) and converted to Geopackage. + +**Dataset: `preprocessed/bkg_vg250`** + diff --git a/docs/datasets/raw_datasets.md b/docs/datasets/raw_datasets.md new file mode 100644 index 00000000..4288a760 --- /dev/null +++ b/docs/datasets/raw_datasets.md @@ -0,0 +1,5030 @@ +# 'Raw' Datasets + +------------------------------ +## Technologiedaten + +### Jahresvolllaststunden + +Anhand typischer heutiger und prognostizierter Werte für Sachsen-Anhalt werden +folgende Jahresvolllaststunden angenommen: + +| Technologie | Jahr | Volllaststunden | Quelle(n) für Annahme | Anmerkung | +|-----------------|------|----------------:|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------| +| Windenergie | 2022 | 1800 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/wind/auswahl/811-durchschnittliche_ja/#goto_811) | | +| | 2045 | 2300 | [PV- und Windflächenrechner](https://zenodo.org/record/6794558) | | +| Freiflächen-PV | 2022 | 980 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/solar/auswahl/813-durchschnittliche_ja/#goto_813), [ISE](https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/aktuelle-fakten-zur-photovoltaik-in-deutschland.pdf) | | +| | 2045 | 980 | [PV- und Windflächenrechner](https://zenodo.org/record/6794558), [Ariadne Szenarienreport](https://ariadneprojekt.de/media/2022/02/Ariadne_Szenarienreport_Oktober2021_corr0222_lowres.pdf) | | +| Aufdach-PV | 2022 | 910 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/solar/auswahl/813-durchschnittliche_ja/#goto_813), [ISE](https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/aktuelle-fakten-zur-photovoltaik-in-deutschland.pdf) | | +| | 2045 | 910 | [Ariadne Szenarienreport](https://ariadneprojekt.de/media/2022/02/Ariadne_Szenarienreport_Oktober2021_corr0222_lowres.pdf) | | +| Laufwasserkraft | 2022 | 3800 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/wasser/auswahl/840-durchschnittliche_ja/#goto_840) | | +| | 2045 | 3800 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/wasser/auswahl/840-durchschnittliche_ja/#goto_840) | | +| Bioenergie | 2022 | 6000 | [foederal-erneuerbar](https://www.foederal-erneuerbar.de/landesinfo/bundesland/ST/kategorie/bioenergie/auswahl/814-durchschnittliche_ja/#goto_814), [ISE](https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/DE2018_ISE_Studie_Stromgestehungskosten_Erneuerbare_Energien.pdf) | Bioenergie-Stromerzeugung (ohne
biogenen Teil des Abfalls) | +| | | | | | + +Datei: `technology_data.json` --> `full_load_hours` + +TBD: Generalisieren - automatische Generierung anhand von Global Wind Atlas / +Global Solar Atlas. + +### Leistungsdichte + +Installierbare Leistung pro Fläche / spezifischer Flächenbedarf: + +- Windenergie: 21 MW/km² +- PV-Freiflächenanlagen: 100 MW/km² +- PV-Aufdachanlagen: 140 MW/km² +- Solarthermie: ? MW/km² + +Quelle: [PV- und Windflächenrechner](https://zenodo.org/record/6794558) + +Datei: `technology_data.json` --> `power_density` + +### Nennleistung Windenergieanlage + +Als Zukunftsanlage für 2045 wird eine Enercon E126 6500 (6,5 MW) angenommen. +Diese wird für die Berechnung der Anlagenanzahl in den Ergebnissen +verwendet. + +Datei: `technology_data.json` --> `nominal_power_per_unit` + +### Batterien + +- Kleinbatterien/Heimspeicher: Nennkapazität je installierter PV-Peakleistung + und Speichernennleistung je installierter Speichernennkapazität aus + [bnetza_mastr](../../digipipe/store/raw/bnetza_mastr/dataset.md) und + [HTW](https://solar.htw-berlin.de/wp-content/uploads/HTW-Stromspeicher-Inspektion-2023.pdf). +- Großbatterien: Speichernennleistung je installierter Speichernennkapazität + aus [bnetza_mastr](../../digipipe/store/raw/bnetza_mastr/dataset.md). + +Datei: `technology_data.json` --> `batteries` + +### Warmwasserspeicher + +- Kleinwärmespeicher (dezentral): Speichernennleistung je installierter + Speichernennkapazität aus + [DEA](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage) +- Großwärmespeicher (Fernwärme): Speichernennleistung je installierter + Speichernennkapazität aus + [DEA](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage) + +Datei: `technology_data.json` --> `hot_water_storages` + +### Kosten und Wirkungsgrade + +Datei: `raw_costs_efficiencies.csv` + +##### Allgemein + +Preise werden aus den Technologie Datenblättern der Danish Energy +Agency ([1], [2], [3], [4]) entnommen. +Abweichungen werden gesondert genannt. + +alle Preise werden auf Euro im Jahr 2020 (dis-)kontiert und damit +inflationsbereinigt. + +Für Quellen +[1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and), +[2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants), +[3](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage), +[4](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-renewable-fuels) +ist das meist die Umrechnung von 2015 zu 2020. Dafür folgende Formel verwendet: + +``` +P_(2020) = P_(2015)*f_(infl) +f_(infl) = (1+i_(2015))*(1+i_(2016))...*(1+i_(2019)) +f_(infl) = 1,005 * 1,005 * 1.015 * 1,018 * 1,014 = 1,0582 +``` + +[8](https://de.statista.com/themen/112/inflation/#topicOverview) + +Werte für 2045 werden durch lineare Extrapolation ermittelt. + +##### biogas_upgrading plant + +Quelle: [4] "82 Biogas, upgrading" + +Aufbereitung von Biogas zu Bio-SNG + +##### biogas bpchp_central + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +"06 Gas engines, biogas" + +Backpressure Combined heat and power (bpchp) modelliert BHKWs + +thermal effiency = electrical_effiency / (c_b+c_v) ( +laut [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +S. 
390) + +##### biogas bpchp_decentral + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +"06 Gas engines, biogas" + +Identische Werte zu biogas bpchp_central. Split fürs Energiesystem, aber +eingesetzte Technologie identisch. + +##### biogas_plant + +Quelle [4](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-renewable-fuels): +"81 Biogas Plant, Basic conf." + +Stellt Biogas bereit, welches in KWK (biogas bpchp_central, biogas +bpchp_decentral) genutzt werden kann + +##### boiler_central + +Quelle [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and): +"44 Natural Gas DH Only" + +##### boiler_decentral + +Quelle [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants): +"202 Gas boiler, ex single", "202 Gas boiler, ex apart", "202 Gas boiler, new +single", +"202 Gas boiler, new apart" + +Es werden für jedes Szenario jeder Wert aus 4 Komponenten zusammengesetzt. + +Diese sind die Kombinationen aus: + +- Altbau-Neubau +- Einfamilienhaus-Mehrfamilienhaus + +Diese Kompnonten werden durch Faktoren gewichtet zusammengefasst. + +Für 2020: + +- Verhältnis von Altbau-Neubau + aus [7](https://de.statista.com/statistik/daten/studie/202207/umfrage/struktur-des-wohnungsbaus-nach-art-der-bauleistung-in-deutschland/) +- Verhätnis von Einfamilienhaus-Mehrfamilienhaus im Neubau + aus [6](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb), + verbaute Gasheizungen aggregiert +- Verhätnis von Einfamilienhaus-Mehrfamilienhaus im Altbau wird als 0.7 / 0.3 + angenommen + +Für 2045: + +- Verhältnis von Altbau-Neubau + aus [7](https://de.statista.com/statistik/daten/studie/202207/umfrage/struktur-des-wohnungsbaus-nach-art-der-bauleistung-in-deutschland/) +- Verhätnis von Einfamilienhaus-Mehrfamilienhaus im Neubau + aus [6](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb), + verbaute Gasheizungen in 2020 +- Verhätnis von Einfamilienhaus-Mehrfamilienhaus im Altbau wird als 0.7 / 0.3 + angenommen + +volle Berechnungen siehe "boiler_small_script.py" im Code Anhang + +##### ch4 bpchp_central + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +"06 Gas engines, natural gas" + +Backpressure Combined heat and power (bpchp) modelliert BHKWs + +thermal effiency = electrical_effiency / (c_b+c_v) ( +laut [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and) +S. 390) + +##### ch4 bpchp_decentral + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and +"06 Gas engines, natural gas" + +Identische Werte zu ch4 bpchp_central. Split fürs Energiesystem, aber +eingesetzte Technologie identisch. + +##### ch4 extchp_central + +Quellen: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and +"05 Gas turb. CC, steam extract., Large", [14] S. 20-21 + +##### ch4 extchp_decentral + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and +"05 Gas turb. 
CC, steam extract., Large" + +[14] S. 20-21 + +Identisch wie ch4 extchp_central + +##### gt + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and +" +04 Gas turb. simple cycle, L" + +gas turbine, offener Prozess + +##### heatpump_central + +Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and +" +40 Comp. hp, airsource 10 MW" + +Wärmepumpentechnologie (Luft-Wasser-WP) aus Langfristigkeitsszenarien + +##### heatpump_decentral + +Quellen: [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants +" +207 HP air-water,ex single", "207 HP air-water,ex apart", "207 HP air-water,new +single", "207 HP air-water,new apart", " +207 HP ground-water,ex single", "207 HP ground-water,ex apart", "207 HP +ground-water,new single", "207 HP +ground-water,new apart", +[5], [6](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb) + +Es werden für jedes Szenario jeder Wert aus 8 Komponenten zusammengesetzt. +Diese sind die Kombinationen aus: + +- Sole-Umwelt +- Einfamilienhaus-Mehrfamilienhaus (fast alle WP in Einfamilienhäsuern!) +- Altbau-Neubau + +Es wird das gemittelte Verhätnis Deutschlandweit der letzten 20 Jahre +angenommen (BBSR; Bundesamt für Bauwesen und +Raumordnung) + +Für 2020 wurden Annahmen für das allgemeine Verhältnis zwischen den +Möglichkeiten angenommen: + +- Sole-Umwelt sind die aggregierten Absatzzahlen aus [5] +- Einfamilienhaus-Mehrfamilienhaus + aus [6](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb) +- Altbau-Neubau + aus [7](https://de.statista.com/statistik/daten/studie/202207/umfrage/struktur-des-wohnungsbaus-nach-art-der-bauleistung-in-deutschland/) + +Mit diesen wird für 2045 wurden Annahmen für das allgemeine Verhältnis zwischen +den Möglichkeiten angenommen: + +- Sole-Umwelt = 0.87/0.13 (Das sind die Absatzzahlen aus 2022 aus der + Branchenstudie) +- Einfamilienhaus-Mehrfamilienhaus = 0.7 / 0.3 (Das ist eine freie Annahme, die + eine fortschreitende Verbreitung in + Mehrfamilienhäusern annimmt) +- Altbau-Neubau = 0.699 / 0.301 (das gemittelte Verhätnis Deutschlandweit der + letzten 20 Jahre) + +Die Faktoren in 2045 sind daher: + +- Altbau_Umwelt_EFH = 0.4256 +- Altbau_Umwelt_MFH = 0.1824 +- Altbau_Sole_EFH = 0.0636 +- Altbau_Sole_MFH = 0.0272 +- Neubau_Umwelt_EFH = 0.1833 +- Neubau_Umwelt_MFH = 0.0785 +- Neubau_Sole_EFH = 0.0273 +- Neubau_Sole_MFH = 0.0117 + +Berechnung siehe "heatpump_small_script.py" im Code Anhang + +##### large_scale_battery + +Quellen: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and +" +180 Lithium Ion Battery", Cebulla [9] S. 181 + +storage_fixom_cost Berechnung aus UMAS/Oemof_B3 übernommen, ohne Quelle dieser +Berechnung gefunden zu haben. + +storage_fixom_cost = 0,005 * storage_capacity_cost_overnight + +Große Differenzen zwischen Windnode und UMAS, UMAS Methodik übernommen + +##### pth_central + +Quellen: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and +" +41 Electric Boilers, small", "41 Electric Boilers, large" + +Es wurde ein Mittelwert aus den Electric Biolers small und large gebildet, um +relevante Größen in ABW abzubilden. 
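+
+Die in mehreren Abschnitten oben (z.B. boiler_decentral, heatpump_decentral)
+beschriebene Zusammenfassung mehrerer Datenblatt-Komponenten über
+Gewichtungsfaktoren entspricht im Kern einem gewichteten Mittel; eine
+minimale Skizze mit frei gewählten Platzhalterwerten:
+
+```
+# Platzhalter: Investitionskosten (EUR/kW) je Komponente und deren Gewichte
+components = {
+    "Altbau_EFH": (1200.0, 0.5),
+    "Altbau_MFH": (900.0, 0.2),
+    "Neubau_EFH": (1100.0, 0.2),
+    "Neubau_MFH": (850.0, 0.1),
+}
+
+# Gewichtetes Mittel (Gewichte summieren sich zu 1)
+capacity_cost = sum(cost * weight for cost, weight in components.values())
+```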
+
+##### pth_decentral
+
+Quellen: [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants):
+"216 Electric heating,new single", "216 Electric heating,new apart"
+
+Annahmen zum Gebäudebestand siehe heatpump_decentral, nur ohne Kombination mit
+Altbau, da Power-to-Heat in Altbauten vernachlässigbar selten (und wenn, dann in
+anderen Technologien wie Nachtspeicherheizungen) vorkommt.
+
+Berechnungen siehe "pth_decentral_script.py" im Code Anhang.
+
+##### small_scale_battery
+
+Quelle: [15](https://www.zhb-flensburg.de/fileadmin/content/spezial-einrichtungen/zhb/dokumente/dissertationen/fluri/fluri-2019-wirtschaftlichkeit-dez-stromspeicher.pdf), [17] S. 3
+
+- capacity_cost_overnight: [15] S. 41
+- efficiency, loss_rate, lifetime: [15] S. 91
+
+##### storage heat_central
+
+Quelle [3](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage):
+"141 Large hot water tank"
+
+capacity_cost_overnight und fixom_cost werden ignoriert, da
+storage_capacity_cost_overnight und storage_fixom_cost bereits einen Wert haben.
+
+##### storage heat_decentral
+
+Quelle [3](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage):
+"141 Large hot water tank"
+
+capacity_cost_overnight und fixom_cost werden ignoriert, da
+storage_capacity_cost_overnight und storage_fixom_cost bereits einen Wert haben.
+
+Große Differenzen zwischen UMAS und WindNode; UMAS-Methodik übernommen.
+
+##### hydro ror
+
+Quellen: [16]
+
+- fixom_cost: S. 78
+- capacity_cost_overnight: S. 75
+- lifetime: S. 72
+
+##### lignite oven
+
+Quellen: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+"206 Wood stove, single, ex tank"
+
+Der Kohleofen ist eine Komponente, die für die Abbildung des Ist-Zustandes
+relevant ist.
+Die Kohleheizung wird durch gesetzliche Regulierung nicht mehr neu verbaut
+werden können, wodurch die Komponente für die Optimierung nicht relevant ist.
+Auch ist die Datenlage für die Kohleheizung sehr schlecht; die Daten werden
+daher approximiert.
+
+Keine direkten Werte vorhanden, daher Modellierung anhand der wood-stove-Werte.
+
+efficiency:
+
+Die Differenz der Energie zwischen Holz und Kohle liegt im Heizwert des Brennstoffs.
+Daher wird die Effizienz der wood stove mit dem Faktor des Verhältnisses der
+Heizwerte multipliziert.
+Die Heizwerte von
+BMWK [11](https://www.bmwk.de/Redaktion/DE/Artikel/Energie/energiedaten-gesamtausgabe.html)
+und [12](https://books.google.de/books?id=n0fVYjrHAlwC&pg=PA58#v=onepage&q&f=false)
+ergeben einen Faktor von 4/3 (siehe kurze Rechen-Skizze am Ende dieses Abschnitts).
+
+fixom_cost:
+
+Bestehen großteils aus Brennstoffkosten. Die Änderung gegenüber der wood stove
+ergibt sich aus dem Heizwert (gewonnene Energie pro kg) und der Preisdifferenz
+pro Kilogramm.
+
+Preise aus brikett-rekord.com [13]
+
+lifetime:
+
+identisch zu wood stove
+
+marginal_cost: identisch zu wood stove
+
+Aus den Annahmen folgt, dass die Investkosten ignoriert werden können.
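+
+Zum oben genannten Faktor 4/3 eine minimale Rechen-Skizze. Die Heizwerte sind hier nur
+beispielhafte Annahmen zur Illustration des Vorgehens und nicht direkt [11]/[12] entnommen:
+
+```
+# Beispielhafte Heizwerte in kWh/kg (Annahme, nur zur Illustration)
+heizwert_holz = 4.2               # Scheitholz, lufttrocken (Annahme)
+heizwert_braunkohlebrikett = 5.6  # Braunkohlebrikett (Annahme)
+
+faktor = heizwert_braunkohlebrikett / heizwert_holz
+print(faktor)  # ~1.33, entspricht dem im Text genannten Faktor 4/3
+
+# Anwendung wie im Text: Effizienz des Kohleofens aus der wood-stove-Effizienz skaliert
+wood_stove_efficiency = 0.6       # Platzhalter; tatsächlicher Wert aus [1]
+lignite_oven_efficiency = wood_stove_efficiency * faktor
+print(lignite_oven_efficiency)    # ~0.8
+```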
+
+##### pv_ground
+
+Quelle [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+"22 Utility-scale PV", Vergleich [10]
+
+marginal_cost = 0, da in den Quellen nicht vorhanden.
+
+Kosten aus [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+liegen im Bereich von [10].
+
+##### pv_rooftop
+
+Quelle [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+"22 PV commercial&industrial rooftop", "22 PV residential", Vergleich [10]
+
+Gewichteter Mittelwert zwischen kommerzieller und Wohnhaus-PV.
+
+Gewichtung anhand der openMaStR-Daten aus der Pipeline:
+
+```
+import geopandas as gpd
+import os.path
+
+data_folder = os.path.join("/ROAD/TO/DATA")
+data = "bnetza_mastr_pv_roof_region.gpkg"
+
+df = gpd.read_file(os.path.join(data_folder, data))
+
+# Anteile industrieller und privater Dachanlagen anhand der Anlagenzahl je Nutzungssektor
+sum = df[["usage_sector", "status"]].groupby("usage_sector").count().sum()
+industrial = (df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Industrie"][0] + \
+              df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Sonstige"][0] + \
+              df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Landwirtschaft"][0] + \
+              df[["usage_sector", "status"]].groupby("usage_sector").count().loc[
+                  "Gewerbe, Handel und Dienstleistungen"][0]) \
+             / sum
+residental = (df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Öffentliches Gebäude"][0] +
+              df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Haushalt"][0]) / sum
+print([industrial, residental])
+```
+
+Ergibt 25 % industrial und 75 % Haushalte.
+
+marginal_cost = 0, da in den Quellen nicht vorhanden.
+
+Kosten aus [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and)
+liegen im Bereich von [10].
+
+##### thermalcollector_central
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+"46 Solar District Heating"
+
+##### thermalcollector_decentral
+
+Quelle: [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants):
+"215 Solar heating,ex single", "215 Solar heating,ex apart", "215 Solar heating,new single", "215 Solar heating,new apart"
+
+Annahmen zum Gebäudebestand siehe heatpump_decentral.
+
+Berechnungen siehe "thermal_collector_small_script.py" im Code Anhang.
+
+##### wind onshore
+
+Quelle [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+"20 Onshore turbines",
+Vergleich [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html)
+
+Die EE-Kosten sind 2020 durchweg kleiner als in WindNode.
+
+WindNode bezieht sich auf Fraunhofer ISE aus 2018, die Vorgängerstudie
+zu [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html).
+
+Die CAPEX-Range von Fraunhofer [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html)
+(S. 11) ist 2020 höher als die der DEA [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+1400000 bis 2000000 €/MW [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html)
+gegenüber 1190000 €/MW [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and).
+
+Keine Aussagen in Fraunhofer [10](https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html)
+über 2045.
+
+Wir wählen die DEA als Quelle, da die Vergleichbarkeit in der Optimierung der
+Modellierung Vorrang hat.
+
+##### wood extchp_central
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+"09a Wood Chips, Medium"
+
+[14] S. 20-21
+
+##### wood extchp_decentral
+
+Quelle: [1](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and):
+"09a Wood Chips, Medium"
+
+[14] S. 20-21
+
+Identisch zu wood extchp_central.
+
+##### wood oven
+
+Quelle: [2](https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants):
+"204 Biomass auto,ex single", "204 Biomass auto,new single", "204 Biomass auto,ex apart", "204 Biomass auto,new apart"
+
+Annahmen zum Gebäudebestand siehe heatpump_decentral.
+
+Berechnungen siehe "wood_oven_script.py" im Code Anhang.
+
+##### Quellen
+
+[1] Danish Energy Agency (2016): "Technology Data - Energy Plants for
+Electricity and District heating generation", Version 13,
+von https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-generation-electricity-and
+
+[2] Danish Energy Agency (2016): "Technology Data for heating installations",
+Version 4,
+von https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-individual-heating-plants
+
+[3] Danish Energy Agency (2018): "Technology Data – Energy storage", Version 7,
+von https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-energy-storage
+
+[4] Danish Energy Agency (2017): "Technology Data – Renewable fuels", Version 9,
+von https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-renewable-fuels
+
+[5] Karl-Heinz Backhaus (Vaillant), Dr. Hendrik Ehrhardt (Stiebel Eltron), Sven
+Kersten (NIBE), Steffen Moser (EnBW), Frank Richert (Wolf), Ingo Rieger (Bosch),
+Egbert Tippelt (Viessmann), André Jacob (BWP), Johanna Otting (BWP), Björn
+Schreinermacher (BWP) (2023): "Branchenstudie 2023: Marktentwicklung – Prognose
+– Handlungsempfehlungen", Bundesverband Wärmepumpe (BWP) e. V.
+
+[6] Statistisches Landesamt Sachsen-Anhalt: "GENESIS-Tabelle: 31121-0006,
+Statistik der Baufertigstellungen",
+von https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=31121-0006&bypass=true&levelindex=0&levelid=1682324189765#abreadcrumb,
+Stand: 11.04.2023
+
+[7] Statista Research Department (2021): "Struktur des Wohnungsbaus nach Neubau
+und Sanierung in Deutschland in den Jahren 2001 bis 2020",
+von https://de.statista.com/statistik/daten/studie/202207/umfrage/struktur-des-wohnungsbaus-nach-art-der-bauleistung-in-deutschland/,
+Stand: 03.04.2023 12:26:20
+
+[8] Statista: "Daten und Fakten zur Inflation und den Verbraucherpreisen",
+von https://de.statista.com/themen/112/inflation/#topicOverview,
+Stand: 29.03.2023
+
+[9] Cebulla, Felix (2017): "Storage demand in highly renewable energy scenarios
+for Europe", OPUS - Online Publikationen der Universität Stuttgart,
+von https://elib.uni-stuttgart.de/handle/11682/9778
+
+[10] Fraunhofer ISE (2019): "Stromgestehungskosten erneuerbare Energien",
+von https://www.ise.fraunhofer.de/de/veroeffentlichungen/studien/studie-stromgestehungskosten-erneuerbare-energien.html
+
+[11] BMWK (2021): "Energiedaten",
+von https://www.bmwk.de/Redaktion/DE/Artikel/Energie/energiedaten-gesamtausgabe.html
+
+[12] Michael Herrmann, Jürgen Weber: "Öfen und Kamine: Raumheizungen fachgerecht
+planen und bauen", Beuth Verlag, 201,
+von https://books.google.de/books?id=n0fVYjrHAlwC&pg=PA58#v=onepage&q&f=false
+
+[13] www.brikett-rekord.com: "Energiekostenvergleich",
+von https://www.brikett-rekord.com/de/heizwertvergleich-rekord-briketts.html,
+letzter Abruf 8.5.2023
+
+[14] WindNode: "Modell, Methodik, Daten, ABW",
+von: RLI,
+letzter Abruf 8.8.2023
+
+[15] Fluri, Verena: "Wirtschaftlichkeit von zukunftsfähigen Geschäftsmodellen
+dezentraler Stromspeicher",
+von https://www.zhb-flensburg.de/fileadmin/content/spezial-einrichtungen/zhb/dokumente/dissertationen/fluri/fluri-2019-wirtschaftlichkeit-dez-stromspeicher.pdf,
+letzter Abruf 8.8.2023
+
+[16] Schröder, Andreas; Kunz, Friedrich; Meiss, Jan; Mendelevitch, Roman;
+Hirschhausen, Christian von: "Current and Prospective Costs of Electricity
+Generation until 2050",
+von https://www.diw.de/documents/publikationen/73/diw_01.c.424566.de/diw_datadoc_2013-068.pdf,
+letzter Abruf 8.8.2023
+
+[17] Prüggler, Wolfgang (2019): "HEIMSPEICHERSYSTEME UND ZENTRALE
+BATTERIESPEICHER – KRITISCHE FAKTOREN DER WIRTSCHAFTLICHKEIT",
+von https://ens.dk/sites/ens.dk/files/Analyser/technology_data_catalogue_for_energy_storage.pdf,
+letzter Abruf 8.8.2023
+
+#### Code Anhang
+
+wood_oven_script.py
+
+```
+import pandas as pd
+import os.path
+
+
+def linear_interpolate_2045(wert_1, wert_2):
+    zeit_1 = 2040
+    zeit_2 = 2050
+    wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1))
+
+    return wert
+
+
+def get_agg_price_2045(dic):
+    # Neubau und Sanierungen allg nach BMI f.
Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau = 1 - neubau + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis 2020 + single_new = 693 / 763 + multiple_new = (763 - 693) / 763 + + # Einfamilinehaus-Mehrfamilienhaus im Altbau Annahme: + single_faktor = 0.7 + multiple_faktor = 0.3 + + single_new_faktor = neubau * single_new + multiple_new_faktor = neubau * multiple_new + single_old_faktor = altbau * single_faktor + multiple_old_faktor = altbau * multiple_faktor + + single_old = single_old_faktor * dic["single_old_price"] + multiple_old = multiple_old_faktor * dic["multiple_old_price"] + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_old + multiple_old + single_new + multiple_new + + return preis + + +## Daten aus DEA: +## einlesen von Daten +data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +##datensheets +single_old = pd.read_excel(data, "204 Biomass auto,ex single", skiprows=4, nrows=33) +multiple_old = pd.read_excel(data, "204 Biomass auto,ex apart", skiprows=4, nrows=33) +single_new = pd.read_excel(data, "204 Biomass auto,new single", skiprows=4, nrows=33) +multiple_new = pd.read_excel(data, "204 Biomass auto,new apart", skiprows=4, nrows=33) + +dic_capacity_cost_overnight_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[19,5]*1000)/(single_old.iat[0,5]/1000), (single_old.iat[19,6]*1000)/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[19,5]*1000)/(multiple_old.iat[0,5]/1000), (multiple_old.iat[19,6]*1000)/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[19,5]*1000)/(single_new.iat[0,5]/1000), (single_new.iat[19,6]*1000)/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[19,5]*1000)/(multiple_new.iat[0,5]/1000), (multiple_new.iat[19,6]*1000)/(multiple_new.iat[0,6]/1000)), +} + +dic_effiency_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[3,5], single_old.iat[3,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[3,5], multiple_old.iat[3,6]) , + "single_new_price": linear_interpolate_2045(single_new.iat[3,5], single_new.iat[3,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[3,5], multiple_new.iat[3,6]) +} + +dic_fixom_cost_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[24,5])/(single_old.iat[0,5]/1000), (single_old.iat[24,6])/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[24,5])/(multiple_old.iat[0,5]/1000), (multiple_old.iat[24,6])/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[24,5])/(single_new.iat[0,5]/1000), (single_new.iat[24,5])/(single_new.iat[0,5]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[24,5])/(multiple_new.iat[0,5]/1000), (multiple_new.iat[24,6])/(multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[5,5], single_old.iat[5,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[5,5], multiple_old.iat[5,6]), + "single_new_price": linear_interpolate_2045(single_new.iat[5,5], single_new.iat[5,6]), + 
"multiple_new_price": linear_interpolate_2045(multiple_new.iat[5,5], multiple_new.iat[5,6]), +} + +dic_marginal_cost_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[23,2] / 1000, single_old.iat[23,2] / 1000), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[23,2] / 1000, multiple_old.iat[23,2] / 1000), + "single_new_price": linear_interpolate_2045(single_new.iat[23,2] / 1000,single_new.iat[23,2] ), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[23,2] / 1000, multiple_new.iat[23,2] / 1000), +} + +dic_2045 = [dic_capacity_cost_overnight_2045,dic_effiency_2045, dic_fixom_cost_2045, dic_lifetime_2045, dic_marginal_cost_2045] +val_2045 = [] + +## Berechnungen +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)) + +print(val_2045) +``` + +thermal_collector_small_script.py + +``` +import pandas as pd +import os.path + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau = 1 - neubau + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis 2020 + single_new = 693 / 763 + multiple_new = (763 - 693) / 763 + + # Einfamilinehaus-Mehrfamilienhaus im Altbau Annahme: + single_faktor = 0.7 + multiple_faktor = 0.3 + + single_new_faktor = neubau * single_new + multiple_new_faktor = neubau * multiple_new + single_old_faktor = altbau * single_faktor + multiple_old_faktor = altbau * multiple_faktor + + single_old = single_old_faktor * dic["single_old_price"] + multiple_old = multiple_old_faktor * dic["multiple_old_price"] + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_old + multiple_old + single_new + multiple_new + + return preis + + +## Daten aus DEA: +## einlesen von Daten +data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +##datensheets +single_old = pd.read_excel(data, "215 Solar heating,ex single", skiprows=4, nrows=33) +multiple_old = pd.read_excel(data, "215 Solar heating,ex apart", skiprows=4, nrows=33) +single_new = pd.read_excel(data, "215 Solar heating,new single", skiprows=4, nrows=33) +multiple_new = pd.read_excel(data, "215 Solar heating,new apart", skiprows=4, nrows=33) + +dic_capacity_cost_overnight_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[19,5]*1000)/(single_old.iat[0,5]/1000), (single_old.iat[19,6]*1000)/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[19,5]*1000)/(multiple_old.iat[0,5]/1000), (multiple_old.iat[19,6]*1000)/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[19,5]*1000)/(single_new.iat[0,5]/1000), (single_new.iat[19,6]*1000)/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[19,5]*1000)/(multiple_new.iat[0,5]/1000), (multiple_new.iat[19,6]*1000)/(multiple_new.iat[0,6]/1000)), +} + +dic_fixom_cost_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[24,5])/(single_old.iat[0,5]/1000), (single_old.iat[24,6])/(single_old.iat[0,6]/1000)), + 
"multiple_old_price": linear_interpolate_2045((multiple_old.iat[24,5])/(multiple_old.iat[0,5]/1000), (multiple_old.iat[24,6])/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[24,5])/(single_new.iat[0,5]/1000), (single_new.iat[24,5])/(single_new.iat[0,5]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[24,5])/(multiple_new.iat[0,5]/1000), (multiple_new.iat[24,6])/(multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[5,5], single_old.iat[5,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[5,5], multiple_old.iat[5,6]), + "single_new_price": linear_interpolate_2045(single_new.iat[5,5], single_new.iat[5,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[5,5], multiple_new.iat[5,6]), +} + +dic_2045 = [dic_capacity_cost_overnight_2045, dic_fixom_cost_2045, dic_lifetime_2045] +val_2045 = [] + +## Berechnungen +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)) + +print(val_2045) +``` + +pv_rooftop_script.py: + +``` +import pandas as pd +import geopandas as gpd +import os.path + +##trennt residential and industrial rooftop PV nach Nennleistung +def get_proprtion_residential_industrtial(df): + sum = df[["usage_sector", "status"]].groupby("usage_sector").count().sum() + industrial = (df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Industrie"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Sonstige"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Landwirtschaft"][0] + \ + df[["usage_sector", "status"]].groupby("usage_sector").count().loc[ + "Gewerbe, Handel und Dienstleistungen"][0]) \ + / sum + residental = (df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Öffentliches Gebäude"][0] + + df[["usage_sector", "status"]].groupby("usage_sector").count().loc["Haushalt"][0]) / sum + return [industrial, residental] + + +def get_qgis_df(GeoDataFrame): + gdf = gpd.read_file(GeoDataFrame, where="geometry_approximated='0'") + gdf.where(gdf["status"] == "In Betrieb").to_file("bnetza_mastr_pv_roof_region_filtered.gpkg") + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + +def get_agg_price_2045(dic, proportion): + # getting faktoren + industrial_factor = proportion[0][0] + residential_factor = proportion[1][0] + + residential = residential_factor * dic["residential_price"] + industrial = industrial_factor * dic["industrial_price"] + + preis = residential + industrial + + return preis + + +data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data = ["bnetza_mastr_pv_ground_region.gpkg", "bnetza_mastr_pv_roof_region.gpkg"] + +df = gpd.read_file(os.path.join(data_folder, data[1])) + +## Daten aus DEA: +## einlesen von Daten +data_folder_sheets = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data_sheets = os.path.join(data_folder_sheets, "technology_data_for_el_and_dh.xlsx") + +##datensheets +residential = pd.read_excel(data_sheets, "22 Rooftop PV residential", skiprows=4, nrows=42) +industrial = pd.read_excel(data_sheets, "22 Rooftop PV comm.&industrial", skiprows=4, nrows=42) + +proportion = get_proprtion_residential_industrtial(df) + +dic_capacity_cost_overnight_2045 = { + "residential_price": linear_interpolate_2045(residential.iat[10,5], residential.iat[10,6])*1000000, + 
"industrial_price": linear_interpolate_2045(industrial.iat[10,5], industrial.iat[10,6])*1000000 +} +dic_fixom_cost_2045 = { + "residential_price": linear_interpolate_2045(residential.iat[18,5], residential.iat[18,6]), + "industrial_price": linear_interpolate_2045(industrial.iat[18,5], industrial.iat[18,6]), +} + +dic_lifetime_2045 = { + "residential_price": linear_interpolate_2045(residential.iat[3,5], residential.iat[3,6]), + "industrial_price": linear_interpolate_2045(industrial.iat[3,5], industrial.iat[3,6]), +} + +dic_2045 = [dic_capacity_cost_overnight_2045, dic_fixom_cost_2045, dic_lifetime_2045] +val_2045 = [] + +## Berechnungen +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic, proportion)) + +print(dic_capacity_cost_overnight_2045, dic_fixom_cost_2045, dic_lifetime_2045) +print(proportion[0][0]) +print(val_2045) +``` + +Pth_decentral_sc0irpt.py + +``` +import pandas as pd +import os.path + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis 2020 + single_new = 693 / 763 + multiple_new = (763 - 693) / 763 + + single_new_faktor = single_new + multiple_new_faktor = multiple_new + + + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_new + multiple_new + + return preis + + +## Daten aus DEA: +## einlesen von Daten +data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +##datensheets +single_new = pd.read_excel(data, "216 Electric heating,new single", skiprows=4, nrows=33) +multiple_new = pd.read_excel(data, "216 Electric heating,new apart", skiprows=4, nrows=33) + +dic_capacity_cost_overnight_2045 = { + "single_new_price": linear_interpolate_2045((single_new.iat[19,5]*1000)/(single_new.iat[0,5]/1000), (single_new.iat[19,6]*1000)/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[19,5]*1000)/(multiple_new.iat[0,5]/1000), (multiple_new.iat[19,6]*1000)/(multiple_new.iat[0,6]/1000)), +} +dic_effiency_2045 = { + "single_new_price": linear_interpolate_2045(single_new.iat[3,5], single_new.iat[3,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[3,5], multiple_new.iat[3,6]) +} +dic_fixom_cost_2045 = { + "single_new_price": linear_interpolate_2045((single_new.iat[24,5])/(single_new.iat[0,5]/1000), (single_new.iat[24,5])/(single_new.iat[0,5]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[24,5])/(multiple_new.iat[0,5]/1000), (multiple_new.iat[24,6])/(multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "single_new_price": linear_interpolate_2045(single_new.iat[5,5], single_new.iat[5,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[5,5], multiple_new.iat[5,6]), +} +dic_marginal_cost_2045 = { + "single_new_price": linear_interpolate_2045(single_new.iat[23,2] / 1000,single_new.iat[23,2] ), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[23,2] / 1000, multiple_new.iat[23,2] / 1000), +} + +dic_2045 = [dic_capacity_cost_overnight_2045, 
dic_effiency_2045 , dic_fixom_cost_2045, dic_lifetime_2045, dic_marginal_cost_2045] +val_2045 = [] + +print(dic_fixom_cost_2045) +## Berechnungen +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)) + +print(val_2045) +``` + +heatpump_small_script.py: + +``` +import pandas as pd +import os.path + +def get_faktoren_new(df): + + hp_agg = {"single_erdwaerme": 0, "multiple_erdwaerme": 0, "single_umweltwaerme": 0, "multiple_umweltwaerme": 0,} + + for row in df.itertuples(): + bereich = row[1].split(",")[2] + energie = row[1].split(",")[3] + try: + count_insg = int(row[1].split(",")[4]) + count_single = int(row[1].split(",")[5]) + except: + ValueError + + if bereich == "Sachsen-Anhalt": + if energie == "Geothermie": + hp_agg["single_erdwaerme"] += count_single + hp_agg["multiple_erdwaerme"] += (count_insg - count_single) + elif energie == "Umweltthermie (Luft / Wasser)": + hp_agg["single_umweltwaerme"] += count_single + hp_agg["multiple_umweltwaerme"] += (count_insg - count_single) + else: + continue + + else: + continue + + hp_agg_sum = sum(hp_agg.values()) + air_single_new = hp_agg["single_umweltwaerme"] / hp_agg_sum + air_multiple_new = hp_agg["multiple_umweltwaerme"] / hp_agg_sum + ground_single_new = hp_agg["single_erdwaerme"] / hp_agg_sum + ground_multiple_new = hp_agg["multiple_erdwaerme"] / hp_agg_sum + + return air_single_new, air_multiple_new, ground_single_new, ground_multiple_new + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2020(dic): + # nach BWP: Absatz von 2010-2020 -> bildet Bestand mit Kosten von Neubau ab + wp_neubau_abs = 52500+52500+45000+45000+37500+37500+37500+37500+30000+30000+30000 + wp_altbau_abs = 67500+37500+37500+37500+30000+22500+22500+22500+30000+30000+22500 + wp_gesamt_abs = wp_altbau_abs + wp_neubau_abs + wp_neubau = wp_neubau_abs / wp_gesamt_abs + wp_geo = 333333 / (333333+750000) + wp_umwelt = 750000 / (333333+750000) #Umwelt = Luft und Wasser + + # Verhältnisse WP Alt und Neubau in ST nach destatis + # Daten einlesen + data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") + hp = os.path.join(data_folder, "2023_04_11_ST_thermische_Primärenergie_neubau_2010-2020.csv") + df = pd.read_csv(hp, encoding="ISO8859-1", delimiter=";", skiprows=range(0, 10), nrows=2150) + + faktoren_new = get_faktoren_new(df) + + air_water_single_new_faktor = wp_neubau * faktoren_new[0] + air_water_multiple_new_faktor = wp_neubau * faktoren_new[1] + ground_water_single_new_fakotr = wp_neubau * faktoren_new[2] + ground_water_multiple_new_faktor = wp_neubau * faktoren_new[3] + + # wp altbau: + altbau_air = wp_umwelt - (air_water_single_new_faktor + air_water_multiple_new_faktor) + altbau_ground = wp_geo - (ground_water_single_new_fakotr + ground_water_multiple_new_faktor) + + # keine Daten, daher wie neubau angenommen (es gibt keinen Grund zu glauben, dass im Mehrfamilien-Altbau mehr WP verbaut werden) + single_faktor = faktoren_new[0] + faktoren_new[2] # ca 0.95 + multiple_faktor = faktoren_new[1] + faktoren_new[3] # ca 0.05 + + air_water_single_old_faktor = altbau_air * single_faktor + air_water_multiple_old_faktor = altbau_air * multiple_faktor + ground_water_single_old_fakotr = altbau_ground * single_faktor + ground_water_multiple_old_faktor = altbau_ground * multiple_faktor + + + air_water_single_old = air_water_single_old_faktor * dic["air_water_single_old_price"] + air_water_multiple_old = 
air_water_multiple_old_faktor * dic["air_water_multiple_old_price"] + ground_water_single_old = ground_water_single_old_fakotr * dic["ground_water_single_old_price"] + ground_water_multiple_old = ground_water_multiple_old_faktor * dic["ground_water_multiple_old_price"] + + air_water_single_new = air_water_single_new_faktor * dic["air_water_single_new_price"] + air_water_multiple_new = air_water_multiple_new_faktor * dic["air_water_multiple_new_price"] + ground_water_single_new = ground_water_single_new_fakotr * dic["ground_water_single_new_price"] + ground_water_multiple_new = ground_water_multiple_new_faktor * dic["ground_water_multiple_new_price"] + + altbau_kosten = air_water_single_old + air_water_multiple_old + ground_water_single_old + ground_water_multiple_old + neubau_kosten = air_water_single_new + air_water_multiple_new + ground_water_single_new + ground_water_multiple_new + + preis = altbau_kosten + neubau_kosten + + faktoren = [air_water_single_old_faktor, + air_water_multiple_old_faktor, + ground_water_single_old_fakotr, + ground_water_multiple_old_faktor, + air_water_single_new_faktor, + air_water_multiple_new_faktor, + ground_water_single_new_fakotr, + ground_water_multiple_new_faktor, + ] + + return preis, faktoren + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. Deutschland + neubau_allg_prozent = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau_allg_prozent = 1 - neubau_allg_prozent + + # Sole/Luft nach Absatz 2022 laut BWP + ground = 0.13 + air = 0.87 + + # Einfamilienhaus/Mehrfamilienhaus + single = 0.7 + multiple = 0.3 + + + # Faktoren + air_water_single_old_faktor = altbau_allg_prozent * air * single + air_water_multiple_old_faktor = altbau_allg_prozent * air * multiple + ground_water_single_old_fakotr = altbau_allg_prozent * ground * single + ground_water_multiple_old_faktor = altbau_allg_prozent * ground * multiple + air_water_single_new_faktor = neubau_allg_prozent * air * single + air_water_multiple_new_faktor = neubau_allg_prozent * air * multiple + ground_water_single_new_fakotr = neubau_allg_prozent * ground * single + ground_water_multiple_new_faktor = neubau_allg_prozent * ground * multiple + + air_water_single_old = air_water_single_old_faktor * dic["air_water_single_old_price"] + air_water_multiple_old = air_water_multiple_old_faktor * dic["air_water_multiple_old_price"] + ground_water_single_old = ground_water_single_old_fakotr * dic["ground_water_single_old_price"] + ground_water_multiple_old = ground_water_multiple_old_faktor * dic["ground_water_multiple_old_price"] + air_water_single_new = air_water_single_new_faktor * dic["air_water_single_new_price"] + air_water_multiple_new = air_water_multiple_new_faktor * dic["air_water_multiple_new_price"] + ground_water_single_new = ground_water_single_new_fakotr * dic["ground_water_single_new_price"] + ground_water_multiple_new = ground_water_multiple_new_faktor * dic["ground_water_multiple_new_price"] + + altbau_kosten = air_water_single_old + air_water_multiple_old + ground_water_single_old + ground_water_multiple_old + neubau_kosten = air_water_single_new + air_water_multiple_new + ground_water_single_new + ground_water_multiple_new + + preis = altbau_kosten + neubau_kosten + + faktoren = [air_water_single_old_faktor, + air_water_multiple_old_faktor, + ground_water_single_old_fakotr, + ground_water_multiple_old_faktor, + air_water_single_new_faktor, + 
air_water_multiple_new_faktor, + ground_water_single_new_fakotr, + ground_water_multiple_new_faktor, + ] + + return preis, faktoren + + +## Daten aus DEA: +## einlesen von Daten +data_folder = os.path.join("/home/local/RL-INSTITUT/aaron.schilling/Dokumente/Projekte/Digipipe") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +##datensheets +air_water_single_old = pd.read_excel(data, "207 HP air-water,ex single", skiprows=4, nrows=33) +air_water_multiple_old = pd.read_excel(data, "207 HP air-water,ex apart", skiprows=4, nrows=33) +ground_water_single_old = pd.read_excel(data, "207 HP air-water,new single", skiprows=4, nrows=33) +ground_water_multiple_old = pd.read_excel(data, "207 HP air-water,new apart", skiprows=4, nrows=33) +air_water_single_new = pd.read_excel(data, "207 HP ground-water,ex single", skiprows=4, nrows=33) +air_water_multiple_new = pd.read_excel(data, "207 HP ground-water,ex apart", skiprows=4, nrows=33) +ground_water_single_new = pd.read_excel(data, "207 HP ground-water,new single", skiprows=4, nrows=33) +ground_water_multiple_new = pd.read_excel(data, "207 HP ground-water,new apart", skiprows=4, nrows=33) + +dic_capacity_cost_overnight_2020 = { + "air_water_single_old_price": air_water_single_old.iat[19,2]*1000/(air_water_single_old.iat[0,2]/1000), + "air_water_multiple_old_price": air_water_multiple_old.iat[19,2]*1000/(air_water_multiple_old.iat[0,2]/1000), + "ground_water_single_old_price": ground_water_single_old.iat[19,2]*1000/(ground_water_single_old.iat[0,2]/1000), + "ground_water_multiple_old_price": ground_water_multiple_old.iat[19,2]*1000/(ground_water_multiple_old.iat[0,2]/1000), + "air_water_single_new_price": air_water_single_new.iat[19,2]*1000/(air_water_single_new.iat[0,2]/1000), + "air_water_multiple_new_price": air_water_multiple_new.iat[19,2]*1000/(air_water_multiple_new.iat[0,2]/1000), + "ground_water_single_new_price": ground_water_single_new.iat[19,2]*1000/(ground_water_single_new.iat[0,2]/1000), + "ground_water_multiple_new_price": ground_water_multiple_new.iat[19,2]*1000/(ground_water_multiple_new.iat[0,2]/1000), +} +dic_fixom_cost_2020 = { + "air_water_single_old_price": air_water_single_old.iat[24,2]/(air_water_single_old.iat[0,2]/1000), + "air_water_multiple_old_price": air_water_multiple_old.iat[24,2]/(air_water_multiple_old.iat[0,2]/1000), + "ground_water_single_old_price": ground_water_single_old.iat[24,2]/(ground_water_single_old.iat[0,2]/1000), + "ground_water_multiple_old_price": ground_water_multiple_old.iat[24,2]/(ground_water_multiple_old.iat[0,2]/1000), + "air_water_single_new_price": air_water_single_new.iat[24,2]/(air_water_single_new.iat[0,2]/1000), + "air_water_multiple_new_price": air_water_multiple_new.iat[24,2]/(air_water_multiple_new.iat[0,2]/1000), + "ground_water_single_new_price": ground_water_single_new.iat[24,2]/(ground_water_single_new.iat[0,2]/1000), + "ground_water_multiple_new_price": ground_water_multiple_new.iat[24,2]/(ground_water_multiple_new.iat[0,2]/1000), +} +dic_lifetime_2020 = { + "air_water_single_old_price": air_water_single_old.iat[5,2], + "air_water_multiple_old_price": air_water_multiple_old.iat[5,2], + "ground_water_single_old_price": ground_water_single_old.iat[5,2], + "ground_water_multiple_old_price": ground_water_multiple_old.iat[5,2], + "air_water_single_new_price": air_water_single_new.iat[5,2], + "air_water_multiple_new_price": air_water_multiple_new.iat[5,2], + "ground_water_single_new_price": ground_water_single_new.iat[5,2], + 
"ground_water_multiple_new_price": ground_water_multiple_new.iat[5,2], +} +dic_marginal_cost_2020 = { + "air_water_single_old_price": air_water_single_old.iat[23,2] / 1000, + "air_water_multiple_old_price": air_water_multiple_old.iat[23,2] / 1000, + "ground_water_single_old_price": ground_water_single_old.iat[23,2] / 1000, + "ground_water_multiple_old_price": ground_water_multiple_old.iat[23,2] / 1000, + "air_water_single_new_price": air_water_single_new.iat[23,2] / 1000, + "air_water_multiple_new_price": air_water_multiple_new.iat[23,2] / 1000, + "ground_water_single_new_price": ground_water_single_new.iat[23,2] / 1000, + "ground_water_multiple_new_price": ground_water_multiple_new.iat[23,2] / 1000, +} +dic_capacity_cost_overnight_2045 = { + "air_water_single_old_price": linear_interpolate_2045(air_water_single_old.iat[19,5]*1000/(air_water_single_old.iat[0,5]/1000), air_water_single_old.iat[19,6]*1000/(air_water_single_old.iat[0,6]/1000)), + "air_water_multiple_old_price": linear_interpolate_2045(air_water_multiple_old.iat[19,5]*1000/(air_water_multiple_old.iat[0,5]/1000), air_water_multiple_old.iat[19,6]*1000/(air_water_multiple_old.iat[0,6]/1000)), + "ground_water_single_old_price": linear_interpolate_2045(ground_water_single_old.iat[19,5]*1000/(ground_water_single_old.iat[0,5]/1000),ground_water_single_old.iat[19,6]*1000/(ground_water_single_old.iat[0,6]/1000)), + "ground_water_multiple_old_price": linear_interpolate_2045(ground_water_multiple_old.iat[19,5]*1000/(ground_water_multiple_old.iat[0,5]/1000), ground_water_multiple_old.iat[19,6]*1000/(ground_water_multiple_old.iat[0,6]/1000)), + "air_water_single_new_price": linear_interpolate_2045(air_water_single_new.iat[19,5]*1000/(air_water_single_new.iat[0,5]/1000), air_water_single_new.iat[19,6]*1000/(air_water_single_new.iat[0,6]/1000)), + "air_water_multiple_new_price": linear_interpolate_2045(air_water_multiple_new.iat[19,5]*1000/(air_water_multiple_new.iat[0,5]/1000),air_water_multiple_new.iat[19,6]*1000/(air_water_multiple_new.iat[0,6]/1000)), + "ground_water_single_new_price": linear_interpolate_2045(ground_water_single_new.iat[19,5]*1000/(ground_water_single_new.iat[0,5]/1000), ground_water_single_new.iat[19,6]*1000/(ground_water_single_new.iat[0,6]/1000)), + "ground_water_multiple_new_price": linear_interpolate_2045(ground_water_multiple_new.iat[19,5]*1000/(ground_water_multiple_new.iat[0,5]/1000), ground_water_multiple_new.iat[19,6]*1000/(ground_water_multiple_new.iat[0,6]/1000)), +} +dic_fixom_cost_2045 = { + "air_water_single_old_price": linear_interpolate_2045(air_water_single_old.iat[24,5]/(air_water_single_old.iat[0,5]/1000), air_water_single_old.iat[24,6]/(air_water_single_old.iat[0,6]/1000)), + "air_water_multiple_old_price": linear_interpolate_2045(air_water_multiple_old.iat[24,5]/(air_water_multiple_old.iat[0,5]/1000), air_water_multiple_old.iat[24,6]/(air_water_multiple_old.iat[0,6]/1000)), + "ground_water_single_old_price": linear_interpolate_2045(ground_water_single_old.iat[24,5]/(ground_water_single_old.iat[0,5]/1000), ground_water_single_old.iat[24,6]/(ground_water_single_old.iat[0,6]/1000)), + "ground_water_multiple_old_price": linear_interpolate_2045(ground_water_multiple_old.iat[24,5]/(ground_water_multiple_old.iat[0,5]/1000), ground_water_multiple_old.iat[24,6]/(ground_water_multiple_old.iat[0,6]/1000)), + "air_water_single_new_price": linear_interpolate_2045(air_water_single_new.iat[24,5]/(air_water_single_new.iat[0,5]/1000), air_water_single_new.iat[24,6]/(air_water_single_new.iat[0,6]/1000)), + 
"air_water_multiple_new_price": linear_interpolate_2045(air_water_multiple_new.iat[24,5]/(air_water_multiple_new.iat[0,5]/1000), air_water_multiple_new.iat[24,6]/(air_water_multiple_new.iat[0,6]/1000)), + "ground_water_single_new_price": linear_interpolate_2045(ground_water_single_new.iat[24,5]/(ground_water_single_new.iat[0,5]/1000), ground_water_single_new.iat[24,6]/(ground_water_single_new.iat[0,6]/1000)), + "ground_water_multiple_new_price": linear_interpolate_2045(ground_water_multiple_new.iat[24,5]/(ground_water_multiple_new.iat[0,5]/1000), ground_water_multiple_new.iat[24,6]/(ground_water_multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "air_water_single_old_price": linear_interpolate_2045(air_water_single_old.iat[5,5], air_water_single_old.iat[5,6]), + "air_water_multiple_old_price": linear_interpolate_2045(air_water_multiple_old.iat[5,5], air_water_multiple_old.iat[5,6]), + "ground_water_single_old_price": linear_interpolate_2045(ground_water_single_old.iat[5,5], ground_water_single_old.iat[5,6]), + "ground_water_multiple_old_price": linear_interpolate_2045(ground_water_multiple_old.iat[5,5], ground_water_multiple_old.iat[5,6]), + "air_water_single_new_price": linear_interpolate_2045(air_water_single_new.iat[5,5], air_water_single_new.iat[5,6]), + "air_water_multiple_new_price": linear_interpolate_2045(air_water_multiple_new.iat[5,5], air_water_multiple_new.iat[5,6]), + "ground_water_single_new_price": linear_interpolate_2045(ground_water_single_new.iat[5,5], ground_water_single_new.iat[5,6]), + "ground_water_multiple_new_price": linear_interpolate_2045(ground_water_multiple_new.iat[5,5], ground_water_multiple_new.iat[5,6]), +} +dic_marginal_cost_2045 = { + "air_water_single_old_price": linear_interpolate_2045(air_water_single_old.iat[23,5] / 1000, air_water_single_old.iat[23,6] / 1000), + "air_water_multiple_old_price": linear_interpolate_2045(air_water_multiple_old.iat[23,5] / 1000, air_water_multiple_old.iat[23,6] / 1000), + "ground_water_single_old_price": linear_interpolate_2045(ground_water_single_old.iat[23,5] / 1000, ground_water_single_old.iat[23,6] / 1000), + "ground_water_multiple_old_price": linear_interpolate_2045(ground_water_multiple_old.iat[23,5] / 1000, ground_water_multiple_old.iat[23,6] / 1000), + "air_water_single_new_price": linear_interpolate_2045(air_water_single_new.iat[23,5] / 1000, air_water_single_new.iat[23,6] / 1000), + "air_water_multiple_new_price": linear_interpolate_2045(air_water_multiple_new.iat[23,5] / 1000, air_water_multiple_new.iat[23,6] / 1000), + "ground_water_single_new_price": linear_interpolate_2045(ground_water_single_new.iat[23,5] / 1000, ground_water_single_new.iat[23,6] / 1000), + "ground_water_multiple_new_price": linear_interpolate_2045(ground_water_multiple_new.iat[23,5] / 1000, ground_water_multiple_new.iat[23,6] / 1000), +} + +dic_2020 = [dic_capacity_cost_overnight_2020, dic_fixom_cost_2020, dic_lifetime_2020, dic_marginal_cost_2020] +dic_2045 = [dic_capacity_cost_overnight_2045, dic_fixom_cost_2045, dic_lifetime_2045, dic_marginal_cost_2045] + +val_2020 = [] +val_2045 = [] +faktoren_2020 = get_agg_price_2020(dic_fixom_cost_2045)[1] +faktoren_2045 = get_agg_price_2045(dic_fixom_cost_2045)[1] + +## Berechnungen +for dic in dic_2020: + val_2020.append(get_agg_price_2020(dic)[0]) + +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)[0]) + +print(val_2020, val_2045) +print(faktoren_2020, faktoren_2045) +``` + +boiler_small_script.py: + +``` +import pandas as pd +import os.path + +def get_faktoren_new(df): + + 
gas_agg = {"single": 0, "multiple": 0} + + for row in df.itertuples(): + bereich = row[1].split(",")[2] + energie = row[1].split(",")[3] + try: + count_insg = int(row[1].split(",")[4]) + count_single = int(row[1].split(",")[5]) + except: + ValueError + + if bereich == "Sachsen-Anhalt": + if energie == "Gas": + gas_agg["single"] += count_single + gas_agg["multiple"] += (count_insg - count_single) + else: + continue + + else: + continue + + gas_agg_sum = sum(gas_agg.values()) + single_new = gas_agg["single"] / gas_agg_sum + multiple_new = gas_agg["multiple"] / gas_agg_sum + + return single_new, multiple_new, + + +def linear_interpolate_2045(wert_1, wert_2): + zeit_1 = 2040 + zeit_2 = 2050 + wert = wert_1 + (((wert_2 - wert_1) / (zeit_2 - zeit_1)) * (2045 - zeit_1)) + + return wert + + +def get_agg_price_2020(dic): + # Neubau und Sanierungen allg nach BMI f. Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau = 1 - neubau + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis + # Daten einlesen + data_folder = os.path.join("/YOUR/DATA/ROAD/TAKE/ME/HOME/TO/THE/PLACE") + hp = os.path.join(data_folder, "2023_04_11_ST_thermische_Primärenergie_neubau_2010-2020.csv") + df = pd.read_csv(hp, encoding="ISO8859-1", delimiter=";", skiprows=range(0, 10), nrows=2150) + + faktoren_new = get_faktoren_new(df) + + # Einfamilinehaus-Mehrfamilienhaus im Altbau Annahme: + single_faktor = 0.7 + multiple_faktor = 0.3 + + single_new_faktor = neubau * faktoren_new[0] + multiple_new_faktor = neubau * faktoren_new[1] + single_old_faktor = altbau * single_faktor + multiple_old_faktor = altbau * multiple_faktor + + single_old = single_old_faktor * dic["single_old_price"] + multiple_old = multiple_old_faktor * dic["multiple_old_price"] + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_old + multiple_old + single_new + multiple_new + + + return preis + + +def get_agg_price_2045(dic): + # Neubau und Sanierungen allg nach BMI f. 
Deutschland + neubau = (0.36 + 0.36 + 0.37 + 0.38 + 0.35 + 0.34 + 0.26 + 0.22 + 0.22 + 0.22 + 0.25 + 0.26 + 0.27 + 0.28 + 0.30 + 0.32 + 0.32 + 0.32 + 0.31 + 0.31) / 20 + altbau = 1 - neubau + + # Verhältnisse Einfamilienhaus-Mehrfamilienhaus nach destatis 2020 + gas_single_new = 693 / 763 + gas_multiple_new = (763 - 693) / 763 + + # Einfamilinehaus-Mehrfamilienhaus im Altbau Annahme: + single_faktor = 0.7 + multiple_faktor = 0.3 + + single_new_faktor = neubau * gas_single_new + multiple_new_faktor = neubau * gas_multiple_new + single_old_faktor = altbau * single_faktor + multiple_old_faktor = altbau * multiple_faktor + + single_old = single_old_faktor * dic["single_old_price"] + multiple_old = multiple_old_faktor * dic["multiple_old_price"] + single_new = single_new_faktor * dic["single_new_price"] + multiple_new = multiple_new_faktor * dic["multiple_new_price"] + + preis = single_old + multiple_old + single_new + multiple_new + + return preis + + +## Daten aus DEA: +## einlesen von Daten +data_folder = os.path.join("/ROAD/TO/DATA") +data = os.path.join(data_folder, "technology_data_heating_installations_-_0003.xlsx") + +##datensheets +single_old = pd.read_excel(data, "202 Gas boiler, ex single", skiprows=4, nrows=33) +multiple_old = pd.read_excel(data, "202 Gas boiler, ex apart", skiprows=4, nrows=33) +single_new = pd.read_excel(data, "202 Gas boiler, new single", skiprows=4, nrows=33) +multiple_new = pd.read_excel(data, "202 Gas boiler, new apart", skiprows=4, nrows=33) + + +dic_capacity_cost_overnight_2020 = { + "single_old_price": (single_old.iat[19,2]*1000)/(single_old.iat[0,2]/1000), + "multiple_old_price": (multiple_old.iat[19,2]*100)/(multiple_old.iat[0,2]/1000), + "single_new_price": (single_new.iat[19,2]*1000)/(single_new.iat[0,2]/1000), + "multiple_new_price": (multiple_new.iat[19,2]*1000)/(multiple_new.iat[0,2]/1000), +} +dic_effiency_2020 = { + "single_old_price": single_old.iat[3,2], + "multiple_old_price": multiple_old.iat[3,2], + "single_new_price": single_new.iat[3,2], + "multiple_new_price": multiple_new.iat[3,2], +} +dic_fixom_cost_2020 = { + "single_old_price": single_old.iat[24,2]/(single_old.iat[0,2]/1000), + "multiple_old_price": multiple_old.iat[24,2]/(multiple_old.iat[0,2]/1000), + "single_new_price": single_new.iat[24,2]/(single_new.iat[0,2]/1000), + "multiple_new_price": multiple_new.iat[24,2]/(multiple_new.iat[0,2]/1000), +} +dic_lifetime_2020 = { + "single_old_price": single_old.iat[5,2], + "multiple_old_price": multiple_old.iat[5,2], + "single_new_price": single_new.iat[5,2], + "multiple_new_price": multiple_new.iat[5,2], +} +dic_marginal_cost_2020 = { + "single_old_price": single_old.iat[23,2] / 1000, + "multiple_old_price": multiple_old.iat[23,2] / 1000, + "single_new_price": single_new.iat[23,2] / 1000, + "multiple_new_price": multiple_new.iat[23,2] / 1000, +} + +dic_capacity_cost_overnight_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[19,5]*1000)/(single_old.iat[0,5]/1000), (single_old.iat[19,6]*1000)/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[19,5]*1000)/(multiple_old.iat[0,5]/1000), (multiple_old.iat[19,6]*1000)/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[19,5]*1000)/(single_new.iat[0,5]/1000), (single_new.iat[19,6]*1000)/(single_new.iat[0,6]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[19,5]*1000)/(multiple_new.iat[0,5]/1000), (multiple_new.iat[19,6]*1000)/(multiple_new.iat[0,6]/1000)), +} + 
+dic_effiency_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[3,5], single_old.iat[3,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[3,5], multiple_old.iat[3,6]) , + "single_new_price": linear_interpolate_2045(single_new.iat[3,5], single_new.iat[3,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[3,5], multiple_new.iat[3,6]) +} + +dic_fixom_cost_2045 = { + "single_old_price": linear_interpolate_2045((single_old.iat[24,5])/(single_old.iat[0,5]/1000), (single_old.iat[24,6])/(single_old.iat[0,6]/1000)), + "multiple_old_price": linear_interpolate_2045((multiple_old.iat[24,5])/(multiple_old.iat[0,5]/1000), (multiple_old.iat[24,6])/(multiple_old.iat[0,6]/1000)), + "single_new_price": linear_interpolate_2045((single_new.iat[24,5])/(single_new.iat[0,5]/1000), (single_new.iat[24,5])/(single_new.iat[0,5]/1000)), + "multiple_new_price": linear_interpolate_2045((multiple_new.iat[24,5])/(multiple_new.iat[0,5]/1000), (multiple_new.iat[24,6])/(multiple_new.iat[0,6]/1000)), +} +dic_lifetime_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[5,5], single_old.iat[5,6]), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[5,5], multiple_old.iat[5,6]), + "single_new_price": linear_interpolate_2045(single_new.iat[5,5], single_new.iat[5,6]), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[5,5], multiple_new.iat[5,6]), +} + +dic_marginal_cost_2045 = { + "single_old_price": linear_interpolate_2045(single_old.iat[23,2] / 1000, single_old.iat[23,2] / 1000), + "multiple_old_price": linear_interpolate_2045(multiple_old.iat[23,2] / 1000, multiple_old.iat[23,2] / 1000), + "single_new_price": linear_interpolate_2045(single_new.iat[23,2] / 1000,single_new.iat[23,2] ), + "multiple_new_price": linear_interpolate_2045(multiple_new.iat[23,2] / 1000, multiple_new.iat[23,2] / 1000), +} + +dic_2020 = [dic_capacity_cost_overnight_2020, dic_effiency_2020, dic_fixom_cost_2020, dic_lifetime_2020, dic_marginal_cost_2020] +dic_2045 = [dic_capacity_cost_overnight_2045,dic_effiency_2045, dic_fixom_cost_2045, dic_lifetime_2045, dic_marginal_cost_2045] +val_2020 = [] +val_2045 = [] + +## Berechnungen +for dic in dic_2020: + val_2020.append(get_agg_price_2020(dic)) + +for dic in dic_2045: + val_2045.append(get_agg_price_2045(dic)) + +print(val_2020, val_2045) + +``` + +**Dataset: `raw/technology_data`** + +??? 
metadata "Metadata" + ```json + { + "name": "technology_data", + "title": "Technologiedaten", + "id": "technology_data", + "description": "Jahresvollaststunden, Leistungsdichte, Nennleistung, Kosten und Effizienzen von energieumwandlungs Technologien", + "language": [ + "de-DE", + "en-GB" + ], + "subject": [], + "keywords": [ + "technologiedaten", + "Jahresvollaststunden", + "Nennleistung", + "Kosten", + "Effizienzen" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Europe", + "extent": "Europe", + "resolution": "" + }, + "temporal": { + "referenceDate": null, + "timeseries": null + }, + "sources": [ + { + "title": "F\u00f6deral Erneuerbar", + "description": "Jahresvollaststunden, Leistungsdichte, Nennleistung, Kosten und Effizienzen von energieumwandlungs Technologien", + "path": "https://www.foederal-erneuerbar.de", + "licenses": null + }, + { + "title": "PV- und Windfl\u00e4chenrechner", + "description": "Der Photovoltaik- und Windfl\u00e4chenrechner - Methoden und Daten", + "path": "https://zenodo.org/record/6794558", + "licenses": null + }, + { + "title": "Ariadne Szenarienreport", + "description": "Ariadne Szenarienreport", + "path": "https://ariadneprojekt.de/media/2022/02/Ariadne_Szenarienreport_Oktober2021_corr0222_lowres.pdf", + "licenses": null + }, + { + "title": "Technologiedaten", + "description": "Kosten und Effizienzen von Energieumwandlungtechnologien", + "path": "https://ens.dk/en/our-services/projections-and-models/technology-data", + "licenses": null + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": [], + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Lokale Verwaltungseinheiten + +Lokale Verwaltungseinheiten (LAUs) von Eurostat, mit NUTS kompatibel. Diese LAU +sind die Bausteine der NUTS und umfassen die Gemeinden und Kommunen der +Europäischen Union. + +**Dataset: `raw/eurostat_lau`** + +??? metadata "Metadata" + ```json + { + "name": "eurostat_lau", + "title": "Lokale Verwaltungseinheiten", + "id": "eurostat_lau", + "description": "Lokale Verwaltungseinheiten (LAUs) von Eurostat, mit NUTS kompatibel. Diese LAU sind die Bausteine der NUTS und umfassen die Gemeinden und Kommunen der Europ\u00e4ischen Union", + "language": [ + "en-GB" + ], + "subject": null, + "keywords": [ + "Verwaltungseinheiten", + "NUTS", + "LAU" + ], + "publicationDate": "2022-12-15", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "NUTS-3" + }, + "temporal": { + "referenceDate": "2022-06-30", + "timeseries": null + }, + "sources": [ + { + "title": "Lokale Verwaltungseinheiten", + "description": "Lokale Verwaltungseinheiten (LAUs) von Eurostat, mit NUTS kompatibel. 
Diese LAU sind die Bausteine der NUTS und umfassen die Gemeinden und Kommunen der Europ\u00e4ischen Union", + "path": "https://ec.europa.eu/eurostat/de/web/nuts/local-administrative-units", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 eurostat, 2023" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 eurostat, 2023" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-25", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Bevölkerungsprognose Sachsen-Anhalt + +Bevölkerungsprognose je Gemeinde bis 2035 des Statistischen Landesamtes +Sachsen-Anhalt. Stand: 2021 + +**Dataset: `raw/stala_st_pop_prog`** + +??? 
metadata "Metadata" + ```json + { + "name": "stala_st_pop_prog", + "title": "Regionalisierte Bev\u00f6lkerungsprognose", + "id": "stala_st_pop_prog", + "description": "Prognostizierter Bev\u00f6lkerungsstand in den Gemeinden, kreisfreien St\u00e4dten und Landkreisen nach Prognosejahr und Geschlecht", + "language": [ + "de-DE" + ], + "subject": [], + "keywords": [ + "Bev\u00f6lkerungsprognose", + "population" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Sachsen-Anhalt", + "extent": "Sachsen-Anhalt", + "resolution": "" + }, + "temporal": { + "referenceDate": null, + "timeseries": [ + { + "start": "2019", + "end": "2035", + "resolution": "1 year", + "alignment": null, + "aggregationType": "sum" + } + ] + }, + "sources": [ + { + "title": "1_Internettabelle_7RBP_nach_Prognosejahr_Geschlecht_alle_Ebenen", + "description": "Prognostizierter Bev\u00f6lkerungsstand in den Gemeinden, kreisfreien St\u00e4dten und Landkreisen nach Prognosejahr und Geschlecht", + "path": "statistik.sachsen-anhalt.de/themen/bevoelkerung-mikrozensus-freiwillige-haushaltserhebungen/bevoelkerung/bevoelkerungsprognose-und-haushalteprognose/#c312231", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 Version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 2023 Landesportal Sachsen-Anhalt " + } + ] + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ }
+ }
+ ],
+ "@id": [],
+ "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json",
+ "review": {
+ "path": "",
+ "badge": ""
+ },
+ "metaMetadata": {
+ "metadataVersion": "OEP-1.5.2",
+ "metadataLicense": {
+ "name": "CC0-1.0",
+ "title": "Creative Commons Zero v1.0 Universal",
+ "path": "https://creativecommons.org/publicdomain/zero/1.0/"
+ }
+ },
+ "_comment": {
+ "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)",
+ "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)",
+ "units": "Use a space between numbers and units (100 m)",
+ "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)",
+ "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)",
+ "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)",
+ "null": "If not applicable use: null",
+ "todo": "If a value is not yet available, use: todo"
+ }
+ }
+ ```
+
+------------------------------
+## Dachflächenpotenzial PV-Aufdachanlagen in ABW
+
+Abschätzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in
+Anhalt-Bitterfeld-Wittenberg durch die Regionale Planungsgemeinschaft.
+
+Dafür wurden auf Basis des
+[Digitalen Oberflächenmodells (DOM2)](https://www.lvermgeo.sachsen-anhalt.de/de/dom2-landesweit.html)
+Schattenberechnungen durchgeführt. Anhand des
+[LoD2 3D-Gebäudemodells](https://www.lvermgeo.sachsen-anhalt.de/de/download_lod2.html)
+wurde für verschiedene Dachausrichtungen (nord, ost, süd, west, flach) die
+installierbare Leistung bestimmt; mittels der Globalstrahlung und typischer
+technischer Parameter wurden für jedes Gebäude und jede Dachflächenorientierung
+potenzielle Erträge berechnet.
+
+Quellen:
+
+- [Hauptseite](https://www.planungsregion-abw.de/geodaten/)
+- [Geodaten](https://gis-entwicklung2.planungsregion-abw.de/geoserver/wfs?SERVICE=WFS&REQUEST=GetCapabilities)
+- [Anwendung](https://ris.planungsregion-abw.de/mapbender/application/pv_dachflaechenpot_rpg_abw)
+
+**Dataset: `raw/rpg_abw_pv_roof_potential`**
+
+???
metadata "Metadata" + ```json + { + "name": "rpg_abw_pv_roof_potential", + "title": "Dachfl\u00e4chenpotenzial PV-Aufdachanlagen in ABW", + "id": "rpg_abw_pv_roof_potential", + "description": "Absch\u00e4tzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft.", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "PV", + "Photovoltaic", + "Fl\u00e4chenpotential", + "Aufdachanlagen" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "ABW", + "extent": "ABW", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-06-10", + "timeseries": null + }, + "sources": [ + { + "title": "Dachfl\u00e4chenpotenzial PV-Aufdachanlagen in ABW", + "description": "Absch\u00e4tzung der installierten Leistung und des Ertrags von PV-Aufdachanlagen in Anhalt-Bitterfeld-Wittenberg der Regionalen Planungsgemeinschaft.", + "path": "https://www.planungsregion-abw.de/geodaten/", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Regionale Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg / 2022" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Regionale Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg / 2022" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ }
+ }
+ ],
+ "@id": null,
+ "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json",
+ "review": {
+ "path": "",
+ "badge": ""
+ },
+ "metaMetadata": {
+ "metadataVersion": "OEP-1.5.2",
+ "metadataLicense": {
+ "name": "CC0-1.0",
+ "title": "Creative Commons Zero v1.0 Universal",
+ "path": "https://creativecommons.org/publicdomain/zero/1.0/"
+ }
+ },
+ "_comment": {
+ "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)",
+ "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)",
+ "units": "Use a space between numbers and units (100 m)",
+ "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)",
+ "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)",
+ "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)",
+ "null": "If not applicable use: null",
+ "todo": "If a value is not yet available, use: todo"
+ }
+ }
+ ```
+
+------------------------------
+## Installierte Leistungen von Biomasse-Konversionstechnologien
+
+Die installierten Leistungen in MW werden im Szenario 80 % Transformationspfad
+und 2,6 Mio. ha Anbauflächen für die Jahre 2020 und 2050 der Tabelle 13 im
+Dokument
+["Technoökonomische Analyse und Transformationspfade des energetischen Biomassepotentials (TATBIO)"](../../digipipe/store/raw/dbfz_biomass_heat_capacities/metadata.json)
+für die folgenden Konversionsanlagen von Biomasse entnommen:
+
+- Biomethan-Blockheizkraftwerk
+- Holzhackschnitzelkessel Sektor Industrie
+- Pelletkessel Sektor GHD
+- Holzhackschnitzelkessel Sektor GHD
+- Scheitholzvergaserkessel
+- Pelletkessel Sektor Gebäude
+- Biogasanlage + Blockheizkraftwerk
+- Biomethan Gas- und Dampfkombikraftwerk
+- Klärschlammfaulung + Blockheizkraftwerk
+- Papier-Zellstoff-KWK
+- Holzvergaser + Blockheizkraftwerk
+- Mikro-Holzgas-Blockheizkraftwerk
+
+Die Konversionstechnologien sind in der Spalte "technology" gelistet, während
+sich ihre installierten Leistungen für die beiden Projektionsjahre in den
+Spalten "capacity_[MW]_2020" und "capacity_[MW]_2050" befinden.
+
+In den Spalten "decentral" und "central" wird mit "x" angegeben, ob ein
+dezentraler bzw. zentraler Einsatz der Konversionsanlage Stand der Technik ist.
+
+In der Spalte "carrier" wird analog zur Konvention der Namensgebung im
+Energiesystem (siehe [esys.md](../../digipipe/store/../../docs/sections/esys.md)) der
+jeweilige in die Konversionsanlage eintretende Energieträger notiert.
+Diese werden Abbildung 3 des Dokuments entnommen. Der Energieträger Schwarzlauge
+wird vereinfachend dem Energieträger feste Biomasse bzw. Holz zugeordnet.
+Klärgas und Holzgas werden vereinfachend Biogas zugeordnet.
+
+In der Spalte "tech" findet die Zuordnung zur Technologie anhand der im
+Energiesystem verwendeten Komponenten (siehe
+[esys.md](../../digipipe/store/../../docs/sections/esys.md)) statt.
+
+**Dataset: `raw/dbfz_biomass_heat_capacities`**
+
+??? metadata "Metadata"
+ ```json
+ {
+ "name": "dbfz_biomass_heat_capacities",
+ "title": "Techno\u00f6konomische Analyse und Transformationspfade des energetischen Biomassepotentials (TATBIO)",
+ "id": "dbfz_biomass_heat_capacities",
+ "description": "Installierte Leistungen von Biomasse-Konversionstechnologien.
Die installierten Leistungen in MW wird im Szenario 80 % Transformationspfad", + "language": [ + "de-DE" + ], + "subject": [], + "keywords": [ + "Biomasse", + "Biomassepotential", + "Analyse", + "Transformationspfade", + "TATBIO" + ], + "publicationDate": "2019-05-08", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "" + }, + "temporal": { + "referenceDate": "2019-04-30", + "timeseries": null + }, + "sources": [ + { + "title": "Techno\u00f6konomische Analyse und Transformationspfade des energetischen Biomassepotentials (TATBIO)", + "description": "Installierte Leistungen von Biomasse-Konversionstechnologien. Die installierten Leistungen in MW wird im Szenario 80 % Transformationspfad", + "path": "https://www.ufz.de/export/data/2/231891_technooekonomische-analyse-und-transformationspfade-des-energetischen-biomassepotentials(1).pdf", + "licenses": null + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": [], + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Temperatur + +Stündliche Mittelwerte der Luft- und Erdbodentemperatur des Deutschen +Wetterdienstes +([Climate Data Center](https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/hourly/)) +für das Jahr 2011 je Gemeinde in der Region ABW, vorverarbeitet im Projekt +[WindNODE](https://windnode-abw.readthedocs.io/en/latest/energy_system_model.html#energy-demand-today). 
+ +Werte: + +- `temp_amb`: Lufttemperatur in 2 m Höhe +- `temp_soil`: Erdbodentemperatur in 1 m Tiefe + +Verwendete Stationen: + +- Wittenberg +- Köthen +- Jessnitz +- Seehausen +- Holzdorf + +Die Zuordnung der Stationsmesswerte zu Gemeinden erfolgte über die jeweils +nächstgelegene Wetterstation. + +**Dataset: `raw/dwd_temperature`** + +??? metadata "Metadata" + ```json + { + "name": "dwd_temperature", + "title": "temperatur_2011", + "id": "dwd_temperature", + "description": "St\u00fcndliche Mittelwerte der Luft- und Erdbodentemperatur des Deutschen Wetterdienstes (Climate Data Center) f\u00fcr das Jahr 2011 je Gemeinde in der Region ABW, vorverarbeitet im Projekt WindNODE.", + "language": [ + "en-GB" + ], + "subject": null, + "keywords": [ + "Wetter", + "Temperatur" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Anhalt-Bitterfeld-Wittenberg", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2023-05-23", + "timeseries": [ + { + "start": "2011-01-01T00:00+01", + "end": "2011-12-31T23:00+01", + "resolution": "1 h", + "alignment": "left", + "aggregationType": null + } + ] + }, + "sources": [ + { + "title": "temperatur_2011", + "description": "St\u00fcndliche Mittelwerte der Luft- und Erdbodentemperatur des Deutschen Wetterdienstes (Climate Data Center) f\u00fcr das Jahr 2011 je Gemeinde in der Region ABW, vorverarbeitet im Projekt WindNODE", + "path": "https://www.dwd.de/DE/leistungen/cdc/climate-data-center.html", + "licenses": [ + { + "name": "GeoNutzV", + "title": "Verordnung zur Festlegung der Nutzungsbestimmungen f\u00fcr die Bereitstellung von Geodaten des Bundes", + "path": "https://www.gesetze-im-internet.de/geonutzv/GeoNutzV.pdf", + "instruction": "Alle frei zug\u00e4nglichen Geodaten und Geodatendienste d\u00fcrfen entsprechend der Verordnung zur Festlegung der Nutzungsbestimmungen f\u00fcr die Bereitstellung von Geodaten des Bundes (GeoNutzV) unter Beigabe eines Quellenvermerks ohne Einschr\u00e4nkungen weiterverwendet werden.", + "attribution": " \u00a9 Deutscher Wetterdienst" + } + ] + } + ], + "licenses": [ + { + "name": "GeoNutzV", + "title": "Verordnung zur Festlegung der Nutzungsbestimmungen f\u00fcr die Bereitstellung von Geodaten des Bundes", + "path": "https://www.gesetze-im-internet.de/geonutzv/GeoNutzV.pdf", + "instruction": "Alle frei zug\u00e4nglichen Geodaten und Geodatendienste d\u00fcrfen entsprechend der Verordnung zur Festlegung der Nutzungsbestimmungen f\u00fcr die Bereitstellung von Geodaten des Bundes (GeoNutzV) unter Beigabe eines Quellenvermerks ohne Einschr\u00e4nkungen weiterverwendet werden.", + "attribution": " \u00a9 Deutscher Wetterdienst" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "aaron.schilling@rl-institut.de", + "date": "2023-08-25", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + 
"primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Emissionen + +Emissionen für die Jahre 1990 und 2019 für Sachsen-Anhalt und disaggregiert für +die Region Anhalt-Bitterfeld-Wittenberg (ABW). Die Grundlage hierfür ist der +[THG-Bericht 2021](https://lau.sachsen-anhalt.de/fileadmin/Bibliothek/Politik_und_Verwaltung/MLU/LAU/Wir_ueber_uns/Publikationen/Fachberichte/Dateien/221014_THG-Bericht.pdf) +Sachsen-Anhalt (ST). + +Datei: `emissions.csv`, Felder: + +- `sector`: Sektor +- `cat`: Kategorie ("*" = alle) +- `subcat`: Unterkategorie ("*" = alle) +- `name`: Bezeichner +- `st`: Emissionen Sachsen-Anhalt in kt CO2-Äquivalent +- `abw`: Emissionen Region ABW in kt CO2-Äquivalent + +`sector`, `cat` und `subcat` folgen der Nomenklatur des Common Reporting Formats +(CRF) nach [KSG Anlage 1](https://www.gesetze-im-internet.de/ksg/anlage_1.html). +[Grafik hierzu](https://expertenrat-klima.de/content/uploads/2023/05/ERK2023_Pruefbericht-Emissionsdaten-des-Jahres-2022.pdf) +(Abb. 2 auf S. 30). + +### Disaggregation + +Anhand unterschiedlicher Kriterien und Datenquellen wurde näherungsweise von den +vorliegenden Emissionen für Sachsen-Anhalt für 1990 und 2019 auf die Region ABW +disaggregiert. Je Sektor sind hier die gewählten +**energiebestimmenden Größen (EnbG)** angegeben, sowie die Herangehensweise zur +jeweiligen Berechnung. + +#### Sektor Energiewirtschaft (CRF 1.A.1 + 1.B) + +Aus der Liste der +[Emissionshandelspflichtigen Anlagen](https://www.dehst.de/SharedDocs/downloads/DE/anlagenlisten/2013-2020/2020.pdf?__blob=publicationFile&v=3) +wurden jene Daten zu Anlagen extrahiert, welche sich in Sachsen-Anhalt befinden +und als Bezeichnung "Energieumwandlung >= 50 MW FWL" oder "Energieumwandlung +20–50 MW FWL" (Haupttätigkeit nach TEHG) aufweisen. +Die Summe der angegebenen Emissionen (t CO2 Äq) jener Anlagen, welche in der +Region ABW liegen, wurde in Relation zu der Summe der Emissionen aus den Anlagen +in Gesamt-ST gesetzt. Dieser Anteil wurde auf die im THG-Bericht angegebene +Emissionsmenge im Sektor "Energiewirtschaft (1.A.1)" sowie "Prozessemissionen +(1.B)" angelegt und so für ABW näherungsweise disaggregiert. + +Hinweise: + +- Aufgrund mangelnder Daten wurde für das Jahr 1990 auf die neuesten verfügbaren + Daten (2005-2007) aus der Anlagenliste zurückgegriffen. 
+- Energiewirtschaftlich relevante Anlagen unter 20 MW FWL sind in der + Anlagenliste nicht erfasst und konnten somit nicht berücksichtigt werden. + +Quellen: + +- [Emissionshandelspflichtige Anlagen in Deutschland 2020 (Stand 03.05.2021)](https://www.dehst.de/SharedDocs/downloads/DE/anlagenlisten/2013-2020/2020.pdf?__blob=publicationFile&v=3) +- [Treibhausgasemissionen in Sachsen-Anhalt 2018 (Stand 12.05.2021)](https://lau.sachsen-anhalt.de/fileadmin/Bibliothek/Politik_und_Verwaltung/MLU/LAU/Wir_ueber_uns/Publikationen/Fachberichte/Dateien/THG_Bericht_2018.pdf) + +##### CRF 1.A.1 + +Energiewirtschaft (Umwandlungsbereich): umfasst die öffentliche Elektrizitäts- +und Wärmeversorgung sowie Raffinerien. + +EnbG: Emissionen aus europäischem Emissionshandel + +##### CRF 1.B + +Diffuse Emissionen aus Brennstoffen: Diese Kategorie beinhaltet flüchtige +Emissionen aus der Gewinnung, Verarbeitung und Verteilung von Brennstoffen. Die +wichtigsten Quellen sind die Verteilung von Erdgas, aber auch Emissionen aus +Förderung und Abfackelung, die Extraktion und Umwandlung von Braunkohle, +Emissionen aus der Raffination von Erdöl sowie Emissionen aus der Lagerung und +Verteilung von Mineralölprodukten. + +EnbG: Emissionen aus europäischem Emissionshandel + +#### Sektor Industrie (CRF 1.A.2) + +Dieser Sektor umfasst sämtliche energiebedingten Emissionen durch verarbeitendes +Gewerbe. + +Zur Disaggregierung wurde der Energieverbrauch der Industriebetriebe in ABW mit +dem Gesamtenergieverbrauch aller Industriebetriebe in Sachsen-Anhalt in Relation +gesetzt. Dabei wurde eine Differenzierung hinsichtlich der +Energieträgerzusammensetzung von ABW im Vergleich zu ST durchgeführt und anhand +von Emissionsfaktoren berechnet. + +EnbG: Energieverbrauch nach Energieträgern + +Quellen: + +- [Energieverbrauch der Industriebetriebe in Sachsen-Anhalt nach ausgewählten Energieträgern und Kreisen](https://statistik.sachsen-anhalt.de/fileadmin/Bibliothek/Landesaemter/StaLa/startseite/Themen/Energie/Tabellen/Energieverwendung/Energieverbrauch_nach_Kreisen_ab_dem_Jahr_2010.xlsx) +- [Emissionsfaktor für Stromerzeugung (UBA)](https://www.umweltbundesamt.de/sites/default/files/medien/479/bilder/dateien/entwicklung_der_spezifischen_emissionen_des_deutschen_strommix_1990-2020_und_erste_schaetzungen_2021.pdf) +- [BISKO Bilanzierungs-Systematik Kommunal (Aktualisierung 11/2019)](https://www.ifeu.de/fileadmin/uploads/BISKO_Methodenpapier_kurz_ifeu_Nov19.pdf) + +#### Sektor Prozessemissionen (CRF 2) + +Dieser Sektor umfasst sämtliche Emissionen, welche durch Industrieprozesse +anfallen. Dies sind Emissionen aus: Herstellung mineralischer Produkte, +chemischer Industrie, Herstellung von Metallen, übrigen Prozessen und +Produktverwendungen (CRF 2.A-H). +Zur Disaggregierung wurde erneut die +[Liste der Emissionshandelspflichtigen Anlagen](https://www.dehst.de/SharedDocs/downloads/DE/anlagenlisten/2013-2020/2020.pdf?__blob=publicationFile&v=3) +herangezogen. Anders als im Sektor Energiewirtschaft (s.o.) wurde jedoch der +Anteil aller Anlagen, welche nicht der Energiewirtschaft zugerechnet werden, zur +Bestimmung des Anteils von ABW an ST gewählt. + +EnbG: Emissionen aus europäischem Emissionshandel + +#### Sektor Verkehr (CRF 1.A.3) + +Dieser Sektor umfasst Emissionen aus dem Straßenverkehr, dem zivilen +Luftverkehr, aus dem Schiffsverkehr, verbrennungsbedingte Emissionen aus dem +Schienenverkehr sowie Emissionen des übrigen Verkehrs und weitere Quellen zur +Bereitstellung der im Verkehr verbrauchten Energie. 
Die Verbrennung von Mineralölprodukten im Straßenverkehr spielt die größte Rolle
+und macht weit über 90 % der sektoralen Emissionen aus. Daher wird zur
+Disaggregation der motorisierte Straßenverkehr über zugelassene Kraftfahrzeuge
+mit durchschnittlichen Fahrleistungen und spezifischen Emissionen pro Kilometer
+und Fahrzeugklasse herangezogen.
+
+Hierfür wird zunächst aus
+[Verkehr in Kilometern (VK) Zeitreihe Jahre 2014 - 2022](https://www.kba.de/DE/Statistik/Kraftverkehr/VerkehrKilometer/vk_inlaenderfahrleistung/vk_inlaenderfahrleistung_node.html;jsessionid=DD419FD0604C0BCC72A9E4533BB0319F.live21324)
+und
+[Umweltfreundlich mobil! Ein ökologischer Verkehrsartenvergleich für den Personen- und Güterverkehr in Deutschland](https://www.umweltbundesamt.de/sites/default/files/medien/5750/publikationen/2021_fb_umweltfreundlich_mobil_bf.pdf)
+ein durchschnittlicher Emissionswert pro Jahr und Fahrzeugklasse ermittelt.
+Dieser wird mit den zugelassenen Fahrzeugen der entsprechenden Fahrzeugklassen
+aus
+[Kraftfahrzeugbestand nach Kraftfahrzeugarten - Stichtag 01.01. - regionale Tiefe: Kreise und krfr. Städte (bis 01.01.2019)](https://www-genesis.destatis.de/genesis//online?operation=table&code=46251-0001&bypass=true&levelindex=0&levelid=1691405772899#abreadcrumb)
+einerseits für ganz Sachsen-Anhalt und andererseits für ABW multipliziert.
+Daraus wird ein Verhältnis der Verkehrsemissionen in ABW zu ST gewonnen.
+
+Hinweise:
+
+- Die Daten zu den zugelassenen Fahrzeugen, gefahrenen Kilometern und
+  Emissionen pro km sind nicht spezifisch für 1990, sondern nur für einzelne
+  Jahre der frühen 1990er verfügbar. Daher ist der Emissionswert für 1990 mit
+  einer höheren Unsicherheit behaftet.
+
+EnbG:
+
+- Zugelassene Kraftfahrzeuge
+- Durchschnittliche Fahrleistung und spez. CO2-Emission pro km und
+  Fahrzeugklasse
+
+Quellen:
+
+- [Kraftfahrzeugbestand nach Kraftfahrzeugarten - Stichtag 01.01. - regionale Tiefe: Kreise und krfr. Städte (bis 01.01.2019)](https://www-genesis.destatis.de/genesis//online?operation=table&code=46251-0001&bypass=true&levelindex=0&levelid=1691405772899#abreadcrumb)
+- [Umweltfreundlich mobil! Ein ökologischer Verkehrsartenvergleich für den Personen- und Güterverkehr in Deutschland](https://www.umweltbundesamt.de/sites/default/files/medien/5750/publikationen/2021_fb_umweltfreundlich_mobil_bf.pdf)
+- [Verkehr in Kilometern (VK) Zeitreihe Jahre 2014 - 2022](https://www.kba.de/DE/Statistik/Kraftverkehr/VerkehrKilometer/vk_inlaenderfahrleistung/vk_inlaenderfahrleistung_node.html;jsessionid=DD419FD0604C0BCC72A9E4533BB0319F.live21324)
+
+#### Sektor Sonstige Energie (insbes. Gebäude) (CRF 1.A.4 + 1.A.5)
+
+Dieser Sektor umfasst den durch Energieumwandlung nicht bereits abgedeckten
+Energiebedarf. Das sind vor allem kleine Einzelfeuerungsanlagen bis hin zu
+immissionsschutzrechtlich genehmigungsbedürftigen Anlagen mit einer
+Nennwärmeleistung von mehreren Megawatt. Zur Disaggregation wurde daher der
+Wärmebedarf von ABW ins Verhältnis zum Wärmebedarf von ganz Sachsen-Anhalt
+gesetzt. Der Wärmebedarf umfasst Raumwärme, Warmwasser sowie Kochen und wird aus
+Daten des Pipeline-Datensatzes
+[demand_heat_region](../../digipipe/store/datasets/demand_heat_region/dataset.md) generiert.
+
+Ergebnis: 17,46 % des Bedarfs in Sachsen-Anhalt entfallen auf ABW.
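+
+Die Anwendung eines solchen Anteils auf die im THG-Bericht ausgewiesenen
+Sektoremissionen lässt sich beispielhaft so skizzieren (fiktive Zahlenwerte,
+frei gewählte Variablennamen; die Berechnung des Anteils `heat_share` zeigt der
+nachfolgende Code-Ausschnitt):
+
+```python
+# Beispielhafte Anwendung des Disaggregationsanteils (fiktive Werte)
+heat_share = 0.1746        # Anteil ABW am Wärmebedarf von Sachsen-Anhalt (EnbG)
+emissions_st = 1000.0      # fiktiv: Sektoremissionen ST in kt CO2-Äquivalent
+emissions_abw = heat_share * emissions_st
+print(f"{emissions_abw:.1f} kt CO2-Äquivalent entfallen auf ABW")  # 174.6
+```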
+
+Code (Python, `geopandas`):
+
+```python
+import geopandas as gpd
+
+# Sektor HH (Haushalte)
+heat_hh_dist_states = gpd.read_file("demand_heat_zonal_stats-res-bkg_vg250_federal_states.gpkg")
+heat_hh_demand_st = float(heat_hh_dist_states.loc[heat_hh_dist_states.nuts == "DEE"].heat_demand)
+heat_hh_demand_abw = gpd.read_file("demand_heat_zonal_stats-res-bkg_vg250_muns_region.gpkg").heat_demand.sum()
+
+# Sektor GHD
+heat_cts_dist_states = gpd.read_file("demand_heat_zonal_stats-ser-bkg_vg250_federal_states.gpkg")
+heat_cts_demand_st = float(heat_cts_dist_states.loc[heat_cts_dist_states.nuts == "DEE"].heat_demand)
+heat_cts_demand_abw = gpd.read_file("demand_heat_zonal_stats-ser-bkg_vg250_muns_region.gpkg").heat_demand.sum()
+
+# Anteil ABW an ST
+heat_share = (heat_hh_demand_abw + heat_cts_demand_abw) / (heat_hh_demand_st + heat_cts_demand_st)
+```
+
+EnbG: Wärmebedarf aus Energiesystem
+
+#### Sektor Landwirtschaft (CRF 3)
+
+Der Sektor umfasst Emissionen aus der Viehwirtschaft und der Bewirtschaftung von
+Böden. Daher werden zunächst die Emissionsunterkategorien 3.A-J der
+Viehwirtschaft oder der Bewirtschaftung von Böden zugeordnet. Anschließend
+werden diese getrennt nach den Viehbeständen bzw. der landwirtschaftlich
+genutzten Fläche disaggregiert.
+
+##### CRF 3.A - Landwirtschaft – Fermentation
+
+Emissionen durch Fermentation (CRF 3.A) entstehen vorrangig durch
+Verdauungsprozesse in der Viehwirtschaft. Deswegen wird der Anteil ABWs an
+diesen Emissionen anhand der Viehbestände abgeschätzt.
+
+Hinweis:
+
+- Die Viehbestände für 1990 sind nicht bekannt, es wird stattdessen auf die
+  Viehbestände von 1996 zurückgegriffen.
+
+EnbG: Viehbestände
+
+Quelle:
+
+- [Viehbestand der landwirtschaftlichen Betriebe in Großvieheinheiten (GV) nach Jahren und Kreisen](https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/land-und-forstwirtschaft-fischerei/tabellen-viehwirtschaft-und-tierische-erzeugnisse#c234218)
+
+##### CRF 3.B-J
+
+In den Unterkategorien 3.C-J ist eine Proportionalität der Emissionen zur
+landwirtschaftlich genutzten Fläche zu erwarten. Unterkategorie 3.B
+"Wirtschaftsdüngerausbringung (ohne Gärreste)" ist allerdings ein Grenzfall:
+Wirtschaftsdünger wird aus Abfällen der Tierhaltung produziert, wobei bereits
+Treibhausgase entstehen; er muss aber nicht vor Ort, sondern kann auf beliebigen
+landwirtschaftlichen Flächen eingesetzt werden. Daher wird auch diese
+Unterkategorie der Landnutzung zugeordnet.
+
+Hinweis:
+
+- Die Flächennutzungsdaten gehen nicht bis 1990 zurück, ändern sich über die
+  Jahre aber nur marginal, sodass hier nur von geringen Abweichungen auszugehen
+  ist.
+
+EnbG: Landwirtschaftlich genutzte Fläche
+
+Quelle:
+
+- [Flaeche_nach_Kultuarten_nach_Jahren_und_Kreisen](https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/land-und-forstwirtschaft-fischerei/tabellen-bodennutzung-und-anbau)
+
+#### Sektor Abfall und Abwasser (CRF 5)
+
+Dieser Sektor besteht vor allem aus Emissionen aus Abfalldeponien, welche bei der
+Zersetzung organischer Materialien in Deponien entstehen. Es wird angenommen,
+dass der Abfall aus Produktionsprozessen gegenüber den Abfällen aus Konsum
+vernachlässigbar ist, weswegen eine Disaggregation auf Grundlage der
+Bevölkerung von ABW vorgenommen wird.
+
+EnbG: Bevölkerung
+
+Quelle:
+
+- [Bevölkerung nach Geschlecht in den Gemeinden](https://genesis.sachsen-anhalt.de/genesis//online?operation=table&code=12411-0001&bypass=true&levelindex=0&levelid=1691507280245#abreadcrumb)
+
+**Dataset: `raw/emissions`**
+
+???
metadata "Metadata" + ```json + { + "Daten Sachsen-Anhalt": "https://lau.sachsen-anhalt.de/fileadmin/Bibliothek/Politik_und_Verwaltung/MLU/LAU/Wir_ueber_uns/Publikationen/Fachberichte/Dateien/221014_THG-Bericht.pdf", + "Datens\u00e4tze Desaggregation": { + "Industrie": "" + } + } + ``` + +------------------------------ +## BMWK Langfristszenarien + +Langfristszenarien des Bundesministerium für Wirtschaft und Klimaschutz, Daten +auf Deutschlandebene. + +Die Daten wurden über den +[Szenario Explorer](https://langfristszenarien.de/enertile-explorer-de/szenario-explorer/) +abgerufen. + +### Verwendete Szenarien + +- **T45-Strom:** Stromfokussiertes Szenario aus den T45-Szenarien aus 2023, die + Wege zur Treibhausgasneutralität bis 2045 unter Einhaltung aktueller + politischer Vorgaben erreichen. Die Daten dieses Szenarios werden als + Grundlage für das Zielszenario in der Region verwendet. +- **TN-Strom:** Stromfokussiertes Szenario aus den TN-Szenarien aus 2021, die + unterschiedliche Pfade für Deutschland mit dem Ziel treibhausgasneutral bis + 2050 zu werden. Die Daten dieses Szenarios werden als Grundlage für den + Status quo verwendet (Ausnahme: Erzeugung Wärmenetze, hier wurden manuell + Daten für 2021 ergänzt). + +### Daten + +#### T45-Strom + +| Datensatz | Quelle | Datei | +|------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------| +| Gebäude: Haushalte und GHD Energiebedarf | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/51944/21559a9532131c061668bf0751e519e3) | `T45-Strom_buildings_heating_demand_by_carrier.csv` | +| Gebäude: Anzahl der Heizungen nach Technologie | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/51944/21559a9532131c061668bf0751e519e3) | `T45-Strom_buildings_heating_structure_by_technology.csv` | +| GHD Energieträger | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/52700/c6980ea467bb26a922d34617b4fd4798) | `T45-Strom_cts_demand.csv` | +| Haushalte Energieträger | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/52700/c6980ea467bb26a922d34617b4fd4798) | `T45-Strom_hh_demand.csv` | +| Industrie Energiebedarf | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/52612/9de48084ac2d54c418daaf02a6ee26e0) | `T45-Strom_ind_demand.csv` | +| Stromsystem Deutschland Leistung | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/48766/5c11999a03c547e04e73d61e4b5fc633) | `T45-Strom_electricity_installed_power.csv` | +| Erzeugung Wärmenetze Deutschland | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/49949/cf898070daec6a4e613dc889927a5feb), [Link2](https://static.agora-energiewende.de/fileadmin/Projekte/2022/2022-11_DE_Large_Scale_Heatpumps/A-EW_293_Rollout_Grosswaermepumpen_WEB.pdf) (S. 
37) | `T45-Strom_Generation_Heatgrids_Germany.csv` | + +#### TN-Strom + +| Datensatz | Quelle | Datei | +|------------------------------------------------|-----------------------------------------------------------------------------------------------------------|----------------------------------------------------------| +| Gebäude: Haushalte und GHD Energiebedarf | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/8198/698cee83d667a2f44fdea7e78ee799a2) | `TN-Strom_buildings_heating_demand_by_carrier.csv` | +| Gebäude: Anzahl der Heizungen nach Technologie | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/8198/698cee83d667a2f44fdea7e78ee799a2) | `TN-Strom_buildings_heating_structure_by_technology.csv` | +| GHD Energieträger | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/8660/ae5a14ff0c320cbd31c5eeff2ede54ba) | `TN-Strom_cts_demand.csv` | +| Haushalte Energieträger | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/8660/ae5a14ff0c320cbd31c5eeff2ede54ba) | `TN-Strom_hh_demand.csv` | +| Industrie Energiebedarf | [Link](https://enertile-explorer.isi.fraunhofer.de:8443/open-view/29085/084bd7f45f40d31fd53341e6a94f532c) | `TN-Strom_ind_demand.csv` | + +**Dataset: `raw/bmwk_long_term_scenarios`** + +??? metadata "Metadata" + ```json + { + "name": "bmwk_long_term_scenarios", + "title": "BMWK Langfristszenarien", + "id": "bmwk_long_term_scenarios", + "description": "Langfristszenarien des Bundesministerium f\u00fcr Wirtschaft und Klimaschutz, Daten auf Deutschlandebene.", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "BMWK", + "Langfristszenario", + "T45-Strom", + "TN-Strom" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": null, + "timeseries": [ + { + "start": null, + "end": null, + "resolution": null, + "alignment": null, + "aggregationType": null + }, + { + "start": null, + "end": null, + "resolution": null, + "alignment": null, + "aggregationType": null + } + ] + }, + "sources": [ + { + "title": "BMWK Langfristszenarien", + "description": "Langfristszenarien des Bundesministerium f\u00fcr Wirtschaft und Klimaschutz, Daten auf Deutschlandebene.", + "path": "https://langfristszenarien.de/enertile-explorer-de/szenario-explorer/", + "licenses": null + }, + { + "title": null, + "description": null, + "path": null, + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], 
+ "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.6.0", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## AGEB – Anwendungsbilanzen für die Endenergiesektoren 2011 bis 2021 + +Detaillierte Anwendungsbilanzen der Endenergiesektoren für 2020 und 2021 sowie +zusammenfassende Zeitreihen zum Endenergieverbrauch nach Energieträgern und +Anwendungszwecken für Jahre von 2011 bis 2021 der AG Energiebilanzen. + +**Dataset: `raw/ageb_energy_balance`** + +??? metadata "Metadata" + ```json + { + "name": "ageb_energy_balance", + "title": "AGEB \u2013 Anwendungsbilanzen f\u00fcr die Endenergiesektoren 2011 bis 2021", + "id": "ageb_energy_balance", + "description": "Detaillierte Anwendungsbilanzen der Endenergiesektoren f\u00fcr 2020 und 2021 sowie zusammenfassende Zeitreihen zum Endenergieverbrauch nach Energietr\u00e4gern und Anwendungszwecken f\u00fcr Jahre von 2011 bis 2021", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Endenergiesektoren", + "Anwendungsbilanzen", + "energy-balance" + ], + "publicationDate": "2022-12-01", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-12-01", + "timeseries": null + }, + "sources": [ + { + "title": "AGEB \u2013 Anwendungsbilanzen f\u00fcr die Endenergiesektoren 2011 bis 2021", + "description": "Detaillierte Anwendungsbilanzen der Endenergiesektoren f\u00fcr 2020 und 2021 sowie zusammenfassende Zeitreihen zum Endenergieverbrauch nach Energietr\u00e4gern und Anwendungszwecken f\u00fcr Jahre von 2011 bis 2021", + "path": "https://ag-energiebilanzen.de/daten-und-fakten/anwendungsbilanzen/", + "licenses": null + } + ], + "licenses": null, + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": 
"metadata", + "comment": "create metadata" + } + ], + "resources": null, + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## sEEnergies Pan-European Thermal Atlas 5.2 (Peta5) + +Wärmebedarf für Europa 2015 in GJ (1ha Auflösung) für + +- Haushalte: Raumwärme und Warmwasser +- GHD: Raumwärme, Warmwasser und Prozesswärme + +Die Daten können auf der +[Projektseite](https://s-eenergies-open-data-euf.hub.arcgis.com) +eingesehen werden. + +### Haushalte + +Abgerufen mittels + +```commandline +wget -O Peta5_0_1_HD_res.zip https://arcgis.com/sharing/rest/content/items/d7d18b63250240a49eb81db972aa573e/data +``` + +### GHD und Industrie + +Abgerufen mittels + +```commandline +wget -O Peta5_0_1_HD_ser.zip https://arcgis.com/sharing/rest/content/items/52ff5e02111142459ed5c2fe3d80b3a0/data +``` + +**Dataset: `raw/seenergies_peta5`** + +??? 
metadata "Metadata" + ```json + { + "name": "seenergies_peta5", + "title": "sEEnergies Pan-European Thermal Atlas 5.2 (Peta5)", + "id": "seenergies_peta5", + "description": "W\u00e4rmebedarf f\u00fcr Europa 2015 in GJ (1ha Aufl\u00f6sung)", + "language": [ + "en-GB" + ], + "subject": null, + "keywords": [ + "European", + "Photovoltaic", + "Fl\u00e4chenpotential", + "Aufdachanlagen" + ], + "publicationDate": "2022-01-01", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Europe", + "extent": "Europe", + "resolution": "1 ha" + }, + "temporal": { + "referenceDate": "2022-01-01", + "timeseries": null + }, + "sources": [ + { + "title": "sEEnergies Pan-European Thermal Atlas 5.2 (Peta5)", + "description": "W\u00e4rmebedarf f\u00fcr Europa 2015 in GJ (1ha Aufl\u00f6sung)", + "path": "https://www.seenergies.eu/peta5/", + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": "The data provided is indiative and for research purpose only", + "attribution": "\u00a9 Flensburg, Halmstad and Aalborg Universities 2022" + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": "The data provided is indiative and for research purpose only", + "attribution": "\u00a9 Flensburg, Halmstad and Aalborg Universities 2022" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Sozialversicherungspflichtig Beschäftigte und Betriebe + +Gemeindedaten der sozialversicherungspflichtig Beschäftigten am 30.06.2022 nach +Wohn- und Arbeitsort - Deutschland, Länder, Kreise und Gemeinden (Jahreszahlen) +der Bundesagentur für Arbeit. + +**Dataset: `raw/ba_employment`** + +??? metadata "Metadata" + ```json + { + "name": "ba_employment", + "title": "Gemeindedaten der sozialversicherungspflichtig Besch\u00e4ftigten nach Wohn- und Arbeitsort", + "id": "ba_employment", + "description": "Zahl der soziaversicherungspflichtig Besch\u00e4ftigten nach: Wohnort, Personengruppen, Arbeitsort, Wohnort gleich Arbeitsort, Einpendeler, Auspendler, Zahl der Betriebe", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Gemeindedaten", + "sozialversicherungspflichtig", + "Besch\u00e4ftigte", + "Wohnort", + "Arbeitsort" + ], + "publicationDate": "2023-01-16", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-06-23", + "timeseries": null + }, + "sources": [ + { + "title": "Gemeindedaten der sozialversicherungspflichtig Besch\u00e4ftigten nach Wohn- und Arbeitsort", + "description": "Zahl der soziaversicherungspflichtig Besch\u00e4ftigten nach: Wohnort, Personengruppen, Arbeitsort, Wohnort gleich Arbeitsort, Einpendeler, Auspendler, Zahl der Betriebe", + "path": "https://statistik.arbeitsagentur.de/SiteGlobals/Forms/Suche/Einzelheftsuche_Formular.html?nn=15024&topic_f=beschaeftigung-sozbe-gemband", + "licenses": [ + { + "name": null, + "title": null, + "path": "https://statistik.arbeitsagentur.de/DE/Statischer-Content/Servicebereich-Navigation/Bezugsbedingungen.html?nn=6654", + "instruction": "Sie k\u00f6nnen Informationen speichern, (auch auszugsweise) mit Quellenangabe weitergeben, vervielf\u00e4ltigen und 
verbreiten. Die Inhalte d\u00fcrfen nicht ver\u00e4ndert oder verf\u00e4lscht werden. Eigene Berechnungen sind erlaubt, jedoch als solche kenntlich zu machen. Im Falle einer Zug\u00e4nglichmachung im Internet soll dies in Form einer Verlinkung auf die Homepage der Statistik der Bundesagentur f\u00fcr Arbeit erfolgen. Die Nutzung der Inhalte f\u00fcr gewerbliche Zwecke, ausgenommen Presse, Rundfunk und Fernsehen und wissenschaftliche Publikationen, bedarf der Genehmigung durch die Statistik der Bundesagentur f\u00fcr Arbeit.", + "attribution": "\u00a9 Statistik der Bundesagentur f\u00fcr Arbeit" + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": "https://statistik.arbeitsagentur.de/DE/Statischer-Content/Servicebereich-Navigation/Bezugsbedingungen.html?nn=6654", + "instruction": "Sie k\u00f6nnen Informationen speichern, (auch auszugsweise) mit Quellenangabe weitergeben, vervielf\u00e4ltigen und verbreiten. Die Inhalte d\u00fcrfen nicht ver\u00e4ndert oder verf\u00e4lscht werden. Eigene Berechnungen sind erlaubt, jedoch als solche kenntlich zu machen. Im Falle einer Zug\u00e4nglichmachung im Internet soll dies in Form einer Verlinkung auf die Homepage der Statistik der Bundesagentur f\u00fcr Arbeit erfolgen. Die Nutzung der Inhalte f\u00fcr gewerbliche Zwecke, ausgenommen Presse, Rundfunk und Fernsehen und wissenschaftliche Publikationen, bedarf der Genehmigung durch die Statistik der Bundesagentur f\u00fcr Arbeit.", + "attribution": "\u00a9 Statistik der Bundesagentur f\u00fcr Arbeit" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "Create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Bevölkerung + +Einwohnerzahl nach Gemeinden des Statistischen Bundesamts. + +**Dataset: `raw/destatis_gv`** + +??? 
metadata "Metadata" + ```json + { + "name": "destatis_gv", + "title": "Adminstratives Gemeinndeverzeichnis", + "id": "destatis_gv", + "description": "Alle politisch selbst\u00e4ndigen Gemeinden mit ausgew\u00e4hlten Merkmalen am 31.12.2022 ", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "destatis", + "gemeindeverzeichnis" + ], + "publicationDate": "2023-01-12", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "" + }, + "temporal": { + "referenceDate": "2022-02-14", + "timeseries": null + }, + "sources": [ + { + "title": "Statistisches Bundesamt", + "description": "Alle politisch selbst\u00e4ndigen Gemeineden mit ausgew\u00e4hlten Merkmalen am 31.12.2022 (4.Quartal)", + "path": "https://www.destatis.de/DE/Themen/Laender-Regionen/Regionales/Gemeindeverzeichnis/Administrativ/Archiv/GVAuszugQ/AuszugGV4QAktuell.html", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Statistisches Bundesamt (Destatis), 2023" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Statistisches Bundesamt (Destatis), 2023" + } + ], + "contributors": [ + { + "title": "hedwiglieselotte", + "email": "hedwig.bartels@rl-institut.de", + "date": "2023-03-28", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Regionalstatistik (GENESIS) + +Enthält folgende Datensätze der statistischen Ämter des Bundes und der Länder: + +### Energieverwendung der Betriebe im Verarbeitenden Gewerbe (43531-01-02-4) + +Jahreserhebung ü. die Energieverwendung der Betriebe im verarbeitendem Gewerbe. + +Der Datensatz umfasst: + +- Betriebe des Verarbeitenden Gewerbes sowie des Bergbaus +und der Gewinnung von Steinen und Erden von Unternehmen des +Produzierenden Gewerbes mit im Allgemeinen 20 und mehr +Beschäftigten. +- Betriebe des Verarbeitenden Gewerbes sowie des Bergbaus +und der Gewinnung von Steinen und Erden mit im Allgemeinen +20 und mehr Beschäftigten von Unternehmen der übrigen +Wirtschaftsbereiche. +Die Berichterstattung schließt Verarbeitende Betriebe des +Handwerks ein. +Bei 7 Wirtschaftszweigen gilt eine Abschneidegrenze von 10 +Beschäftigten. Die Merkmalswerte beziehen sich auf den +gesamten Betrieb, schließen damit die nicht produzierenden +Betriebsteile mit ein. +Maßgebend für die Zuordnung ist ab 2008 die „Klassifikation +der Wirtschaftszweige, Ausgabe 2008 (WZ 2008)“, und zwar +die Abschnitte B und C. + +- Datei: `43531-01-02-4.xlsx` +- Stand: 2021 + +### Betriebe, tätige Personen, Bruttoentgelte (42111-01-04-5) + +Jahreserhebung ü. Betriebe, tätige Personen und Bruttoentgelte der Betriebe im +verarbeitendem Gewerbe. + +Der Datensatz umfasst: + +- Sämtliche Betriebe des Wirtschaftsbereiches Verarbeitendes +Gewerbe sowie Bergbau und Gewinnung von Steinen und Erden, +wenn diese Betriebe zu Unternehmen des Bereiches +Verarbeitendes Gewerbe sowie Bergbau und Gewinnung von +Steinen und Erden gehören und in diesen Unternehmen +mindestens 20 Personen tätig sind; +- die Betriebe des Wirtschaftsbereiches Verarbeitendes +Gewerbe sowie Bergbau und Gewinnung von Steinen und Erden +mit mindestens 20 tätigen Personen, sofern diese Betriebe +zu Unternehmen gehören, deren wirtschaftlicher Schwerpunkt +außerhalb des Bereiches Verarbeitendes Gewerbe sowie +Bergbau und Gewinnung von Steinen und Erden liegt. +Bei 7 kleinbetrieblich strukturierten Branchen gilt eine +untere Erfassungsgrenze von 10 tätigen Personen. +Die Auswahl erfolgt jeweils nach dem Beschäftigtenstand Ende +September des Vorjahres. Die ausgewiesene Beschäftigtenzahl +betrifft dagegen die von Ende September des Berichtsjahres. 
+Die Merkmalswerte beziehen sich auf den gesamten Betrieb, +schließen damit die nicht produzierenden Betriebsteile mit +ein. +Maßgebend für die Zuordnung ist ab 2009 die „Klassifikation +der Wirtschaftszweige, Ausgabe 2008 (WZ 2008)“, und zwar +die Abschnitte B und C. + +- Datei: `42111-01-04-5.xlsx` +- Stand: 30.09.2021 + +### Gebäude mit Wohnraum nach Heizungsart (31211-04-01-5-B) + +Zensus 2011: Gebäude mit Wohnraum nach Heizungsart + +- Datei: `31211-04-01-5-B.xlsx` +- Stand: 09.05.2011 + +### Gebäude mit Wohnraum nach Heizungsart (31231-02-01-5) + +Bestand an Wohngebäuden und Wohnungen in Wohn- und Nichtwohngebäuden - +Fortschreibung auf Basis der endgültigen Ergebnisse der Gebäude- und +Wohnungszählung 2011 (Zensus 2011). + +- Datei: `31231-02-01-5.xlsx` +- Stand: 31.12.2021 + +**Dataset: `raw/regiostat`** + +??? metadata "Metadata" + ```json + { + "name": "regiostat", + "title": "Regionalstatistik (GENESIS)", + "id": "regiostat", + "description": "Energieverwendung der Betriebe im Verarbeitenden Gewerbe (43531-01-02-4), Betriebe, t\u00e4tige Personen, Bruttoentgelte (42111-01-04-5), Geb\u00e4ude mit Wohnraum nach Heizungsart (31211-04-01-5-B), Geb\u00e4ude mit Wohnraum nach Heizungsart (31231-02-01-5)", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Regionalstatistik", + "Energieverwendung", + "verarbeitendes Gewerbe", + "t\u00e4tige Personen", + "Bruttoentgelte", + "Geb\u00e4ude", + "Heizungsart" + ], + "publicationDate": null, + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2021-01-01", + "timeseries": null + }, + "sources": [ + { + "title": "Regionalstatistik (GENESIS)", + "description": "Energieverwendung der Betriebe im Verarbeitenden Gewerbe (43531-01-02-4), Betriebe, t\u00e4tige Personen, Bruttoentgelte (42111-01-04-5), Geb\u00e4ude mit Wohnraum nach Heizungsart (31211-04-01-5-B), Geb\u00e4ude mit Wohnraum nach Heizungsart (31231-02-01-5)", + "path": [ + "https://www.regionalstatistik.de/genesis//online?operation=table&code=43531-01-02-4", + "https://www.regionalstatistik.de/genesis//online?operation=table&code=42111-01-04-5" + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Statistische \u00c4mter des Bundes und der L\u00e4nder, 2023" + } + ] + }, + { + "title": null, + "description": null, + "path": null, + "licenses": [ + { + "name": null, + "title": null, + "path": null, + 
"instruction": null, + "attribution": null + } + ] + } + ], + "licenses": [ + { + "name": null, + "title": null, + "path": null, + "instruction": null, + "attribution": null + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Geodaten PV- und Windflächenrechner + +Geodaten aus dem [PV- und Windflächenrechner](https://www.agora-energiewende.de/service/pv-und-windflaechenrechner/). + +Mehr Informationen: + +- [Begleitdokument](https://zenodo.org/record/6794558) +- [Geodaten Potenzialflächen](https://zenodo.org/record/6728382) + +Enthält: + +- Geodaten +- Metadaten +- App-Datapackage + +**Dataset: `raw/rli_pv_wfr`** + +??? 
metadata "Metadata" + ```json + { + "name": "rli_pv_wfr", + "title": "Geodaten PV- und Windfl\u00e4chenrechner", + "id": "rli_pv_wfr", + "description": "Geodaten aus dem PV- und Windfl\u00e4chenrechner", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Geodaten", + "PV-Fl\u00e4chenrechner", + "Windfl\u00e4chenrechner", + "Potentialfl\u00e4chen" + ], + "publicationDate": "2022-06-05", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-06-05", + "timeseries": null + }, + "sources": [ + { + "title": "Geodaten PV- und Windfl\u00e4chenrechner", + "description": "Geodaten aus dem PV- und Windfl\u00e4chenrechner", + "path": "https://zenodo.org/record/6728382", + "licenses": null + }, + { + "title": null, + "description": null, + "path": null, + "licenses": [ + { + "name": "CC BY-NC 4.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://creativecommons.org/licenses/by-nc/4.0/", + "instruction": "you are free to copy, redistribute and adapt them for non-commercial purposes, provided you give appropriate credit. Note that the data is made available as-is and without warranty. We cannot guarantee its accuracy, and accept no responsibility for any liability arising from its use. You are advised to examine the quality of the data for your intended purposes, and to consult the publications linked on this page.", + "attribution": "\u00a9 Reiner Lemoine INstitut, 2022" + } + ] + } + ], + "licenses": [ + { + "name": "CC BY-NC 4.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://creativecommons.org/licenses/by-nc/4.0/", + "instruction": "you are free to copy, redistribute and adapt them for non-commercial purposes, provided you give appropriate credit. Note that the data is made available as-is and without warranty. We cannot guarantee its accuracy, and accept no responsibility for any liability arising from its use. You are advised to examine the quality of the data for your intended purposes, and to consult the publications linked on this page.", + "attribution": "\u00a9 Reiner Lemoine INstitut, 2022" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Regionalplan Anhalt-Bitterfeld-Wittenberg + +Geodatensätze aus Teilplänen Wind 2018 und 2027 der Regionalen +Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg. + +### Sachlicher Teilplan Wind 2018 + +Geodaten aus rechtskräftigem +[Sachlichen Teilplan Wind 2018](https://www.planungsregion-abw.de/regionalplanung/teilplan-windenergie/teilplan-2018/). + +> Im Sachlichen Teilplan "Nutzung der Windenergie in der Planungsregion +> Anhalt-Bitterfeld-Wittenberg" vom 30.05.2018 werden 22 Vorranggebiete für die +> Nutzung der Windenergie mit der Wirkung von Eignungsgebieten festgelegt. Sie +> dienen der raumordnerischen Steuerung der Errichtung von raumbedeutsamen +> Windenergieanlagen in Konzentrationszonen. +> +> Die oberste Landesentwicklungsbehörde hat am 01.08.2018 die Genehmigung +> erteilt. Mit Bekanntmachung der Genehmigung tritt der Sachliche Teilplan in +> Kraft. + +Dateien: + +- Vorrang-/Eignungsgebiete: `stp_2018_vreg.gpkg` + ([Quelle](https://gis.planungsregion-abw.de/geoserver/stp_wind2018/ows?SERVICE=WFS&REQUEST=GetCapabilities)) + +### Sachlicher Teilplan Wind 2027 + +Geodaten aus Planentwurf des +[Sachlichen Teilplan Wind 2027](https://www.planungsregion-abw.de/regionalplanung/teilplan-windenergie/teilplan-2027/). + +> Die Regionalversammlung hat am 03.03.2023 beschlossen, den Sachlichen +> Teilplan "Windenergie 2027 in der Planungsregion Anhalt-Bitterfeld-Wittenberg" +> aufzustellen und mit der Bekanntgabe der Allgemeinen Planungsabsicht die +> beabsichtigten Auswahlkriterien und mögliche Gebietskulisse der Vorranggebiete +> für die Nutzung der Windenergie bzw. für Repowering von Windenergieanlagen +> vorzustellen. + +Dateien: + +- Suchräume: `stp_2027_suchraum.gpkg` (Quelle: RPG ABW) +- Planabsicht Vorranggebiete: `stp_2027_ideen_vr.gpkg` (Quelle: RPG ABW) +- Planabsicht Repoweringgebiete: `stp_2027_ideen_repower.gpkg` (Quelle: RPG ABW) + +**Dataset: `raw/rpg_abw_regional_plan`** + +??? 
metadata "Metadata" + ```json + { + "name": "rpg_abw_regional_plan", + "title": "Regionalplan Anhalt-Bitterfeld-Wittenberg", + "id": "rpg_abw_regional_plan", + "description": "Geodatens\u00e4tze aus Teilpl\u00e4nen Wind 2018 und 2027 der Regionalen Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg.", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Geodatens\u00e4tze", + "Teilpl\u00e4ne", + "PLanungsgemeinschaft" + ], + "publicationDate": "2018-05-30", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "ABW", + "extent": "ABW", + "resolution": null + }, + "temporal": { + "referenceDate": "2023-03-03", + "timeseries": null + }, + "sources": [ + { + "title": "Regionalplan Anhalt-Bitterfeld-Wittenberg", + "description": "Geodatens\u00e4tze aus Teilpl\u00e4nen Wind 2018 und 2027 der Regionalen Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg.", + "path": [ + "https://www.planungsregion-abw.de/regionalplanung/teilplan-windenergie/teilplan-2018/", + "https://www.planungsregion-abw.de/regionalplanung/teilplan-windenergie/teilplan-2027/" + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Regionale Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg / Jahr" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Regionale Planungsgemeinschaft Anhalt-Bitterfeld-Wittenberg / Jahr" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Marktstammdatenregister Datenkorrektur PV + +Überprüfung und manuelle Datenkorrektur der Photovoltaikanlagen aus dem +prozessierten Marktstammdatenregister (Datensatz: +[bnetza_mastr](../../digipipe/store/raw/bnetza_mastr/dataset.md)). + +### Plausibiltätsprüfung + +Um grobe Fehler herauszufiltern wird überprüft, ob + +- Anlage in Betrieb ist (status = "In Betrieb"), +- Anlage Strom produziert, +- Brutto- und Nettokapazität plausibel sind und +- die Kategorisierung, d.h. Zuordnung eine PV-Anlage zu Freifläche oder Dach, + plausibel ist (manuelle, visuelle Prüfung von geolokalisierten + PV-Aufdachanlagen anhand von + [Orthofotos](https://www.geodatenportal.sachsen-anhalt.de/wss/service/ST_LVermGeo_DOP_WMS_OpenData/guest)) + +### Dateien + +- Korrektur Freiflächenanlagen `bnetza_mastr_pv_ground_region_correction.ods` +- Korrektur Aufdachanlagen `bnetza_mastr_pv_roof_region_correction.ods` + +mit Spalten: + +- _mastr_id_: ID aus dem MaStR +- _reason_: Fehler (wrong_type, wrong_position) +- _wrong_attr_: Fehlerhaftes Attribut +- _correction_: Korrigierter Attributwert (None, wenn Korrektur nicht möglich). + Korrigierte Geometrien liegen in EPSG:3035 vor. + +**Dataset: `raw/bnetza_mastr_correction_region`** + +??? 
metadata "Metadata" + ```json + { + "name": "bnetza_mastr_correction", + "title": "Marktstammdatenregisterdaten - Manuelle Korrektur", + "id": "bnetza_mastr", + "description": "Daten aus dem Marktstammdatenregister der Bundesnetzagentur", + "language": [ + "en-GB", + "de-DE" + ], + "subject": null, + "keywords": [ + "Markstammdatenregister", + "openmastr", + "mastr" + ], + "publicationDate": "2022-12-19", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-12-19", + "timeseries": null + }, + "sources": [ + { + "title": "Marktstammdatenregister", + "description": "Marktstammdatenregister der Bundesnetzagentur Deutschland", + "path": "https://www.marktstammdatenregister.de/MaStR/Datendownload", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland \u2013 Namensnennung \u2013 Version 2.0", + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Marktstammdatenregister 2023" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland \u2013 Namensnennung \u2013 Version 2.0?", + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Marktstammdatenregister 2023" + } + ], + "contributors": [ + { + "title": "hedwiglieselotte", + "email": "hedwig.bartels@rl-institut.de", + "date": "2023-03-28", + "object": "metadata", + "comment": "Create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Erzeugungsanlagen aus Marktstammdatenregister + +Ereugungsanlagen aus dem Markstammdatenregister, das mit dem Tool +[open-mastr](https://github.com/OpenEnergyPlatform/open-MaStR) erstellt und +abgelegt wurde. Die Daten wurden folgendermaßen erstellt: +``` +from open_mastr import Mastr +db = Mastr() +db.download("bulk") +db.to_csv(None) # (None for all data) +``` + +Die abgelegten CSV-Dateien (alle Tabellen) wurden um einen benutzerdefinierten +Export von Speichereinheiten mit +`sqlite3 -header -csv -separator "," open-mastr.db "select * from storage_units;" > bnetza_mastr_storage_unit_raw.csv` +erweitert. Anschließend wurden alle Dateien komprimiert. + +Das Marktstammdatenregister (MaStR) ist ein deutsches Register, welches von der +Bundesnetzagentur (BNetza) bereitgestellt wird und alle in Deutschland +befindlichen Strom- und Gasanlagen erfasst. + +**Dataset: `raw/bnetza_mastr`** + +??? 
metadata "Metadata" + ```json + { + "name": "bnetza_mastr", + "title": "Marktstammdatenregisterdaten", + "id": "bnetza_mastr", + "description": "Daten aus dem Marktstammdatenregister der Bundesnetzagentur", + "language": [ + "en-GB", + "de-DE" + ], + "subject": null, + "keywords": [ + "Markstammdatenregister", + "openmastr", + "mastr" + ], + "publicationDate": "2022-12-19", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": null + }, + "temporal": { + "referenceDate": "2022-12-19", + "timeseries": null + }, + "sources": [ + { + "title": "Marktstammdatenregister", + "description": "Marktstammdatenregister der Bundesnetzagentur Deutschland", + "path": "https://www.marktstammdatenregister.de/MaStR/Datendownload", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland \u2013 Namensnennung \u2013 Version 2.0", + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Marktstammdatenregister 2023" + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland \u2013 Namensnennung \u2013 Version 2.0?", + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Marktstammdatenregister 2023" + } + ], + "contributors": [ + { + "title": "hedwiglieselotte", + "email": "hedwig.bartels@rl-institut.de", + "date": "2023-03-28", + "object": "metadata", + "comment": "Create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Energiedaten Sachsen-Anhalt + +Datensätze zur Energie- und Wasserversorgung des Statistischen Landesamtes +Sachsen-Anhalt. + +### Daten + +Stromverbrauch der Industriebetriebe nach Kreisen 2003-2021 in MWh + +- [Quelle](https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/energie-und-wasserversorgung/tabellen-energieverwendung#c206986) + +**Dataset: `raw/stala_st_energy`** + +??? metadata "Metadata" + ```json + { + "name": "stala_st_energy", + "title": "Energiedaten Sachsen-Anhalt", + "id": "stala_st_energy", + "description": "Datens\u00e4tze zur Energie- und Wasserversorgung des Statistischen Landesamtes Sachsen-Anhalt.", + "language": [ + "de-DE" + ], + "subject": null, + "keywords": [ + "Energiedaten", + "Energieversorgung", + "Wasserversorgung" + ], + "publicationDate": "2022-01-01", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": null, + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Sachsen-Anhalt", + "extent": "Sachsen-Anhalt", + "resolution": "NUTS-3" + }, + "temporal": { + "referenceDate": "2022-01-01", + "timeseries": [ + { + "start": "2003-01-01T00:00+01", + "end": "2021-12-31T23:00+01", + "resolution": "1 a", + "alignment": "left" + } + ] + }, + "sources": [ + { + "title": "Energiedaten Sachsen-Anhalt", + "description": "Datens\u00e4tze zur Energie- und Wasserversorgung des Statistischen Landesamtes Sachsen-Anhalt.", + "path": "https://statistik.sachsen-anhalt.de/themen/wirtschaftsbereiche/energie-und-wasserversorgung/tabellen-energieverwendung#c206986", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of 
others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Statistisches Landesamt Sachsen-Anhalt, Halle (Saale)." + } + ] + } + ], + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets; be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": "\u00a9 Statistisches Landesamt Sachsen-Anhalt, Halle (Saale)." + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## DemandRegio + +Regionalisierte Bevölkerungsprognose, Haushalte sowie Strom- und Gasbedarfe +inkl. Zeitreihen auf Landkreisebene. + +Die Daten wurden abgerufen mit einer +[modifizierten Version des DemandRegio disaggregators](https://github.com/nesnoj/disaggregator), +in der softwareseitige, jedoch keine methodischen Änderungen vorgenommen wurden. + +Der disaggregator basiert auf Daten bis 2017, anschließende Jahre werden +fortgeschrieben. + +Weitere Informationen zum Projekt DemandRegio: + +- [Abschlussbericht](https://www.ffe.de/wp-content/uploads/2020/10/DemandRegio_Abschlussbericht.pdf) +- [Abschlussworkshop](https://www.tu.berlin/er/forschung/projekte/demandregio-2) + +Die erzeugten Rohdaten wie unten beschrieben wurden mittels +[API](http://opendata.ffe.de:4000/) abgerufen. Diese können alternativ direkt +vom [OpenData-Portal der FfE](https://opendata.ffe.de/project/demandregio/) +bezogen werden. 
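+
+Zur Einordnung: Die in den folgenden Abschnitten erzeugten CSV-Dateien (z. B.
+`dr_hh_population.csv`) enthalten Werte je NUTS-3-Region und können für die
+Region Anhalt-Bitterfeld-Wittenberg gefiltert werden. Eine minimale Skizze
+unter folgenden Annahmen (nicht Teil des Originaldatensatzes): der Index der
+CSV enthält NUTS-3-Codes, und für ABW werden `DEE01` (Dessau-Roßlau), `DEE05`
+(Anhalt-Bitterfeld) und `DEE0E` (Wittenberg) angenommen.
+
+```python
+import pandas as pd
+
+# Annahme: NUTS-3-Codes der Region ABW (sonst vorab entsprechend mappen)
+ABW_NUTS3 = ["DEE01", "DEE05", "DEE0E"]
+
+# Unten erzeugte Bevölkerungsdaten einlesen und auf die Region eingrenzen
+population = pd.read_csv("dr_hh_population.csv", index_col=0)
+abw_population = population.loc[population.index.intersection(ABW_NUTS3)]
+print(abw_population)
+```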
+ +Verwendetes Wetterjahr für Gasbedarfszeitreihen: 2011 + +**Installation (in separater venv):** + +```commandline +pip install disaggregator@git+https://github.com/nesnoj/disaggregator.git#egg=disaggregator +``` + +### Details zum Datenabruf + +#### Bevölkerung + +Bevölkerung (Summe) und Bevölkerung je Haushaltsgröße (1, 2, 3, 4, 5, >5) je +NUTS3. + +Jahre: + +- Bevölkerung bis 2017 historische Werte +- Bevölkerung ab 2018 prognostizierte Werte basierend auf der 14. koordinierten + Bevölkerungsvorausberechnung der Statistischen Ämter von Bund und Ländern. +- Haushalte nur 2011 + +```python +import pandas as pd +from disaggregator import data + +## Population +dr_hh_population = pd.DataFrame() +for year in [2010, 2015, 2017, 2020, 2021, 2022, 2025, 2030, 2035, 2040, 2045]: + dr_hh_population[year] = round(data.population(year=year)).astype(int) + +dr_hh_population.to_csv("dr_hh_population.csv") + +## Households +data.households_per_size().to_csv("dr_hh_households_2011.csv") +``` + +#### Haushalte: Strom + +Bedarfe und SLP-Zeitreihen je NUTS3 mit Bottom-Up-Methode nach Haushaltsgröße. + +Jahre: + +- 2017: Letzte verfügbare Daten +- 2022: Status quo, Fortschreibung mit Berücksichtigung Demografie und + Wanderung +- 2035: Fortschreibungsjahr mit Berücksichtigung Demografie und Wanderung +- 2045: Fortschreibungsjahr + +```python +from disaggregator import spatial, temporal + +## Consumption +spatial.disagg_households_power( + by="households", + weight_by_income=True, + year=2022, + scale_by_pop=True, +).to_csv(f"dr_hh_power_demand_2022.csv") + +## Timeseries +temporal.disagg_temporal_power_housholds_slp( + use_nuts3code=True, + by="households", + weight_by_income=True, + year=2022, + scale_by_pop=True, +).to_csv(f"dr_hh_power_timeseries_2022.csv") +``` + +#### Haushalte: Gas + +Zeitreihen je NUTS3 + +```python +from disaggregator import temporal + +## Timeseries +temporal.disagg_temporal_gas_households( + use_nuts3code=True, + how='top-down', + year=2011, +).to_csv(f"dr_hh_gas_timeseries_2011.csv") +``` + + +#### GHD und Industrie: Strom + +Bedarfe und Zeitreihen je NUTS3: + +- Bedarfe: Je Wirtschaftszweig (WZ), abzüglich Eigenerzeugung +- Zeitreihen: Für alle WZ bedarfsgewichtet aggregiert, Einzelprofile basieren + je nach WZ auf gemessenen oder SLP inkl. Wanderung +- Letzte verfügbare Daten aus 2017, Fortschreibung für 2022 mit + Berücksichtigung Beschäftigte und Effizienzgewinne + +```python +from disaggregator import spatial, temporal + +######## +## CTS # +######## + +## Consumption +spatial.disagg_CTS_industry( + sector='CTS', + source='power', + use_nuts3code=True, + year=2022, +).to_csv("dr_cts_power_demand_2022.csv") +## Timeseries +temporal.disagg_temporal_power_CTS( + detailed=False, + use_nuts3code=True, + year=2022, +).to_csv("dr_cts_power_timeseries_2022.csv") + +############# +## Industry # +############# + +## Consumption +spatial.disagg_CTS_industry( + sector='industry', + source='power', + use_nuts3code=True, + year=2022, +).to_csv("dr_ind_power_demand_2022.csv") +## Timeseries +temporal.disagg_temporal_industry( + source="power", + detailed=False, + use_nuts3code=True, + no_self_gen=False, + year=2022, +).to_csv("dr_ind_power_timeseries_2022.csv") +``` + +#### GHD: Gas + +Zeitreihen je NUTS3 für alle WZ bedarfsgewichtet aggregiert, Einzelprofile +basieren je nach WZ auf gemessenen oder SLP inkl. Wanderung. Letzte verfügbare +Daten aus 2017, Fortschreibung für 2022 mit Berücksichtigung Beschäftigte und +Effizienzgewinne. 
+ +```python +from disaggregator import spatial, temporal + +## Timeseries +x=temporal.disagg_temporal_gas_CTS( + detailed=False, + use_nuts3code=True, + year=2011, +).to_csv("dr_cts_gas_timeseries_2011.csv") +``` + +#### Industrie: Gas + +Bedarfe und Zeitreihen je NUTS3: + +- Bedarfe: Je Wirtschaftszweig (WZ), abzüglich Eigenerzeugung +- Zeitreihen: Für alle WZ bedarfsgewichtet aggregiert, Einzelprofile basieren + je nach WZ auf gemessenen oder SLP inkl. Wanderung +- Letzte verfügbare Daten aus 2017, Fortschreibung für 2022 mit + Berücksichtigung Beschäftigte und Effizienzgewinne + +```python +from disaggregator import spatial, temporal + +## Consumption +spatial.disagg_CTS_industry( + sector='industry', + source='gas', + use_nuts3code=True, + year=2022, +).to_csv("dr_ind_gas_demand_2022.csv") +## Timeseries +x=temporal.disagg_temporal_industry( + source="gas", + detailed=False, + use_nuts3code=True, + no_self_gen=False, + year=2011, +).to_csv("dr_ind_gas_timeseries_2011.csv") +``` + +**Dataset: `raw/demandregio`** + +??? metadata "Metadata" + ```json + { + "name": "demandregio", + "title": "DemandRegio", + "id": "demandregio", + "description": "Regionalisierte Bev\u00f6lkerungsprognose, Haushalte sowie Strom- und Gasbedarfe inkl. Zeitreihen auf Landkreisebene.", + "language": [ + "en-GB", + "de-DE" + ], + "subject": null, + "keywords": [ + "Bev\u00f6lkerung", + "Bev\u00f6lkerungsprognose", + "Strombedarf", + "Gasbedarf", + "Haushalte", + "disaggregator", + "disaggregation" + ], + "publicationDate": "2020-09-30", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "NUTS-3" + }, + "temporal": { + "referenceDate": "2022-01-01", + "timeseries": [ + { + "start": "2022-01-01T00:00+01", + "end": "2022-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + }, + { + "start": "2035-01-01T00:00+01", + "end": "2035-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + }, + { + "start": "2045-01-01T00:00+01", + "end": "2045-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + }, + { + "start": "2011-01-01T00:00+01", + "end": "2011-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + }, + { + "start": "2022-01-01T00:00+01", + "end": "2022-12-31T23:00+01", + "resolution": "1 h", + "alignment": null, + "aggregationType": "sum" + } + ] + }, + "sources": [ + { + "title": "DemandRegio", + "description": "Regionalisierte Bev\u00f6lkerungsprognose, Haushalte sowie Strom- und Gasbedarfe inkl. Zeitreihen auf Landkreisebene.", + "path": "https://github.com/nesnoj/disaggregator/", + "licenses": [ + { + "name": "CC BY 4.0 DE", + "title": "Creative Commons Namensnennung 4.0 Deutschland", + "path": "https://creativecommons.org/licenses/by/4.0/deed.de", + "instruction": "Sie m\u00fcssen angemessene Urheber- und Rechteangaben machen, einen Link zur Lizenz beif\u00fcgen und angeben, ob \u00c4nderungen vorgenommen wurden. 
Diese Angaben d\u00fcrfen in jeder angemessenen Art und Weise gemacht werden, allerdings nicht so, dass der Eindruck entsteht, der Lizenzgeber unterst\u00fctze gerade Sie oder Ihre Nutzung besonders.", + "attribution": "\u00a9 FZJ, TUB, FfE" + }, + { + "name": "GNU FDL", + "title": "GNU General Public License v3.0", + "path": "https://www.gnu.org/licenses/gpl-3.0.en.html", + "instruction": "Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed", + "attribution": "\u00a9 FZJ, TUB, FfE" + } + ] + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-08-15", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## OpenStreetMap + +OpenStreetMap Datenauszug Deutschland. + +Quelle: https://download.geofabrik.de/europe/germany-230101.osm.pbf + +Ist nicht Teil des Eingangsdaten-Packages - manueller Download erforderlich. + +**Dataset: `raw/osm`** + +??? 
metadata "Metadata" + ```json + { + "name": "openstreetmap", + "title": "", + "id": "openstreetmap", + "description": "OpenStreetMap extract", + "language": [ + "de-DE", + "en-GB" + ], + "subject": [], + "keywords": [ + "openstreetmap", + "osm" + ], + "publicationDate": "2023-06-30", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "" + }, + "temporal": { + "referenceDate": "2023-06-30", + "timeseries": [] + }, + "sources": [ + { + "title": "OpenStreetMap Data Extracts (Geofabrik)", + "description": "Full data extract of OpenStreetMap data", + "path": "https://download.geofabrik.de/europe/germany-230101.osm.pbf", + "licenses": [ + { + "name": "ODbL-1.0", + "title": "Open Data Commons Open Database License 1.0", + "path": "https://opendatacommons.org/licenses/odbl/1.0/", + "instruction": "You are free: To Share, To Create, To Adapt; As long as you: Attribute, Share-Alike, Keep open!", + "attribution": "\u00a9 OpenStreetMap contributors" + } + ] + } + ], + "licenses": [ + { + "name": "ODbL-1.0", + "title": "Open Data Commons Open Database License 1.0", + "path": "https://opendatacommons.org/licenses/odbl/1.0/", + "instruction": "You are free: To Share, To Create, To Adapt; As long as you: Attribute, Share-Alike, Keep open!", + "attribution": "\u00a9 OpenStreetMap contributors" + } + ], + "contributors": [ + { + "title": "nesnoj", + "email": "jonathan.amme@rl-institut.de", + "date": "2023-06-30", + "object": "metadata", + "comment": "Create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "oemetadata_v1.5.1", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## EE-Einspeisezeitreihen + +Einspeisezeitreihen für Erneuerbare Energien, normiert auf 1 MW bzw. 1 p.u. +Als Wetterjahr wird 2011 verwendet, siehe +[Szenarien](../../digipipe/store/../../docs/sections/scenarios.md). + +### Windenergie + +Stündlich aufgelöste Zeitreihe der Windenergie Einspeisung über 1 Jahr auf Basis +von [MaStR](../../digipipe/store/raw/bnetza_mastr/dataset.md) und +[renewables.ninja](http://renewables.ninja). +Auf einen Auflösung auf Gemeindeebene wird verzichtet, da die Differenz der +Produktion der Gemeinden nach renewables.ninja <5 % beträgt. + +#### Windenergieanlage (2022) + +Für renewables.ninja sind Position (lat, lon), Nennleistung (capacity), +Nabenhöhe und Turbinentyp erforderlich. + +##### Position + +Hierfür wird aus den Zentroiden der Gemeinden ein räumlicher Mittelwert +anhand des Datensatzes +[bkg_vg250_muns_region](../../digipipe/store/datasets/bkg_vg250_muns_region/dataset.md) +(`bkg_vg250_muns_region.gpkg`) gebildet: + +``` +import geopandas as gpd +import os.path + +def get_position(gdf): + df = gpd.read_file(gdf) + points_of_muns = df["geometry"].centroid + points_of_muns_crs = points_of_muns.to_crs(4326) + point_df = [ + points_of_muns_crs.y.sum()/len(points_of_muns), + points_of_muns_crs.x.sum()/len(points_of_muns) + ] + return point_df + +data_folder = os.path.join("your_data_folder") +muns_gpkg = os.path.join(data_folder, "bkg_vg250_muns_region.gpkg") +center_position = get_position(muns_gpkg) +``` + +##### Nennleistung + +Wird auf 1 MW gesetzt/normiert. + +##### Nabenhöhe + +Aus dem Datensatz +[bnetza_mastr_wind_region](../../digipipe/store/datasets/bnetza_mastr_wind_region/dataset.md) +(`bnetza_mastr_wind_agg_abw.gpkg`) wird ein Mittelwer von 100 m abgeleitet. + +``` +import geopandas as gpd + +df = gpd.read_file("bnetza_mastr_wind_agg_abw.gpkg") +height = df[["hub_height"]].mean() +``` + +##### Turbinentyp + +Annahme: Innerhalb eines Herstellers sind Leistungskurven sehr ähnlich. +Daher werden zwei größten Hersteller mit jeweiligen häufigsten Turbinentyp +ausgewählt - diese sind Enercon und Vestas mit ca. 70 % und ca. 30%. 
+ +``` +import geopandas as gpd + +df = gpd.read_file("bnetza_mastr_wind_agg_abw.gpkg") +manufacturers = df[ + ["manufacturer_name", "status"] +].groupby("manufacturer_name").count().sort_values( + by="status", ascending=False +) +``` + +Häufigste Turbinentypen sind *Enercon E-70* und *Vestas V80*. Daher werden +*Enercon E70 2000* und *Vestas V80 2000* in renewables.ninja ausgewählt. + +``` +man_1 = manufacturers.index[0] +man_2 = manufacturers.index[1] + +type_1 = df[ + ["manufacturer_name", "type_name", "status"] +].where(df["manufacturer_name"] == man_1).groupby( + "type_name").count().sort_values(by="status", ascending=False) + +type_2 = df[ + ["manufacturer_name", "type_name", "status"] +].where(df["manufacturer_name"] == man_2).groupby( + "type_name").count().sort_values(by="status", ascending=False) +``` + +#### Raw Data von [renewables.ninja](http://renewables.ninja) API + +Es werden zwei Zeitreihen für oben beschriebenen Vergleichsanlagen berechnet: + +``` +import json +import requests +import pandas as pd +import geopandas as gpd + +def change_wpt(position, capacity, height, turbine): + args = { + 'lat': 51.8000, # 51.5000-52.0000 + 'lon': 12.2000, # 11.8000-13.1500 + 'date_from': '2011-01-01', + 'date_to': '2011-12-31', + 'capacity': 1000.0, + 'height': 100, + 'turbine': 'Vestas V164 7000', + 'format': 'json', + 'local_time': 'true', + 'raw': 'false', + } + + args['capacity'] = capacity + args['height'] = height + args['lat'] = position[0] + args['lon'] = position[1] + args['turbine'] = turbine + + return args + +def get_df(args): + token = 'Please get your own' + api_base = 'https://www.renewables.ninja/api/' + + s = requests.session() + # Send token header with each request + s.headers = {'Authorization': 'Token ' + token} + + url = api_base + 'data/wind' + + r = s.get(url, params=args) + + parsed_response = json.loads(r.text) + df = pd.read_json( + json.dumps(parsed_response['data']),orient='index') + metadata = parsed_response['metadata'] + return df + +enercon_production = get_df(change_wpt( + position, + capacity=1, + height=df[["hub_height"]].mean(), + turbine=enercon) +) + +vestas_production = get_df(change_wpt( + position, + capacity=1000, + height=df[["hub_height"]].mean(), + turbine=vestas) +) +``` + +#### Gewichtung und Skalierung der Zeitreihen + +Um die Charakteristika der beiden o.g. Anlagentypen zu berücksichtigen, erfolgt +eine gewichtete Summierung der Zeitreihen anhand der berechneten Häufigkeit. + +#### Zukunftsszenarien + +Analog zu dem oben beschriebenen Vorgehen wird eine separate Zeitreihe für +zukünftige WEA berechnet. Hierbei wird eine Enercon E126 6500 mit einer +Nabenhöhe von 159 m angenommen +([PV- und Windflächenrechner](https://zenodo.org/record/6794558)). + +Da die Zeitreihe sich nur marginal von der obigen Status-quo-Zeitreihe +unterscheidet, wird letztere sowohl für den Status quo als auch die +Zukunftsszenarien verwendet. + +- Einspeisezeitreihe: `wind_feedin_timeseries.csv` + +### Freiflächen-Photovoltaik + +#### PV-Anlage (2022) + +Stündlich aufgelöste Zeitreihe der Photovoltaikeinspeisung über 1 Jahr auf Basis +von [MaStR](../../digipipe/store/raw/bnetza_mastr/dataset.md) und +[renewables.ninja](http://renewables.ninja). +Wie bei der Windeinspeisung wird auf eine Auflsöung auf Gemeindeebene aufgrund +geringer regionaler Abweichungen verzichtet. 
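+
+Analog zum oben gezeigten Wind-Abruf lässt sich die PV-Zeitreihe über den
+`data/pv`-Endpunkt von renewables.ninja erzeugen. Die benötigten Parameter
+werden im folgenden Absatz erläutert; vorab eine minimale Skizze, in der
+Token, Koordinaten, `system_loss`, Azimut und der Ausgabedateiname
+Platzhalter bzw. Annahmen sind:
+
+```python
+import json
+
+import pandas as pd
+import requests
+
+token = "Please get your own"  # eigener API-Token erforderlich
+api_base = "https://www.renewables.ninja/api/"
+
+s = requests.session()
+# Token-Header wird bei jedem Request mitgesendet
+s.headers = {"Authorization": "Token " + token}
+
+args = {
+    "lat": 51.8,          # ungefährer Mittelpunkt der Region (Annahme)
+    "lon": 12.2,
+    "date_from": "2011-01-01",
+    "date_to": "2011-12-31",
+    "dataset": "merra2",
+    "capacity": 1000.0,   # 1 MW; renewables.ninja erwartet kW
+    "system_loss": 0.1,   # Annahme
+    "tracking": 0,        # keine Nachführung
+    "tilt": 30,           # Neigung 30°
+    "azim": 180,          # Südausrichtung (Annahme)
+    "format": "json",
+    "local_time": "true",
+}
+
+r = s.get(api_base + "data/pv", params=args)
+parsed = json.loads(r.text)
+pv_production = pd.read_json(json.dumps(parsed["data"]), orient="index")
+pv_production.to_csv("pv_feedin_timeseries_raw.csv")
+```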
+
+Für die Generierung der Zeitreihe über
+[renewables.ninja](http://renewables.ninja)
+werden eine Position (lat, lon), Nennleistung (capacity), Verluste
+(system_loss), Nachführung (tracking), Neigung (tilt) und der Azimutwinkel
+(azim) benötigt.
+
+Als Position wird analog zur Windenergieanlage der räumliche Mittelwert
+verwendet. Laut MaStR werden lediglich 13 Anlagen nachgeführt (0,01 % der
+Kapazität), die Nachführung wird daher vernachlässigt. Die Neigung ist aus MaStR
+nicht bekannt, es dominieren jedoch Anlagen auf Freiflächen sowie Flachdächern
+im landwirtschaftlichen Kontext. Nach
+[Ariadne Szenarienreport](https://ariadneprojekt.de/media/2022/02/Ariadne_Szenarienreport_Oktober2021_corr0222_lowres.pdf)
+wird diese mit 30° angenommen.
+Die Nennleistung wird auf 1 MW gesetzt/normiert.
+
+#### Zukunftsszenarien
+
+Die Status-quo-Zeitreihe wird sowohl für den Status quo als auch die
+Zukunftsszenarien verwendet.
+
+- Einspeisezeitreihe: `pv_feedin_timeseries.csv`
+
+### Solarthermie
+
+- Einspeisezeitreihe: `st_feedin_timeseries.csv` (Kopie von
+  PV-Einspeisezeitreihe)
+
+### Laufwasserkraft
+
+Hier wird eine konstante Einspeisung angenommen.
+
+- Einspeisezeitreihe: `ror_feedin_timeseries.csv`
+
+**Dataset: `raw/renewables.ninja_feedin`**
+
+??? metadata "Metadata"
+    ```json
+    {
+        "name": "renewables.ninja_feedin",
+        "title": "EE-Einspeisezeitreihen",
+        "id": "renewables.ninja_feedin",
+        "description": "Einspeisezeitreihen f\u00fcr Erneuerbare Energien, normiert auf 1 MW bzw. 1 p.u. Als Wetterjahr wird 2011 verwendet",
+        "language": [
+            "de-DE"
+        ],
+        "subject": null,
+        "keywords": [
+            "Erneuerbare",
+            "Energien",
+            "Einspeisezeitreihen",
+            "renewables.ninja"
+        ],
+        "publicationDate": "2016-09-21",
+        "context": {
+            "homepage": "https://abw.rl-institut.de",
+            "documentation": "https://digiplan.readthedocs.io",
+            "sourceCode": "https://github.com/rl-institut/digipipe/",
+            "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/",
+            "grantNo": null,
+            "fundingAgency": "https://www.region-gestalten.bund.de",
+            "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg",
+            "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png"
+        },
+        "spatial": {
+            "location": "Germany",
+            "extent": "Germany",
+            "resolution": null
+        },
+        "temporal": {
+            "referenceDate": "2023-04-14",
+            "timeseries": [
+                {
+                    "start": "2011-01-01T00:00+01",
+                    "end": "2011-12-31T23:00+01",
+                    "resolution": "1 h",
+                    "alignment": "left"
+                }
+            ]
+        },
+        "sources": [
+            {
+                "title": "renewables.ninja_feedin",
+                "description": "Einspeisezeitreihen f\u00fcr Erneuerbare Energien, normiert auf 1 MW bzw. 1 p.u. Als Wetterjahr wird 2011 verwendet",
+                "path": "https://www.renewables.ninja/",
+                "licenses": [
+                    {
+                        "name": "CC BY-NC 4.0",
+                        "title": "Creative Commons Attribution-NonCommercial 4.0 International",
+                        "path": "https://creativecommons.org/licenses/by-nc/4.0/",
+                        "instruction": "you are free to copy, redistribute and adapt them for non-commercial purposes, provided you give appropriate credit. Note that the data is made available as-is and without warranty. We cannot guarantee its accuracy, and accept no responsibility for any liability arising from its use.
You are advised to examine the quality of the data for your intended purposes, and to consult the publications linked on this page.", + "attribution": "\u00a9 www.renewables.ninja, 2023" + } + ] + } + ], + "licenses": [ + { + "name": "CC BY-NC 4.0", + "title": "Data licence Germany \u2013 attribution \u2013 version 2.0", + "path": "https://creativecommons.org/licenses/by-nc/4.0/", + "instruction": "you are free to copy, redistribute and adapt them for non-commercial purposes, provided you give appropriate credit. Note that the data is made available as-is and without warranty. We cannot guarantee its accuracy, and accept no responsibility for any liability arising from its use. You are advised to examine the quality of the data for your intended purposes, and to consult the publications linked on this page.", + "attribution": "\u00a9 www.renewables.ninja, 2023" + } + ], + "contributors": [ + { + "title": "aaronschilling", + "email": "Aaron.Schilling@rl-institut.de", + "date": "2023-09-07", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": null, + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." + } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` + +------------------------------ +## Verwaltungsgebiete Deutschlands + +Verwaltungsgebiete Deutschlands (Verwaltungsgebiete 1:250 000). + +**Dataset: `raw/bkg_vg250`** + +??? 
metadata "Metadata" + ```json + { + "name": "bkg_vg250", + "title": "Adminstrative areas of Germany", + "id": "bkg_vb250", + "description": "Geopackage with administative areas of Germany - Verwaltungsgebiete 1:250 000", + "language": [ + "en-GB", + "de-DE" + ], + "subject": null, + "keywords": [ + "adminstrative areas", + "Verwaltungsgebiete" + ], + "publicationDate": "2022-01-01", + "context": { + "homepage": "https://abw.rl-institut.de", + "documentation": "https://digiplan.readthedocs.io", + "sourceCode": "https://github.com/rl-institut/digipipe/", + "contact": "https://reiner-lemoine-institut.de/ueber-uns/kontakt/", + "grantNo": "None", + "fundingAgency": "https://www.region-gestalten.bund.de", + "fundingAgencyLogo": "https://www.region-gestalten.bund.de/Region/SiteGlobals/Frontend/Images/logo.svg", + "publisherLogo": "https://reiner-lemoine-institut.de//wp-content/uploads/2015/09/rlilogo.png" + }, + "spatial": { + "location": "Germany", + "extent": "Germany", + "resolution": "1:250 000" + }, + "temporal": { + "referenceDate": "2022-01-01", + "timeseries": null + }, + "sources": [ + { + "title": "Bundesamt f\u00fcr Kartographie und Geod\u00e4sie - Verwaltungsgebiete 1:250 000 VG250 (Ebenen)", + "description": "Dieser Datensatz stellt die Verwaltungsgebiete 1:250 000 (VG250) mit Stand 01.01. f\u00fcr das Gebiet der Bundesrepublik Deutschland bereit.", + "path": "https://gdz.bkg.bund.de/index.php/default/digitale-geodaten/verwaltungsgebiete/verwaltungsgebiete-1-250-000-stand-01-01-vg250-01-01.html", + "licenses": [ + { + "name": "DL-DE-BY-2.0", + "title": "Open Data Datenlizenz Deutschland \u2013 Namensnennung \u2013 Version 2.0", + "path": "http://www.govdata.de/dl-de/by-2-0", + "instruction": "The data and meta-data provided may, for commercial and non-commercial use, in particular be copied, printed, presented, altered, processed and transmitted to third parties; be merged with own data and with the data of others and be combined to form new and independent datasets;be integrated in internal and external business processes, products and applications in public and non-public electronic networks.", + "attribution": " \u00a9 GeoBasis-DE / BKG - 2022" + } + ] + } + ], + "contributors": [ + { + "title": "hedwiglieselotte", + "email": "hedwig.bartels@rl-institut.de", + "date": "2023-03-23", + "object": "metadata", + "comment": "create metadata" + } + ], + "resources": [ + { + "profile": null, + "name": null, + "path": null, + "format": null, + "encoding": null, + "schema": { + "fields": [], + "primaryKey": [], + "foreignKeys": [] + }, + "dialect": { + "delimiter": "", + "decimalSeparator": "." 
+ } + } + ], + "@id": null, + "@context": "https://raw.githubusercontent.com/OpenEnergyPlatform/oemetadata/develop/metadata/latest/context.json", + "review": { + "path": "", + "badge": "" + }, + "metaMetadata": { + "metadataVersion": "OEP-1.5.2", + "metadataLicense": { + "name": "CC0-1.0", + "title": "Creative Commons Zero v1.0 Universal", + "path": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + "_comment": { + "metadata": "Metadata documentation and explanation (https://github.com/OpenEnergyPlatform/oemetadata)", + "dates": "Dates and time must follow the ISO8601 including time zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss\u00b1hh)", + "units": "Use a space between numbers and units (100 m)", + "languages": "Languages must follow the IETF (BCP47) format (en-GB, en-US, de-DE)", + "licenses": "License name must follow the SPDX License List (https://spdx.org/licenses/)", + "review": "Following the OEP Data Review (https://github.com/OpenEnergyPlatform/data-preprocessing/blob/master/data-review/manual/review_manual.md)", + "null": "If not applicable use: null", + "todo": "If a value is not yet available, use: todo" + } + } + ``` diff --git a/docs/datasets/region_specific_datasets.md b/docs/datasets/region_specific_datasets.md new file mode 100644 index 00000000..93e01cb3 --- /dev/null +++ b/docs/datasets/region_specific_datasets.md @@ -0,0 +1,17 @@ +# Regionsspezifische Datensätze + +Die meisten der Rohdatensätze können für eine beliebige Region in Deutschland +verwendet werden. Einige sind jedoch nur für eine Teilregion verfügbar oder +spezifisch für die ABW-Region: + + +| Raw-Datensatz | Räuml. Ausdehnung | Alternativer Datensatz | +|--------------------------------|-----------------------------|-------------------------------------------------------------| +| bnetza_mastr_correction_region | Region ABW | | +| dwd_temperature | Region ABW | [DWD CDC](https://opendata.dwd.de/climate_environment/CDC/) | +| emissions | Sachsen-Anhalt / Region ABW | | +| renewables.ninja_feedin | Region ABW | renewables.ninja_feedin | +| rpg_abw_pv_roof_potential | Region ABW | | +| rpg_abw_regional_plan | Region ABW | | +| stala_st_energy | Sachsen-Anhalt | | +| stala_st_pop_prog | Sachsen-Anhalt | | diff --git a/docs/generate_dataset_mds.py b/docs/generate_dataset_mds.py new file mode 100644 index 00000000..e51654a1 --- /dev/null +++ b/docs/generate_dataset_mds.py @@ -0,0 +1,93 @@ +import os +import json +import re + + +def generate_dataset_mds(): + """Create markdown files for each dataset category + + This function generates/updates Markdown files based on the content of + 'dataset.md' and 'metadata.json' in each dataset folder per category. + One Markdown file per category is generated (containing data for every + dataset of this category). + The Markdown files are saved to the 'docs/datasets' directory. 
+ + Returns + ------- + None + """ + source_dir = "digipipe/store" + target_dir = "docs/datasets" + + categories = ["raw", "preprocessed", "datasets", "appdata"] + + for category in categories: + category_dir = os.path.join(source_dir, category) + category_md_file = os.path.join(target_dir, f"{category}_datasets.md") + + with open(category_md_file, "w") as md_file: + # Write category heading + md_file.write(f"# '{category.capitalize()}' Datasets \n") + + # Traverse the category directory + for root, dirs, files in os.walk(category_dir): + if "TEMPLATE" in dirs: + dirs.remove("TEMPLATE") + for file in files: + if file == "dataset.md": + dataset_md_file = os.path.join(root, file) + # Write the content of the dataset.md file with replacements + with open(dataset_md_file, "r") as dataset_file: + md_file.write("\n------------------------------\n") + for line in dataset_file: + line = re.sub( + r"\(\.\./\.\./", + "(../../digipipe/store/", + line, + ) + line = re.sub( + r"\(config.yml\)", + f"(../../digipipe/store/{category}/{os.path.basename(root)}/config.yml)", + line, + ) + line = re.sub( + r"\(\.\./([a-zA-Z])", + fr"(../../digipipe/store/{category}/\1", + line, + ) + line = re.sub( + r"\(\.\./\.\./\.\./\.\./\.\./docs/sections/", + "(../sections/", + line, + ) + if line.startswith("#"): + line = "#" + line # Add '#' to the line + md_file.write(line) + md_file.write("\n") + md_file.write( + f"**Dataset: " + f"`{category}/{os.path.basename(root)}`**\n\n" + ) + + # Check if the corresponding metadata.json file exists + metadata_json_file = os.path.join(root, "metadata.json") + if os.path.exists(metadata_json_file): + # Read the metadata from metadata.json + with open(metadata_json_file, "r") as metadata_file: + metadata = json.load(metadata_file) + # Write the metadata section + md_file.write('??? 
metadata "Metadata"\n') + md_file.write(" ```json\n") + md_file.write( + " " + + json.dumps(metadata, indent=4).replace( + "\n", "\n " + ) + ) + md_file.write("\n ```\n") + print(f"Generated {category}_datasets.md") + + print("Generation of dataset category markdown files completed.") + + +generate_dataset_mds() diff --git a/docs/img/datasets/pipeline_dataflow_example.dia b/docs/img/datasets/pipeline_dataflow_example.dia new file mode 100644 index 00000000..9eea7cbe Binary files /dev/null and b/docs/img/datasets/pipeline_dataflow_example.dia differ diff --git a/docs/img/datasets/pipeline_dataflow_example.png b/docs/img/datasets/pipeline_dataflow_example.png new file mode 100644 index 00000000..a8ce0ddc Binary files /dev/null and b/docs/img/datasets/pipeline_dataflow_example.png differ diff --git a/docs/img/esys/DigiPlan_Energiesystem.svg b/docs/img/esys/DigiPlan_Energiesystem.svg new file mode 100644 index 00000000..5aab4c51 --- /dev/null +++ b/docs/img/esys/DigiPlan_Energiesystem.svg @@ -0,0 +1,4808 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + Biogasupgrading plant + + + Biomass + BiomassImport + + + + + + + + + + + Dec.CHP + + + + + + + + + + Cen.CHP + + + + + + + Biogas + + + + + + + Biogasplant + + + + + + + + Lignite + LigniteImport + + + CoalOven + + + + + + + + + + + + Wood + WoodImport + + + WoodOven + + + + + + + Cen.CCGT + + + Dec.CCGT + + + + + + + + + + + + + + + + + + MethaneImport + + + + + Methane + + GT + + + + + + Cen.CHP + + + + + Cen. Gasboiler + + + + + Dec. Gasboiler + + + + + + + + + + + + Cen.CCGT + + Dec.CCGT + + + + + + + Dec.CHP + + + + + + + + + + + + + + + + + + + ElectricityImport + + ElectricityExport + + Electricitydemandsectors i + + Electricity + + LargeBattery + + + + i = {HH, CTSA, Industry} + + Wind + + + PV (ground) + + + PV (rooftop) + + + + BEVdemand + + + + + + Dec. HP + + Dec. PtH + + + + + + + + + Cen. HP + + Cen. PtH + + + + RoR + + + + + Central heat + + + Decentral heat + + Dec.TES + + Cen.TES + + + + + Cen. heatdemandsectors i + Dec. heatdemandsectors i + + Dec. Solarthermalcollector + + + + + + SmallBattery + Cen. 
Solarthermalcollector + + + + + diff --git a/docs/img/logos/RLI_DigiPlan_logos.png b/docs/img/logos/RLI_DigiPlan_logos.png new file mode 100644 index 00000000..1a47339c Binary files /dev/null and b/docs/img/logos/RLI_DigiPlan_logos.png differ diff --git a/docs/img/logos/RLI_DigiPlan_logos_horizontal.png b/docs/img/logos/RLI_DigiPlan_logos_horizontal.png new file mode 100644 index 00000000..d33250d5 Binary files /dev/null and b/docs/img/logos/RLI_DigiPlan_logos_horizontal.png differ diff --git a/docs/img/digiplan-favicon-512x512.png b/docs/img/logos/digiplan-favicon-512x512.png similarity index 100% rename from docs/img/digiplan-favicon-512x512.png rename to docs/img/logos/digiplan-favicon-512x512.png diff --git a/docs/img/digiplan-logo-horizontal.png b/docs/img/logos/digiplan-logo-horizontal.png similarity index 100% rename from docs/img/digiplan-logo-horizontal.png rename to docs/img/logos/digiplan-logo-horizontal.png diff --git a/docs/img/digiplan-logo.png b/docs/img/logos/digiplan-logo.png similarity index 100% rename from docs/img/digiplan-logo.png rename to docs/img/logos/digiplan-logo.png diff --git a/docs/img/rli_logo.png b/docs/img/logos/rli_logo.png similarity index 100% rename from docs/img/rli_logo.png rename to docs/img/logos/rli_logo.png diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..526d3fb2 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,6 @@ +mkdocs +mkdocs-material +mkdocs-include-markdown-plugin +mkdocs-same-dir +mkdocs-exclude +mkdocs-exclude-search \ No newline at end of file diff --git a/docs/sections/background.md b/docs/sections/background.md new file mode 100644 index 00000000..6397b0d4 --- /dev/null +++ b/docs/sections/background.md @@ -0,0 +1,35 @@ +# Hintergrund + +Die Energiewende kann nur gemeinsam erreicht werden. Wie aber können alle +Interessensgruppen ihre Sichtweisen und Bedürfnisse in die Energiewendeplanung +einbringen? +Das [Reiner Lemoine Institut (RLI)](https://reiner-lemoine-institut.de) hat +dieses +Stakeholder-Empowerment-Tool mit Unterstützung +der [Energieavantgarde Anhalt e.V.](https://www.energieavantgarde.de/) +entwickelt, das es Akteurinnen und Akteuren der Energiewende +ermöglicht, sich an Planungsprozessen zu beteiligen. + +Das Ziel ist die Beförderung des Klimaschutzes in ländlichen Regionen durch eine +effiziente und transparente Steuerung. Hierzu sollen dezentrale Energiesysteme +gestärkt werden, um beispielsweise hohe Netzentgelte für ländliche Regionen zu +vermeiden und die Akzeptanz des EE-Angebotes zu fördern. In einem dezentralen +Energiesystem können unfaire Kosten vermieden und die BürgerInnen zum Prosuming +ermutigt werden. Dies erfordert sorgfältiges Planen – auch über aktuelle +Markttrends hinaus. Dadurch entsteht eine höhere Akzeptanz durch eigenes und +gemeinsames Handeln und Verhandeln, wovon der Klimaschutz profitiert. + +Das Tool kann zur Unterstützung regionaler Planungsorganisationen und Landkreise +sowie für die Partizipation der Zivilgesellschaft eingesetzt werden, um die +Implementierung von Erneuerbaren Energien effizienter zu planen und den Zubau +besser und schneller zu steuern. Es ermöglicht BürgerInnen und Administrationen +den Zugriff auf den Energiekonsum und die genutzten und verfügbaren +Energiequellen in ihrer jeweiligen Region. Dies gewährleistet ein besseres +Verständnis der aktuellen Lage und hilft, Bedarfe und Potenziale zu erkunden +sowie Ausbaupfade zu verhandeln. 
+ +## Förderung + +Das Projekt wurde im Rahmen des Projekts +[Heimat 2.0](https://www.bbsr.bund.de/BBSR/DE/forschung/programme/region-gestalten/initiativen/2020/heimat-2-0/01-start.html) +im Programm [Region gestalten](http://www.regiongestalten.de/) gefördert. diff --git a/docs/sections/data.md b/docs/sections/data.md new file mode 100644 index 00000000..89c91d51 --- /dev/null +++ b/docs/sections/data.md @@ -0,0 +1,7 @@ +{% + include-markdown "../../digipipe/store/DATASETS.md" +%} + +## Datasets + +See [Datasets](../datasets/raw_datasets.md) diff --git a/docs/sections/data_overview.md b/docs/sections/data_overview.md new file mode 100644 index 00000000..c4e3951b --- /dev/null +++ b/docs/sections/data_overview.md @@ -0,0 +1,18 @@ +# Überblick + +Hier werden die zugrunde liegende und abgeleiteten Daten sowie die Methoden +beschrieben. + +Grundlegender Datenfluss: + +0. [Rohdaten (RAW)](../datasets/raw_datasets.md) --> +1. [Vorverarbeitete Daten (PREPROCESSED)](../datasets/preprocessed_datasets.md) + --> +2. [Fertige Datensätze (DATASETS)](../datasets/datasets_datasets.md) --> +3. [App-fertige Datensätze (APPDATA)](../datasets/appdata_datasets.md) + +Für eine detaillierte Beschreibung des Datenflusses siehe +[Data Pipeline](data.md). + +Die Datensätze und dazugehörigen Methoden sind in den Kategorien im links +dargestellte Menü abrufbar. diff --git a/docs/sections/docs.md b/docs/sections/docs.md new file mode 100644 index 00000000..13745987 --- /dev/null +++ b/docs/sections/docs.md @@ -0,0 +1,8 @@ +# How to build the docs + +With activated env: + +``` +python docs/generate_dataset_mds.py +mkdocs build +``` diff --git a/docs/sections/esys.md b/docs/sections/esys.md new file mode 100644 index 00000000..5a714aaa --- /dev/null +++ b/docs/sections/esys.md @@ -0,0 +1,192 @@ +# Energiesystem + +Im Folgenden wird der Aufbau des in Digiplan verwendeten Energiesystems für die +Region Anhalt-Bitterfeld-Wittenberg beschrieben. Die Generierung der +Energiesystemdaten für die App ist Teil der +[Developer Docs](../sections/esys_dev.md). + +## Das Energiesystem im Überblick + +Schematische Darstellung: + +![Energy system schema](../img/esys/DigiPlan_Energiesystem.svg) + +In den Modellen wird das Energiesystem für das Jahr 2045 aufgestellt. Dabei +wird sich am derzeitigen Energiesystem orientiert, dessen Komponenten u.a. auf +den Daten des Marktstammdatenregisters (MaStR) beruhen. + +## Annahmen und Vereinfachungen + +Es werden die folgenden Annahmen und Vereinfachungen getroffen: + +- Für die Prognosen (z.B. Energieträger, Verbräuche) werden die + [BMWK Langfristszenarien](https://langfristszenarien.de) verwendet, vgl. + Datensatz [BMWK Langfristszenarien](../datasets/raw_datasets.md#bmwk-langfristszenarien). +- Biomasse wird unterteilt in: feste, gasförmige und flüssige Biomasse. +- Die Vorhersage zur Entwicklung der Biomasse geschieht auf der Basis von: + ["Technoökonomische Analyse und Transformationspfade des energetischen Biomassepotentials (TATBIO)"](https://www.bmwk.de/Redaktion/DE/Publikationen/Studien/technooekonomische-analyse-und-transformationspfade-des-energetischen-biomassepotentials.pdf?__blob=publicationFile&v=1), + vgl. Datensatz [dbfz_biomass_heat_capacities](../datasets/raw_datasets.md#installierte-leistungen-von-biomasse-konversionstechnologien). +- Flüssige Biomasse wird im Jahr 2045 ausschließlich im Verkehrsektor + eingesetzt. +- Der Verkehrssektor wird vernachlässigt. Demnach entfällt der Einsatz flüssiger + Biomasse. 
+- Es wird kein Biogas importiert: Sämtliches Biogas wird regional durch Biomasse + erzeugt. +- Der Bus Methan steht stellvertretend für einen Bus für klimaneutrale Gase. + Es handelt sich um eine Mischung aus grünem Wasserstoff und Biomethan. + Immer wenn von Methan gesprochen wird, ist diese Gasmischung gemeint. +- Der Energieträger Braunkohle ist nur relevant zur Berechnungen der Emissionen + zum Status quo. +- Für das Jahr 2045 ist kein Import von Braunkohle vorhergesehen. +- Es wird das Jahr 2045 in stündlichen Schritten betrachtet. +- Sämtliche Zeitreihen liegen in dieser Auslösung vor oder werden in diese + Auflösung übersetzt. +- Es handelt sich um eine Dispatch Optimierung. +- Sämtliche Kapazitäten und Energiemengen bis auf die der Importe und Exporte + sowie die der Speichertechnologien sind fix gesetzt. +- Die Optimierung findet nach dem Perfect-Forsight-Prinzip über den + Zeithorizont eines Jahres statt. +- Die Strombedarfe werden je Sektor (Haushalte, GHD, Industrie) aggregiert + abgebildet, vgl. Datensatz + [demand_electricity_region](../datasets/datasets_datasets.md#strombedarf). +- Die Wärmebedarfe werden je Sektor (Haushalte, GHD, Industrie) abgebildet, + unterteilt in zentral (Fernwärmenetze) und dezentral, vgl. Datensatz + [demand_heat_region](../datasets/datasets_datasets.md#wärmebedarf). +- Es erfolgt keine Modellierung der Strom- und Wärmenetze. + +## Das Energiesystem im Detail + +### Busse + +Die folgenden acht Energieträger sind die Busse im Energiesystem: + +- Elektrizität +- Zentrale Wärme +- Dezentrale Wärme +- Methan +- Holz +- Braunkohle +- Biomassse +- Biogas + +### Erzeugungstechnologien + +Volatile Erzeugungsanlagen werden mit der +[oemof.tabular Komponente volatile](https://github.com/oemof/oemof-tabular/blob/dev/src/oemof/tabular/facades/volatile.py) +abgebildet. Die folgenden Erzeugungsanlagen werden im Energiesystem betrachtet: + +| Komponente | Energieträger input | Energieträger output | Beschreibung | Komponentenname +|------------------------------|---------------------|----------------------|-------------------------|---------------------------------- +| Wind | | Elektrizität | Onshore-Wind | wind-onshore +| PV (ground) | | Elektrizität | Freiflächen-PV | solar-pv_ground +| PV (rooftop) | | Elektrizität | Aufdach-PV | solar-pv_rooftop +| Cen. Solar thermal collector | | Elektrizität | Zentrale Solarthermie | solar-thermalcollector_central +| Dec. Solar thermal collector | | Elektrizität | Dezentrale Solarthermie | solar-thermalcollector_decentral +| RoR | | Elektrizität | Laufwasserkraftwerk | hydro-ror + + +### Import und Export + +Strombezug aus dem Stromnetz wird mittels einer +[oemof.tabular Komponente shortage](https://github.com/oemof/oemof-tabular/blob/dev/src/oemof/tabular/facades/shortage.py) +abgebildet. +Mit dieser Komponente wird auch der Bezug von Biomethan abgebildet und der von +Holz und etwaiger Braunkohle. + +| Komponente | Energieträger input | Energieträger output | Beschreibung | Komponentenname +|--------------------|---------------------|----------------------|-------------------------|-------------------- +| Electricity Import | | Elektrizität | Bezug aus dem Stromnetz | electricity-import +| Methane Import | | Methan | Bezug von Methan | ch4-import +| Wood Import | | Holz | Bezug von Holz | wood-import +| Lignite Import | | Braunkohle | Bezug von Braunkohle | lignite-import +| Biomass Import | | Biomasse | Bezug von Biomasse | biomass-import + +Es wird der Export von Strom berücksichtigt. 
Der Verkauf von Strom aus der +Region wird mit der +[oemof.tabular Komponente excess](https://github.com/oemof/oemof-tabular/blob/dev/src/oemof/tabular/facades/excess.py) +abgebildet. + +| Komponente | Energieträger input | Energieträger output | Beschreibung | Komponentenname +|--------------------|---------------------|----------------------|-------------------|-------------------- +| Electricity Export | Elektrizität | | Verkauf von Strom | electricity-export + +### Bedarf + +Die Sektoren Haushalte (HH), Gewerbe, Handel und Dienstleistungen (CTS) sowie +Industrie (Industry) haben einen Bedarf an Elektrizität, zentraler Wärme und +dezentraler Wärme. Der jeweilige Bedarf wird im Modell mit der +[oemof.tabular Komponente load](https://github.com/oemof/oemof-tabular/blob/dev/src/oemof/tabular/facades/load.py) +abgebildet. + +| Komponente | Energieträger input | Energieträger output | Beschreibung | Komponentenname +|------------------------------------|---------------------|----------------------|---------------------------------------|--------------------------- +| BEV demand | Elektrizität | | Strombedarf für Elektrofahrzeuge | electricity-bev_charging +| Electricity demand sector HH | Elektrizität | | Strombedarf Haushalte | electricity-demand_hh +| Electricity demand sector CTS | Elektrizität | | Strombedarf CTS | electricity-demand_cts +| Electricity demand sector Industry | Elektrizität | | Strombedarf Industrie | electricity-demand_ind +| Cen. heat demand sector HH | Zentrale Wärme | | Bedarf an zentraler Wärme Haushalte | heat_central-demand_hh +| Cen. heat demand sector CTS | Zentrale Wärme | | Bedarf an zentraler Wärme CTS | heat_central-demand_cts +| Cen. heat demand sector Industry | Zentrale Wärme | | Bedarf an zentraler Wärme Industrie | heat_central-demand_ind +| Dec. heat demand sector HH | Dezentrale Wärme | | Bedarf an dezentraler Wärme Haushalte | heat_decentral-demand_hh +| Dec. heat demand sector CTS | Dezentrale Wärme | | Bedarf an dezentraler Wärme CTS | heat_decentral-demand_cts +| Dec. heat demand sector Industry | Dezentrale Wärme | | Bedarf an dezentraler Wärme Industrie | heat_decentral-demand_ind + +### Wärmeerzeuger und weitere Wandler + +Wärmeerzeuger und weitere Wandler werden mittels der +[oemof.tabular Komponente conversion](https://github.com/oemof/oemof-tabular/blob/dev/src/oemof/tabular/facades/conversion.py) +eingebunden. + +| Komponente | Energieträger input | Energieträger output | Beschreibung | Komponentenname +|------------------------|---------------------|----------------------|----------------------------|-------------------------------- +| GT | Methan | Elektrizität | Gasturbine | ch4-gt +| Cen. HP | Elektrizität | Zentrale Wärme | Zentrale Wärmepumpe | electricity-heatpump_central +| Dec. HP | Elektrizität | Dezentrale Wärme | Dezentrale Wärmepumpe | electricity-heatpump_decentral +| Cen. PtH | Elektrizität | Zentrale Wärme | Zentrale Power-to-Heat | electricity-pth_central +| Dec. PtH | Elektrizität | Dezentrale Wärme | Dezentrale Power-to-Heat | electricity-pth_decentral +| Cen. Gas boiler | Methan | Zentrale Wärme | Zentrale Gasboiler | ch4-boiler_central +| Dec. 
Gas boiler | Methan | Dezentrale Wärme | Dezentrale Gasboiler | ch4-boiler_decentral +| Wood Oven | Holz | Dezentrale Wärme | Dezentraler Holzofen | wood-oven +| Coal Oven | Braunkohle | Dezentrale Wärme | Dezentraler Braunkohleofen | lignite-oven +| Biogas plant | Biomasse | Biogas | Biogasanlage | biomass-biogas_plant +| Biogas upgrading plant | Biogas | Methan | Biogasaufbereitung | biogas-biogas_upgrading_plant + +### Kraft-Wärme-Kopplung + +Bei der Kraft-Wärme-Kopplung wird zwischen Gas-Turbinen mit Abhitzekessel und +Blockheizkraftwerken unterschieden. Für Gas-Turbinen mit Abhitzekessel wird die +[oemof.tabular Komponente Komponente extraction_turbine](https://github.com/oemof/oemof-tabular/blob/dev/src/oemof/tabular/facades/extraction_turbine.py) +verwendet. + +| Komponente | Energieträger input | Energieträger output | Beschreibung | Komponentenname +|------------|---------------------|-----------------------------------|------------------------------------------|----------------------- +| Cen. CCGT | Methan | Elektrizität und Zentrale Wärme | Zentrale Gasturbinen mit Abhitzekessel | ch4-extchp_central +| Dec. CCGT | Methan | Elektrizität und Dezentrale Wärme | Dezentrale Gasturbinen mit Abhitzekessel | ch4-extchp_decentral +| Cen. CCGT | Holz | Elektrizität und Zentrale Wärme | Zentrale Gasturbinen mit Abhitzekessel | wood-extchp_central +| Dec. CCGT | Holz | Elektrizität und Dezentrale Wärme | Dezentrale Gasturbinen mit Abhitzekessel | wood-extchp_decentral + + +Für Blockheizkraftwerke wir auf die +[oemof.tabular Komponente Komponente backpressure_turbine](https://github.com/oemof/oemof-tabular/blob/dev/src/oemof/tabular/facades/backpressure_turbine.py) +zurückgegriffen, welche einer Conversion Komponente mit zwei Ausgängen ähnelt +und daher vereinfachend zum Abbilden eines BHKWs verwendet werden kann. + +| Komponente | Energieträger input | Energieträger output | Beschreibung | Komponentenname +|------------|---------------------|-----------------------------------|------------------------------------------|---------------------------------- +| Cen. CHP | Methan | Elektrizität und Zentrale Wärme | Zentrales Blockheizkraftwerk | ch4-bpchp_central +| Dec. CHP | Methan | Elektrizität und Dezentrale Wärme | Dezentrales Blockheizkraftwerk | ch4-bpchp_decentral +| Cen. CHP | Biogas | Elektrizität und Zentrale Wärme | Zentrales Blockheizkraftwerk | biogas-bpchp_central +| Dec. CHP | Biogas | Elektrizität und Dezentrale Wärme | Dezentrales Blockheizkraftwerk | biogas-bpchp_decentral + +### Speicher + +Die folgenden Speicher sind im Energiesystem von ABW mittels der +[oemof.tabular Komponente Komponente storage](https://github.com/oemof/oemof-tabular/blob/dev/src/oemof/tabular/facades/storage.py) +eingebunden: + +| Komponente | Energieträger input | Energieträger output | Beschreibung | Komponentenname +|---------------|---------------------|----------------------|---------------------------------------|--------------------------------- +| Small Battery | Elektrizität | Elektrizität | Heimbatterien | electricity-small_scale_battery +| Large Battery | Elektrizität | Elektrizität | Großbatterien | electricity-large_scale_battery +| Cen. TES | Zentrale Wärme | Zentrale Wärme | Zentrale thermische Energiespeicher | heat_central-storage +| Dec. 
TES | Dezentrale Wärme | Dezentrale Wärme | Dezentrale thermische Energiespeicher | heat_decentral-storage diff --git a/docs/sections/esys_dev.md b/docs/sections/esys_dev.md new file mode 100644 index 00000000..6a81262f --- /dev/null +++ b/docs/sections/esys_dev.md @@ -0,0 +1,3 @@ +{% + include-markdown "../../digipipe/esys/ESYS.md" +%} \ No newline at end of file diff --git a/docs/sections/installation.md b/docs/sections/installation.md new file mode 100644 index 00000000..5c503d58 --- /dev/null +++ b/docs/sections/installation.md @@ -0,0 +1,61 @@ +# Installation + +**Note: Linux only, Windows is currently not supported.** + +First, clone via SSH using + + git clone git@github.com:rl-institut-private/digipipe.git /local/path/to/digipipe/ + +Prerequisite: + +- _conda_ ([Anaconda](https://docs.anaconda.com/anaconda/install/) +/[Miniconda](https://conda.io/en/latest/miniconda.html)) +- `python `_ + +Enter repo folder. Set up a conda environment and activate it with: + +``` +conda env create -f environment.yml +conda activate digipipe +``` + +Install [poetry](https://python-poetry.org/) (python dependency manager used +in this project) and dependencies for the project (Note: Installing poetry via +pip into same environment is not recommended and can cause trouble! Instead, it +should be installed system-wide via command below or +[pipx](https://python-poetry.org/docs/#installing-with-pipx)): + +``` +curl -sSL https://install.python-poetry.org | python3 - +poetry install +``` + +Some additional system packages are required, install them by + + sudo apt install gdal-bin python3-gdal libspatialindex-dev imagemagick osmium-tool graphviz graphviz-dev + +Notes: + +- Make sure you have GDAL>=3.0 as older versions will not work. +- `imagemagick` is optional and only required for report creation + +## Contributing to digipipe + +You can write [issues](https://github.com/rl-institut/digipipe/issues) +to announce bugs or to propose enhancements. + +If you want to participate in the development of digipipe, please make sure you +use pre-commit. + +You activate it with: + + pre-commit install + +To trigger a check manually, execute: + + pre-commit run -a + +## Runtime and resources + +**Warning:** Conversion and extraction process needs ~50 GB disk space and may +take a couple of hours to finish! diff --git a/docs/sections/scenarios.md b/docs/sections/scenarios.md new file mode 100644 index 00000000..1aef182b --- /dev/null +++ b/docs/sections/scenarios.md @@ -0,0 +1,35 @@ +# Szenarien + +Als Zielszenario wird ein Szenario für 2045 angenommen. Die Erzeugungs- und +Verbrauchsstrukturen orientieren sich am vorhandenen Kraftwerkspark und den +Potenzialen in der Region. Weitere Randbedingungen werden aus den +[BMWK Langfristszenarien](../datasets/raw_datasets.md#bmwk-langfristszenarien), +dem EEG 2023 und dem WindBG abgeleitet. Die Topologie des Energiesystems ist +unter [Energiesystem](../sections/esys.md) beschrieben. + +Die auf den Einstellelementen (Slider, Schalter) in der App dargestellten Ziele +für dieses Szenario sind technologiespezifisch und können der +[Dokumentation der Settings](../datasets/datasets_datasets.md#settings-fur-app) +entnommen werden. + +## Wetterjahr + +Es wird das **Wetterjahr 2011** verwendet. 
Im Rahmen einer +[Studie des Fraunhofer IWES](https://www.erneuerbar-mobil.de/sites/default/files/2019-10/Auswertung_7Wetterjahre_95Prozent_FraunhoferIWES.pdf) +wurden für ein -95%-Klimazielszenario sieben verschiedene Wetterjahre im +Hinblick auf die Auslegung eines sektorgekoppelten Energiesystems in +Deutschland untersucht. Im Ergebnis zeigte sich das Jahr 2011 als besonders +geeignet. + +Eckpunkte + +- Die Winderzeugung ist durchschnittlich +- Die PV-Erträge sind etwas höher +- Die Wärmebedarfe etwas geringer, was das Fortschreiten des Klimawandels bis + 2050 abbilden kann + +Zudem wird in der Analyse auch die Bedeutung von Extremwetterjahren betrachtet. +U.a. wird insbesondere das schlechte Windjahr 2010 mit dem durchschnittlichen +Jahr 2011 verglichen. Im Ergebnis ist ein mit dem durchschnittlichen Wetterjahr +ausgelegtes Energiesystem auch mit etwas schlechteren Bedingungen für fEE +versorgungssicher. diff --git a/docs/sections/structure.md b/docs/sections/structure.md new file mode 100644 index 00000000..c1b10e90 --- /dev/null +++ b/docs/sections/structure.md @@ -0,0 +1,67 @@ +# Structure of this repo + +``` +. +├── digipipe +│ ├── config +│ │ └── global.yml # Global config +│ ├── logs # Place for log files +│ ├── scenarios # Scenario definition +│ │ ├── .TEMPLATE # - Template +│ │ └── SCENARIOS.md +│ ├── scripts # Main scripts +│ │ ├── datasets # - for data processing which is shared by different datasets +│ │ ├── esm # - for energy system modelling +│ │ ├── config.py # - config-related functions +│ │ ├── geo.py # - spatial functions +│ │ └── data_io.py # - helper functions for data handling +│ ├── store # Data store +│ │ ├── appdata # - App-ready data +│ │ │ ├── data +│ │ │ ├── metadata +│ │ │ └── scenarios +│ │ ├── datasets # - Processed datasets +│ │ │ ├── bkg_vg250_districts_abw +│ │ │ ├── bkg_vg250_muns_abw +│ │ │ ├── bkg_vg250_region_abw +│ │ │ ├── osm_forest +│ │ │ ├── .TEMPLATE +│ │ │ └── module.smk +│ │ ├── preprocessed # - Preprocessed datasets +│ │ │ ├── bkg_vg250 +│ │ │ ├── osm_filtered +│ │ │ ├── .TEMPLATE +│ │ │ └── module.smk +│ │ ├── raw # - Raw datasets +│ │ │ ├── bkg_vg250 +│ │ │ ├── destatis_gv +│ │ │ ├── osm_sachsen-anhalt +│ │ │ └── .TEMPLATE +│ │ ├── temp # - Temporary files +│ │ ├── DATASETS.md +│ │ └── utils.py +│ └── workflow +│ ├── Snakefile # Main snakefile +│ ├── utils.py +│ └── WORKFLOW.md +├── docs # Documentation +│ ├── sections +│ │ ├── data.rst +│ │ ├── installation.rst +│ │ ├── scenarios.rst +│ │ ├── structure.rst +│ │ └── workflow.rst +│ ├── conf.py +│ ├── index.rst +│ └── Makefile +├── tests +├── CHANGELOG.md +├── CONTRIBUTING.md +├── environment.yml +├── LICENSE +├── README.md +├── requirements.txt +└── setup.py +``` + +(created via `tree --dirsfirst -L 4 -a -I '__*|*log|.gitkeep|PKG-INFO|*egg-info*|img|.git|.idea|venv|.snakemake' . 
> dirtree.txt`) diff --git a/docs/sections/workflow.md b/docs/sections/workflow.md new file mode 100644 index 00000000..46dd9a2e --- /dev/null +++ b/docs/sections/workflow.md @@ -0,0 +1,3 @@ +{% + include-markdown "../../digipipe/workflow/WORKFLOW.md" +%} diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css new file mode 100644 index 00000000..7fa096ef --- /dev/null +++ b/docs/stylesheets/extra.css @@ -0,0 +1,70 @@ +th { + white-space: nowrap; + overflow: hidden; +} + +[data-md-color-scheme="default"] th { + background-color: #F5F5F5; +} + +[data-md-color-scheme="slate"] th { + background-color: #21222C; +} + +.md-typeset .admonition.info, +.md-typeset details.info { + border-color: #4051B5; +} + +.md-typeset .info>.admonition-title, +.md-typeset .info>summary { + background-color: #ffffff1a; +} + +.md-typeset .info>.admonition-title::before, +.md-typeset .info>summary::before { + background-color: #4051B5; + -webkit-mask-image: var(--md-admonition-icon--info); + mask-image: var(--md-admonition-icon--info); +} + +:root { + --md-admonition-icon--metadata: url('data:image/svg+xml;charset=utf-8,') +} + +.md-typeset .admonition.metadata, +.md-typeset details.metadata { + border-color: #4051B5; +} + +.md-typeset .metadata>.admonition-title, +.md-typeset .metadata>summary { + background-color: #4052b510; +} + +.md-typeset .metadata>.admonition-title::before, +.md-typeset .metadata>summary::before { + background-color: #4051B5; + -webkit-mask-image: var(--md-admonition-icon--metadata); + mask-image: var(--md-admonition-icon--metadata); +} + +/* custom width +@media only screen and (min-width: 76.25em) { + .md-main__inner { + max-width: none; + } + + .md-sidebar--primary { + left: 250px; + margin-right: 250px; + + } + + .md-sidebar--secondary { + right: 250px; + margin-left: 250px; + -webkit-transform: none; + transform: none; + } +} */ \ No newline at end of file diff --git a/environment.yml b/environment.yml index 318c1bb7..0309fa0f 100644 --- a/environment.yml +++ b/environment.yml @@ -1,11 +1,9 @@ name: digipipe +channels: + - conda-forge dependencies: - python=3.8 - gdal=3.* - - python-gdal + - coincbc # TODO: To be deleted after issue #52 done - libspatialindex - imagemagick - - pip - - pip: - - -e . - - -r requirements.txt diff --git a/index.md b/index.md new file mode 100644 index 00000000..cf77e152 --- /dev/null +++ b/index.md @@ -0,0 +1,27 @@ +# Willkommen! + +![Logo des Reiner Lemoine Instituts](docs/img/logos/RLI_DigiPlan_logos.png){ width="250", align="right"} + +Bei *digipipe* handelt es sich um die Datenpipeline zum +[Digitalen Planungsatlas Anhalt](https://digiplan.rl-institut.de), dem +Stakeholder-Empowerment-Tool für die Region Anhalt-Bitterfeld-Wittenberg (ABW). + +Die App und die dazugehörige Pipeline wurden vom +[Reiner Lemoine Institut (RLI)](https://reiner-lemoine-institut.de) im Rahmen +des Projekts +[DigiPlan](https://reiner-lemoine-institut.de/digitaler-planungsatlas-anhalt-digiplan/) +in Kooperation mit der +[Energieavantgarde Anhalt](https://www.energieavantgarde.de/) entwickelt. +Die App ist der Nachfolger des +[Stakeholder-Empowerment-Tools StEmp-ABW](https://wam.rl-institut.de/stemp_abw/). + +Der Quellcode der App Digiplan ist frei verfügbar. 
Die benötigten Daten wurden +mit der Daten-Pipeline Digipipe erzeugt, die ebenfalls den Prinzipien von Open +Source & Open Data folgt: + +- [Quellcode der Pipeline ("Digipipe")](https://github.com/rl-institut/digipipe) +- [Quellcode der App ("Digiplan")](https://github.com/rl-institut/digiplan) + +Dieses Tool und mehr Informationen zu unseren anderen Tools, Karten und Apps zur +Energiewende finden Sie auf dem +[WAM-Portal des RLI](https://wam.rl-institut.de/). diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..b9068e61 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,95 @@ +repo_name: rl-institut/digipipe +repo_url: https://github.com/rl-institut/digipipe +site_name: digipipe documentation +site_url: http://127.0.0.1:8000/ +docs_dir: . +site_dir: ./site + +nav: + - Digipipe: index.md + - Hintergrund: docs/sections/background.md + - Szenarien: docs/sections/scenarios.md + - Energiesystem: docs/sections/esys.md + - Daten und Methodik: + - Überblick: docs/sections/data_overview.md + - Raw: docs/datasets/raw_datasets.md + - Preprocessed: docs/datasets/preprocessed_datasets.md + - Datasets: docs/datasets/datasets_datasets.md + - Appdata: docs/datasets/appdata_datasets.md + - Regionsspezifische Datensätze: docs/datasets/region_specific_datasets.md + - Developer Docs (en): + - Installation: docs/sections/installation.md + - Data pipeline: docs/sections/data.md + - Workflow: docs/sections/workflow.md + - Build the energy system: docs/sections/esys_dev.md + - Repo structure: docs/sections/structure.md + - Docs: docs/sections/docs.md + +plugins: + - search: + lang: + - en + - de + - exclude-search: + exclude: + - digipipe/* + - include-markdown + - same-dir + - exclude: + glob: + - digipipe/esys/* + - digipipe/config/* + - README.md + - CHANGELOG.md + - "*.tmp" + - "*.pdf" + - "*.gz" + - "*.py" + - "*.pyc" + - "*.gpkg" + - "*.zip" + - "*.csv" + regex: + - '.*\.(tmp|bin|tar)$' + +theme: + name: material + features: + - navigation.footer + - navigation.tracking + - navigation.tabs + - navigation.instant + - navigation.indexes + - navigation.top + - content.code.copy + - toc.follow + - search.highlight + - search.suggestions + palette: + - scheme: default + primary: indigo + accent: indigo + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - scheme: slate + primary: indigo + accent: indigo + toggle: + icon: material/brightness-4 + name: Switch to light mode + +extra_css: + - docs/stylesheets/extra.css + +markdown_extensions: + - attr_list + - md_in_html + - pymdownx.smartsymbols: + arrows: True + - toc: + permalink: "#" + - admonition + - pymdownx.details + - pymdownx.superfences + diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..6a02884e --- /dev/null +++ b/poetry.lock @@ -0,0 +1,5081 @@ +# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. 
+ +[[package]] +name = "affine" +version = "2.4.0" +description = "Matrices describing affine transformation of the plane" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "affine-2.4.0-py3-none-any.whl", hash = "sha256:8a3df80e2b2378aef598a83c1392efd47967afec4242021a0b06b4c7cbc61a92"}, + {file = "affine-2.4.0.tar.gz", hash = "sha256:a24d818d6a836c131976d22f8c27b8d3ca32d0af64c1d8d29deb7bafa4da1eea"}, +] + +[package.extras] +dev = ["coveralls", "flake8", "pydocstyle"] +test = ["pytest (>=4.6)", "pytest-cov"] + +[[package]] +name = "annotated-types" +version = "0.5.0" +description = "Reusable constraint types to use with typing.Annotated" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"}, + {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + +[[package]] +name = "astroid" +version = "2.15.6" +description = "An abstract syntax tree for Python with inference support." +category = "dev" +optional = false +python-versions = ">=3.7.2" +files = [ + {file = "astroid-2.15.6-py3-none-any.whl", hash = "sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c"}, + {file = "astroid-2.15.6.tar.gz", hash = "sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd"}, +] + +[package.dependencies] +lazy-object-proxy = ">=1.4.0" +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} +wrapt = [ + {version = ">=1.11,<2", markers = "python_version < \"3.11\""}, + {version = ">=1.14,<2", markers = "python_version >= \"3.11\""}, +] + +[[package]] +name = "atomicwrites" +version = "1.4.1" +description = "Atomic file writes." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"}, +] + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[[package]] +name = "babel" +version = "2.12.1" +description = "Internationalization utilities" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"}, + {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"}, +] + +[package.dependencies] +pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} + +[[package]] +name = "bandit" +version = "1.7.5" +description = "Security oriented static analyser for python code." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "bandit-1.7.5-py3-none-any.whl", hash = "sha256:75665181dc1e0096369112541a056c59d1c5f66f9bb74a8d686c3c362b83f549"}, + {file = "bandit-1.7.5.tar.gz", hash = "sha256:bdfc739baa03b880c2d15d0431b31c658ffc348e907fe197e54e0389dd59e11e"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""} +GitPython = ">=1.0.1" +PyYAML = ">=5.3.1" +rich = "*" +stevedore = ">=1.20.0" + +[package.extras] +test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)", "tomli (>=1.1.0)"] +toml = ["tomli (>=1.1.0)"] +yaml = ["PyYAML"] + +[[package]] +name = "bcrypt" +version = "4.0.1" +description = "Modern password hashing for your software and your servers" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"}, + {file = 
"bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"}, + {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"}, + {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"}, + {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + +[[package]] +name = "beautifulsoup4" +version = "4.12.2" +description = "Screen-scraping library" +category = "main" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, + {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "black" +version = "21.12b0" +description = "The uncompromising code formatter." 
+category = "dev" +optional = false +python-versions = ">=3.6.2" +files = [ + {file = "black-21.12b0-py3-none-any.whl", hash = "sha256:a615e69ae185e08fdd73e4715e260e2479c861b5740057fde6e8b4e3b7dd589f"}, + {file = "black-21.12b0.tar.gz", hash = "sha256:77b80f693a569e2e527958459634f18df9b0ba2625ba4e0c2d5da5be42e6f2b3"}, +] + +[package.dependencies] +click = ">=7.1.2" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.9.0,<1" +platformdirs = ">=2" +tomli = ">=0.2.6,<2.0.0" +typing-extensions = [ + {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}, + {version = ">=3.10.0.0,<3.10.0.1 || >3.10.0.1", markers = "python_version >= \"3.10\""}, +] + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +python2 = ["typed-ast (>=1.4.3)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "blinker" +version = "1.6.2" +description = "Fast, simple object-to-object and broadcast signaling" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "blinker-1.6.2-py3-none-any.whl", hash = "sha256:c3d739772abb7bc2860abf5f2ec284223d9ad5c76da018234f6f50d6f31ab1f0"}, + {file = "blinker-1.6.2.tar.gz", hash = "sha256:4afd3de66ef3a9f8067559fb7a1cbe555c17dcbe15971b05d1b625c3e7abe213"}, +] + +[[package]] +name = "boto3" +version = "1.26.125" +description = "The AWS SDK for Python" +category = "main" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "boto3-1.26.125-py3-none-any.whl", hash = "sha256:6648aff15d19927cd26db47eb56362ccd313a1ddbd7aaa3235ef05d05d398252"}, + {file = "boto3-1.26.125.tar.gz", hash = "sha256:fe8248b80c4f0fdaed8b8779467c4431a5e52177e02ccd137d51ec51194ebb00"}, +] + +[package.dependencies] +botocore = ">=1.29.125,<1.30.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.6.0,<0.7.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.29.165" +description = "Low-level, data-driven core of boto 3." +category = "main" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "botocore-1.29.165-py3-none-any.whl", hash = "sha256:6f35d59e230095aed7cd747604fe248fa384bebb7d09549077892f936a8ca3df"}, + {file = "botocore-1.29.165.tar.gz", hash = "sha256:988b948be685006b43c4bbd8f5c0cb93e77c66deb70561994e0c5b31b5a67210"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = ">=1.25.4,<1.27" + +[package.extras] +crt = ["awscrt (==0.16.9)"] + +[[package]] +name = "cchardet" +version = "2.1.7" +description = "cChardet is high speed universal character encoding detector." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "cchardet-2.1.7-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c6f70139aaf47ffb94d89db603af849b82efdf756f187cdd3e566e30976c519f"}, + {file = "cchardet-2.1.7-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:5a25f9577e9bebe1a085eec2d6fdd72b7a9dd680811bba652ea6090fb2ff472f"}, + {file = "cchardet-2.1.7-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:6b6397d8a32b976a333bdae060febd39ad5479817fabf489e5596a588ad05133"}, + {file = "cchardet-2.1.7-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:228d2533987c450f39acf7548f474dd6814c446e9d6bd228e8f1d9a2d210f10b"}, + {file = "cchardet-2.1.7-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:54341e7e1ba9dc0add4c9d23b48d3a94e2733065c13920e85895f944596f6150"}, + {file = "cchardet-2.1.7-cp36-cp36m-win32.whl", hash = "sha256:eee4f5403dc3a37a1ca9ab87db32b48dc7e190ef84601068f45397144427cc5e"}, + {file = "cchardet-2.1.7-cp36-cp36m-win_amd64.whl", hash = "sha256:f86e0566cb61dc4397297696a4a1b30f6391b50bc52b4f073507a48466b6255a"}, + {file = "cchardet-2.1.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:302aa443ae2526755d412c9631136bdcd1374acd08e34f527447f06f3c2ddb98"}, + {file = "cchardet-2.1.7-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:70eeae8aaf61192e9b247cf28969faef00578becd2602526ecd8ae7600d25e0e"}, + {file = "cchardet-2.1.7-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a39526c1c526843965cec589a6f6b7c2ab07e3e56dc09a7f77a2be6a6afa4636"}, + {file = "cchardet-2.1.7-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:b154effa12886e9c18555dfc41a110f601f08d69a71809c8d908be4b1ab7314f"}, + {file = "cchardet-2.1.7-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:ec3eb5a9c475208cf52423524dcaf713c394393e18902e861f983c38eeb77f18"}, + {file = "cchardet-2.1.7-cp37-cp37m-win32.whl", hash = "sha256:50ad671e8d6c886496db62c3bd68b8d55060688c655873aa4ce25ca6105409a1"}, + {file = "cchardet-2.1.7-cp37-cp37m-win_amd64.whl", hash = "sha256:54d0b26fd0cd4099f08fb9c167600f3e83619abefeaa68ad823cc8ac1f7bcc0c"}, + {file = "cchardet-2.1.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b59ddc615883835e03c26f81d5fc3671fab2d32035c87f50862de0da7d7db535"}, + {file = "cchardet-2.1.7-cp38-cp38-manylinux1_i686.whl", hash = "sha256:27a9ba87c9f99e0618e1d3081189b1217a7d110e5c5597b0b7b7c3fedd1c340a"}, + {file = "cchardet-2.1.7-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:90086e5645f8a1801350f4cc6cb5d5bf12d3fa943811bb08667744ec1ecc9ccd"}, + {file = "cchardet-2.1.7-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:45456c59ec349b29628a3c6bfb86d818ec3a6fbb7eb72de4ff3bd4713681c0e3"}, + {file = "cchardet-2.1.7-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f16517f3697569822c6d09671217fdeab61dfebc7acb5068634d6b0728b86c0b"}, + {file = "cchardet-2.1.7-cp38-cp38-win32.whl", hash = "sha256:0b859069bbb9d27c78a2c9eb997e6f4b738db2d7039a03f8792b4058d61d1109"}, + {file = "cchardet-2.1.7-cp38-cp38-win_amd64.whl", hash = "sha256:273699c4e5cd75377776501b72a7b291a988c6eec259c29505094553ee505597"}, + {file = "cchardet-2.1.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:48ba829badef61441e08805cfa474ccd2774be2ff44b34898f5854168c596d4d"}, + {file = "cchardet-2.1.7-cp39-cp39-manylinux1_i686.whl", hash = "sha256:bd7f262f41fd9caf5a5f09207a55861a67af6ad5c66612043ed0f81c58cdf376"}, + {file = "cchardet-2.1.7-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:fdac1e4366d0579fff056d1280b8dc6348be964fda8ebb627c0269e097ab37fa"}, + {file = "cchardet-2.1.7-cp39-cp39-manylinux2010_i686.whl", hash = 
"sha256:80e6faae75ecb9be04a7b258dc4750d459529debb6b8dee024745b7b5a949a34"}, + {file = "cchardet-2.1.7-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:c96aee9ebd1147400e608a3eff97c44f49811f8904e5a43069d55603ac4d8c97"}, + {file = "cchardet-2.1.7-cp39-cp39-win32.whl", hash = "sha256:2309ff8fc652b0fc3c0cff5dbb172530c7abb92fe9ba2417c9c0bcf688463c1c"}, + {file = "cchardet-2.1.7-cp39-cp39-win_amd64.whl", hash = "sha256:24974b3e40fee9e7557bb352be625c39ec6f50bc2053f44a3d1191db70b51675"}, + {file = "cchardet-2.1.7.tar.gz", hash = "sha256:c428b6336545053c2589f6caf24ea32276c6664cb86db817e03a94c60afa0eaf"}, +] + +[[package]] +name = "certifi" +version = "2023.7.22" +description = "Python package for providing Mozilla's CA Bundle." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = 
"cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = 
"cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "chardet" +version = "5.2.0" +description = "Universal encoding detector for Python 3" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, + {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.2.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +category = "main" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, + {file = 
"charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, + {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, +] + +[[package]] +name = "click" +version = "7.1.2" +description = "Composable command line interface toolkit" +category = "main" 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, + {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, +] + +[[package]] +name = "click-plugins" +version = "1.1.1" +description = "An extension module for click to enable registering CLI commands via setuptools entry-points." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b"}, + {file = "click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"}, +] + +[package.dependencies] +click = ">=4.0" + +[package.extras] +dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] + +[[package]] +name = "cligj" +version = "0.7.2" +description = "Click params for commmand line interfaces to GeoJSON" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4" +files = [ + {file = "cligj-0.7.2-py3-none-any.whl", hash = "sha256:c1ca117dbce1fe20a5809dc96f01e1c2840f6dcc939b3ddbb1111bf330ba82df"}, + {file = "cligj-0.7.2.tar.gz", hash = "sha256:a4bc13d623356b373c2c27c53dbd9c68cae5d526270bfa71f6c6fa69669c6b27"}, +] + +[package.dependencies] +click = ">=4.0" + +[package.extras] +test = ["pytest-cov"] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "configargparse" +version = "1.7" +description = "A drop-in replacement for argparse that allows options to also be set via config files and/or environment variables." 
+category = "main" +optional = false +python-versions = ">=3.5" +files = [ + {file = "ConfigArgParse-1.7-py3-none-any.whl", hash = "sha256:d249da6591465c6c26df64a9f73d2536e743be2f244eb3ebe61114af2f94f86b"}, + {file = "ConfigArgParse-1.7.tar.gz", hash = "sha256:e7067471884de5478c58a511e529f0f9bd1c66bfef1dea90935438d6c23306d1"}, +] + +[package.extras] +test = ["PyYAML", "mock", "pytest"] +yaml = ["PyYAML"] + +[[package]] +name = "connection-pool" +version = "0.0.3" +description = "thread safe connection pool" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "connection_pool-0.0.3.tar.gz", hash = "sha256:bf429e7aef65921c69b4ed48f3d48d3eac1383b05d2df91884705842d974d0dc"}, +] + +[[package]] +name = "contourpy" +version = "1.1.0" +description = "Python library for calculating contours of 2D quadrilateral grids" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "contourpy-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc"}, + {file = "contourpy-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dffcc2ddec1782dd2f2ce1ef16f070861af4fb78c69862ce0aab801495dda6a3"}, + {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25ae46595e22f93592d39a7eac3d638cda552c3e1160255258b695f7b58e5655"}, + {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17cfaf5ec9862bc93af1ec1f302457371c34e688fbd381f4035a06cd47324f48"}, + {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37"}, + {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa"}, + {file = "contourpy-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa"}, + {file = "contourpy-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2"}, + {file = "contourpy-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882"}, + {file = "contourpy-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545"}, + {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:052cc634bf903c604ef1a00a5aa093c54f81a2612faedaa43295809ffdde885e"}, + {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9382a1c0bc46230fb881c36229bfa23d8c303b889b788b939365578d762b5c18"}, + {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e"}, + {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a"}, + {file = "contourpy-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e"}, + {file = "contourpy-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256"}, + {file = "contourpy-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed"}, + {file = "contourpy-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae"}, + {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62013a2cf68abc80dadfd2307299bfa8f5aa0dcaec5b2954caeb5fa094171103"}, + {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b6616375d7de55797d7a66ee7d087efe27f03d336c27cf1f32c02b8c1a5ac70"}, + {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94"}, + {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f"}, + {file = "contourpy-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1"}, + {file = "contourpy-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4"}, + {file = "contourpy-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9"}, + {file = "contourpy-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a"}, + {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f2931ed4741f98f74b410b16e5213f71dcccee67518970c42f64153ea9313b9"}, + {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30f511c05fab7f12e0b1b7730ebdc2ec8deedcfb505bc27eb570ff47c51a8f15"}, + {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f"}, + {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a"}, + {file = "contourpy-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002"}, + {file = "contourpy-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d"}, + {file = "contourpy-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493"}, + {file = "contourpy-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439"}, + {file = "contourpy-1.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a67259c2b493b00e5a4d0f7bfae51fb4b3371395e47d079a4446e9b0f4d70e76"}, + {file = "contourpy-1.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2b836d22bd2c7bb2700348e4521b25e077255ebb6ab68e351ab5aa91ca27e027"}, + {file = "contourpy-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084eaa568400cfaf7179b847ac871582199b1b44d5699198e9602ecbbb5f6104"}, + {file = "contourpy-1.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:911ff4fd53e26b019f898f32db0d4956c9d227d51338fb3b03ec72ff0084ee5f"}, + {file = "contourpy-1.1.0.tar.gz", hash = "sha256:e53046c3863828d21d531cc3b53786e6580eb1ba02477e8681009b6aa0870b21"}, +] + +[package.dependencies] +numpy = ">=1.16" + 
+[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.2.0)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "wurlitzer"] + +[[package]] +name = "cryptography" +version = "41.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507"}, + {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116"}, + {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c"}, + {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae"}, + {file = "cryptography-41.0.3-cp37-abi3-win32.whl", hash = "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306"}, + {file = "cryptography-41.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d"}, + {file = 
"cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4"}, + {file = "cryptography-41.0.3.tar.gz", hash = "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34"}, +] + +[package.dependencies] +cffi = ">=1.12" + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +nox = ["nox"] +pep8test = ["black", "check-sdist", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "cssselect" +version = "1.2.0" +description = "cssselect parses CSS3 Selectors and translates them to XPath 1.0" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cssselect-1.2.0-py2.py3-none-any.whl", hash = "sha256:da1885f0c10b60c03ed5eccbb6b68d6eff248d91976fcde348f395d54c9fd35e"}, + {file = "cssselect-1.2.0.tar.gz", hash = "sha256:666b19839cfaddb9ce9d36bfe4c969132c647b92fc9088c4e23f786b30f1b3dc"}, +] + +[[package]] +name = "cycler" +version = "0.11.0" +description = "Composable style cycles" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, + {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, +] + +[[package]] +name = "darglint" +version = "1.8.1" +description = "A utility for ensuring Google-style docstrings stay up to date with the source code." +category = "dev" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "darglint-1.8.1-py3-none-any.whl", hash = "sha256:5ae11c259c17b0701618a20c3da343a3eb98b3bc4b5a83d31cdd94f5ebdced8d"}, + {file = "darglint-1.8.1.tar.gz", hash = "sha256:080d5106df149b199822e7ee7deb9c012b49891538f14a11be681044f0bb20da"}, +] + +[[package]] +name = "datapackage" +version = "1.5.1" +description = "Utilities to work with Data Packages as defined on specs.frictionlessdata.io" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "datapackage-1.5.1-py2.py3-none-any.whl", hash = "sha256:8249d38852c3ae8af99c63a6ab90e7568215a78bee2ef7e9897d16c1945928ea"}, + {file = "datapackage-1.5.1.tar.gz", hash = "sha256:72af7738eb614068706a9a32b0814df9c77a99f5508df3bed6331196c05de9e4"}, +] + +[package.dependencies] +cchardet = ">=1.0,<3.0" +click = ">=6.7,<8.0" +jsonpointer = ">=1.10,<2.0" +jsonschema = ">=2.5,<3.0" +requests = ">=2.8,<3.0" +six = ">=1.10,<2.0" +tableschema = ">=1.1.0,<2.0" +tabulator = ">=1.3,<2.0" +unicodecsv = ">=0.14,<2.0" + +[package.extras] +develop = ["pylama", "tox"] + +[[package]] +name = "datrie" +version = "0.8.2" +description = "Super-fast, efficiently stored Trie for Python." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "datrie-0.8.2-cp27-cp27m-macosx_10_7_x86_64.whl", hash = "sha256:53969643e2794c37f024d5edaa42d5e6e2627d9937ddcc18d99128e9df700e4c"}, + {file = "datrie-0.8.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:6c9b333035312b79e6e9a10356d033e3d29aadbae6365007f706c854b3a94674"}, + {file = "datrie-0.8.2-cp27-cp27m-win32.whl", hash = "sha256:c783e2c1e28964b2b045a951eb9606833a188c4bd4a780da68d22f557e03e429"}, + {file = "datrie-0.8.2-cp27-cp27m-win_amd64.whl", hash = "sha256:f826e843138698501cbf1a21233f724b851b1e475fad532b638ac5904e115f10"}, + {file = "datrie-0.8.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:bf5c956c0a9a9d0f07e3c8923746279171096de18a8a51685e22d9817f8755a6"}, + {file = "datrie-0.8.2-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:2de594d84a2f43a09ddc15316a8afd48aae0fdc456f9279d0940aa59c473d9d5"}, + {file = "datrie-0.8.2-cp35-cp35m-macosx_10_6_x86_64.whl", hash = "sha256:651c63325056347b86c5de7ffeea8529230a5787c61ee6dcabc5b6c644bd3252"}, + {file = "datrie-0.8.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:0e3b76676abbae2368cce6bf605bb0ba7cfd11f2c420b96d67959f353d5d423f"}, + {file = "datrie-0.8.2-cp35-cp35m-win32.whl", hash = "sha256:3a3e360a765cc95410898dc222f8585ea1b1bba0538a1af4d8630a5bc3ad6ee7"}, + {file = "datrie-0.8.2-cp35-cp35m-win_amd64.whl", hash = "sha256:fa9f39ac88dc6286672b9dd286fe459646da48133c877a927af24803eaea441e"}, + {file = "datrie-0.8.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b6fd6c7c149b410a87d46072c1c98f6e87ec557802e1d0e09db7b858746e8550"}, + {file = "datrie-0.8.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:327d9c17efaebc66d1956dca047b76fdd0e5b989d63cb55b9038ec09d8769089"}, + {file = "datrie-0.8.2-cp36-cp36m-win32.whl", hash = "sha256:ee7cd8470a982356e104e62148f2dbe2d3e17545cafaa3ada29f2548984f1e89"}, + {file = "datrie-0.8.2-cp36-cp36m-win_amd64.whl", hash = "sha256:31e316ba305cdd7b8a42f8e4af5a0a15a628aee270d2f392c41329a709eeda6d"}, + {file = "datrie-0.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe04704eb41b8440ca61416d3670ca6ddeea847d19731cf121889bac2962d07"}, + {file = "datrie-0.8.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e1d704ee4fdc03f02d7dacc4d92052dbd490dba551509fccfd8ee52c9039d4ad"}, + {file = "datrie-0.8.2-cp37-cp37m-win32.whl", hash = "sha256:25e9e07ecfceaef78d23bde8d7278e4d6f63e1e3dc5ac00ccb4bec3062f0a8e0"}, + {file = "datrie-0.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:bf9f34f7c63797219b32713b561c4f94e777ff6c22beecfcd6bdf6b6c25b8518"}, + {file = "datrie-0.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e0582435a4adef1a2fce53aeedb656bf769b0f113b524f98be51d3e3d40720cb"}, + {file = "datrie-0.8.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:b2d80fa687173cb8f8bae224ef00d1ad6bda8f8597bbb1a63f85182c7d91aeb3"}, + {file = "datrie-0.8.2-cp38-cp38-win32.whl", hash = "sha256:67603594f5db5c0029b1cf86a08e89cde015fe64cf0c4ae4e539c61114396729"}, + {file = "datrie-0.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61cf2726f04c08828bfb4e7af698b0b16bdf2777c3993d042f2898b8e118f21"}, + {file = "datrie-0.8.2-pp273-pypy_73-win32.whl", hash = "sha256:b07bd5fdfc3399a6dab86d6e35c72b1dbd598e80c97509c7c7518ab8774d3fda"}, + {file = "datrie-0.8.2-pp373-pypy36_pp73-win32.whl", hash = "sha256:89ff3d41df4f899387aa07b4b066f5da36e3a10b67b8aeae631c950502ff4503"}, + {file = "datrie-0.8.2.tar.gz", hash = "sha256:525b08f638d5cf6115df6ccd818e5a01298cd230b2dac91c8ff2e6499d18765d"}, +] + +[[package]] +name = 
"descartes" +version = "1.1.0" +description = "Use geometric objects as matplotlib paths and patches" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "descartes-1.1.0-py2-none-any.whl", hash = "sha256:b7e412e7e6e294412f1d0f661f187babc970088c2456089e6801eebb043c2e1b"}, + {file = "descartes-1.1.0-py3-none-any.whl", hash = "sha256:4c62dc41109689d03e4b35de0a2bcbdeeb81047badc607c4415d5c753bd683af"}, + {file = "descartes-1.1.0.tar.gz", hash = "sha256:135a502146af5ed6ff359975e2ebc5fa4b71b5432c355c2cafdc6dea1337035b"}, +] + +[package.dependencies] +matplotlib = "*" + +[[package]] +name = "dill" +version = "0.3.7" +description = "serialize all of Python" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, + {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] + +[[package]] +name = "distlib" +version = "0.3.7" +description = "Distribution utilities" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.7-py2.py3-none-any.whl", hash = "sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057"}, + {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"}, +] + +[[package]] +name = "distro" +version = "1.8.0" +description = "Distro - an OS platform information API" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, + {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, +] + +[[package]] +name = "docutils" +version = "0.20.1" +description = "Docutils -- Python Documentation Utilities" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, + {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, +] + +[[package]] +name = "dpath" +version = "2.1.6" +description = "Filesystem-like pathing and searching for dictionaries" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "dpath-2.1.6-py3-none-any.whl", hash = "sha256:31407395b177ab63ef72e2f6ae268c15e938f2990a8ecf6510f5686c02b6db73"}, + {file = "dpath-2.1.6.tar.gz", hash = "sha256:f1e07c72e8605c6a9e80b64bc8f42714de08a789c7de417e49c3f87a19692e47"}, +] + +[[package]] +name = "dynaconf" +version = "3.2.2" +description = "The dynamic configurator for your Python Project" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dynaconf-3.2.2-py2.py3-none-any.whl", hash = "sha256:0d62e51af6e9971e8e45cabee487ec70467d6c5065a9f070beac973bedaf1d54"}, + {file = "dynaconf-3.2.2.tar.gz", hash = "sha256:2f98ec85a2b8edb767b3ed0f82c6d605d30af116ce4622932a719ba70ff152fc"}, +] + +[package.extras] +all = ["configobj", "hvac", "redis", "ruamel.yaml"] +configobj = ["configobj"] +ini = ["configobj"] +redis = ["redis"] +test = ["configobj", "django", "flake8", "flake8-debugger", "flake8-print", "flake8-todo", "flask (>=0.12)", "hvac", "pep8-naming", "pytest", "pytest-cov", "pytest-mock", "pytest-xdist", 
"python-dotenv", "radon", "redis", "toml"] +toml = ["toml"] +vault = ["hvac"] +yaml = ["ruamel.yaml"] + +[[package]] +name = "eradicate" +version = "2.3.0" +description = "Removes commented-out code." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "eradicate-2.3.0-py3-none-any.whl", hash = "sha256:2b29b3dd27171f209e4ddd8204b70c02f0682ae95eecb353f10e8d72b149c63e"}, + {file = "eradicate-2.3.0.tar.gz", hash = "sha256:06df115be3b87d0fc1c483db22a2ebb12bcf40585722810d809cc770f5031c37"}, +] + +[[package]] +name = "et-xmlfile" +version = "1.1.0" +description = "An implementation of lxml.xmlfile for the standard library" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "et_xmlfile-1.1.0-py3-none-any.whl", hash = "sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"}, + {file = "et_xmlfile-1.1.0.tar.gz", hash = "sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c"}, +] + +[[package]] +name = "fastjsonschema" +version = "2.18.0" +description = "Fastest Python implementation of JSON schema" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "fastjsonschema-2.18.0-py3-none-any.whl", hash = "sha256:128039912a11a807068a7c87d0da36660afbfd7202780db26c4aa7153cfdc799"}, + {file = "fastjsonschema-2.18.0.tar.gz", hash = "sha256:e820349dd16f806e4bd1467a138dced9def4bc7d6213a34295272a6cac95b5bd"}, +] + +[package.extras] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] + +[[package]] +name = "filelock" +version = "3.12.3" +description = "A platform independent file lock." +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.12.3-py3-none-any.whl", hash = "sha256:f067e40ccc40f2b48395a80fcbd4728262fab54e232e090a4063ab804179efeb"}, + {file = "filelock-3.12.3.tar.gz", hash = "sha256:0ecc1dd2ec4672a10c8550a8182f1bd0c0a5088470ecd5a125e45f49472fac3d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.7.1", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"] + +[[package]] +name = "fiona" +version = "1.8.22" +description = "Fiona reads and writes spatial data files" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "Fiona-1.8.22-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:59a3800bc09ebee3516d64d02a8a6818d07ab1573c6096f3ef3468bf9f8f95f8"}, + {file = "Fiona-1.8.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:904793b17aee70ca9c3d582dbf01623eccfdeacd00c5e1a8e421be41f2e43d67"}, + {file = "Fiona-1.8.22-cp310-cp310-win_amd64.whl", hash = "sha256:df34c980cd7396adfbc89bbb363bdd6e358c76f91969fc98c9dfc076dd11638d"}, + {file = "Fiona-1.8.22-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:75924f69c51db6e258c91308780546278028c509db12aa33a47692a0266c9667"}, + {file = "Fiona-1.8.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e33860aaf70bbd2726cff12fd3857bd832b6dc2ad3ce4b27e7563bd68abdc26f"}, + {file = "Fiona-1.8.22-cp311-cp311-win_amd64.whl", hash = "sha256:18649326a7724611b16b648e14fd094089d517413b95ac91d0cdb0adc5fcb8de"}, + {file = "Fiona-1.8.22-cp36-cp36m-macosx_10_10_x86_64.whl", hash = 
"sha256:c4aafdd565b3a30bdd78cafae35d4945f6741eef31401c1bb1e166b6262d7539"}, + {file = "Fiona-1.8.22-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f26c8b6ea9bc92cbd52a4dd83ffd44472450bf92f4e3d4ef2341adc2f35a54d"}, + {file = "Fiona-1.8.22-cp36-cp36m-win_amd64.whl", hash = "sha256:c28d9ffa5d230a1d9eaf571529fa9eb7573d39613354c090ad077ad153a37ee1"}, + {file = "Fiona-1.8.22-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:d47777890aa1d715025abc7a6d6b2a6bb8d2a37cc94c44ce95940b80eda21444"}, + {file = "Fiona-1.8.22-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:e3ed1c0c1c60f710a612aaeb294de54214d228c4ef40e0c1dc159e46f86a9446"}, + {file = "Fiona-1.8.22-cp37-cp37m-win_amd64.whl", hash = "sha256:ce9a22c9883cc5d11c05ba3fb9db5082044a07c6b299753ea5bb8e178b8ba53b"}, + {file = "Fiona-1.8.22-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:6ba2294bc6adcbc36229862667aac6b98e6c306e1958caf53b8bfcf9a3b8c77a"}, + {file = "Fiona-1.8.22-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5cad3424b7473eb0e19f17ee45abec92133a694a4b452a278f02e3b8d0f810f"}, + {file = "Fiona-1.8.22-cp38-cp38-win_amd64.whl", hash = "sha256:b88e2e6548a41c1dfa3f96c8275ff472a3edca729e14a641c0fa5b2e146a8ab5"}, + {file = "Fiona-1.8.22-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:ed75dd29c89e0e455e3a322f28cd92f192bcb8fced16e2bfb6422a7f95ffe5e9"}, + {file = "Fiona-1.8.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89cfcc3bdb4aba7bba1eb552b3866b851334693ab694529803122b21f5927960"}, + {file = "Fiona-1.8.22-cp39-cp39-win_amd64.whl", hash = "sha256:d0df3e105ad7f0cca5f16b441c232fd693ef6c4adf2c1b6271aaaa1cdc06164d"}, + {file = "Fiona-1.8.22.tar.gz", hash = "sha256:a82a99ce9b3e7825740157c45c9fb2259d4e92f0a886aaac25f0db40ffe1eea3"}, +] + +[package.dependencies] +attrs = ">=17" +certifi = "*" +click = ">=4.0" +click-plugins = ">=1.0" +cligj = ">=0.5" +munch = "*" +setuptools = "*" +six = ">=1.7" + +[package.extras] +all = ["boto3 (>=1.2.4)", "mock", "pytest (>=3)", "pytest-cov", "shapely"] +calc = ["shapely"] +s3 = ["boto3 (>=1.2.4)"] +test = ["boto3 (>=1.2.4)", "mock", "pytest (>=3)", "pytest-cov"] + +[[package]] +name = "flake8" +version = "3.9.2" +description = "the modular source code checker: pep8 pyflakes and co" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "flake8-3.9.2-py2.py3-none-any.whl", hash = "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"}, + {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"}, +] + +[package.dependencies] +mccabe = ">=0.6.0,<0.7.0" +pycodestyle = ">=2.7.0,<2.8.0" +pyflakes = ">=2.3.0,<2.4.0" + +[[package]] +name = "flake8-bandit" +version = "3.0.0" +description = "Automated security testing with bandit and flake8." +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "flake8_bandit-3.0.0-py2.py3-none-any.whl", hash = "sha256:61b617f4f7cdaa0e2b1e6bf7b68afb2b619a227bb3e3ae00dd36c213bd17900a"}, + {file = "flake8_bandit-3.0.0.tar.gz", hash = "sha256:54d19427e6a8d50322a7b02e1841c0a7c22d856975f3459803320e0e18e2d6a1"}, +] + +[package.dependencies] +bandit = ">=1.7.3" +flake8 = "*" +flake8-polyfill = "*" +pycodestyle = "*" + +[[package]] +name = "flake8-bugbear" +version = "22.12.6" +description = "A plugin for flake8 finding likely bugs and design problems in your program. 
Contains warnings that don't belong in pyflakes and pycodestyle." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "flake8-bugbear-22.12.6.tar.gz", hash = "sha256:4cdb2c06e229971104443ae293e75e64c6107798229202fbe4f4091427a30ac0"}, + {file = "flake8_bugbear-22.12.6-py3-none-any.whl", hash = "sha256:b69a510634f8a9c298dfda2b18a8036455e6b19ecac4fe582e4d7a0abfa50a30"}, +] + +[package.dependencies] +attrs = ">=19.2.0" +flake8 = ">=3.0.0" + +[package.extras] +dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "tox"] + +[[package]] +name = "flake8-builtins" +version = "1.5.3" +description = "Check for python builtins being used as variables or parameters." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "flake8-builtins-1.5.3.tar.gz", hash = "sha256:09998853b2405e98e61d2ff3027c47033adbdc17f9fe44ca58443d876eb00f3b"}, + {file = "flake8_builtins-1.5.3-py2.py3-none-any.whl", hash = "sha256:7706babee43879320376861897e5d1468e396a40b8918ed7bccf70e5f90b8687"}, +] + +[package.dependencies] +flake8 = "*" + +[package.extras] +test = ["coverage", "coveralls", "mock", "pytest", "pytest-cov"] + +[[package]] +name = "flake8-comprehensions" +version = "3.14.0" +description = "A flake8 plugin to help you write better list/set/dict comprehensions." +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "flake8_comprehensions-3.14.0-py3-none-any.whl", hash = "sha256:7b9d07d94aa88e62099a6d1931ddf16c344d4157deedf90fe0d8ee2846f30e97"}, + {file = "flake8_comprehensions-3.14.0.tar.gz", hash = "sha256:81768c61bfc064e1a06222df08a2580d97de10cb388694becaf987c331c6c0cf"}, +] + +[package.dependencies] +flake8 = ">=3.0,<3.2.0 || >3.2.0" + +[[package]] +name = "flake8-eradicate" +version = "1.4.0" +description = "Flake8 plugin to find commented out code" +category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "flake8-eradicate-1.4.0.tar.gz", hash = "sha256:3088cfd6717d1c9c6c3ac45ef2e5f5b6c7267f7504d5a74b781500e95cb9c7e1"}, + {file = "flake8_eradicate-1.4.0-py3-none-any.whl", hash = "sha256:e3bbd0871be358e908053c1ab728903c114f062ba596b4d40c852fd18f473d56"}, +] + +[package.dependencies] +attrs = "*" +eradicate = ">=2.0,<3.0" +flake8 = ">=3.5,<6" + +[[package]] +name = "flake8-isort" +version = "4.2.0" +description = "flake8 plugin that integrates isort ." 
+category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "flake8-isort-4.2.0.tar.gz", hash = "sha256:26571500cd54976bbc0cf1006ffbcd1a68dd102f816b7a1051b219616ba9fee0"}, + {file = "flake8_isort-4.2.0-py3-none-any.whl", hash = "sha256:5b87630fb3719bf4c1833fd11e0d9534f43efdeba524863e15d8f14a7ef6adbf"}, +] + +[package.dependencies] +flake8 = ">=3.2.1,<6" +isort = ">=4.3.5,<6" + +[package.extras] +test = ["pytest-cov"] + +[[package]] +name = "flake8-plugin-utils" +version = "1.3.3" +description = "The package provides base classes and utils for flake8 plugin writing" +category = "dev" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "flake8-plugin-utils-1.3.3.tar.gz", hash = "sha256:39f6f338d038b301c6fd344b06f2e81e382b68fa03c0560dff0d9b1791a11a2c"}, + {file = "flake8_plugin_utils-1.3.3-py3-none-any.whl", hash = "sha256:e4848c57d9d50f19100c2d75fa794b72df068666a9041b4b0409be923356a3ed"}, +] + +[[package]] +name = "flake8-polyfill" +version = "1.0.2" +description = "Polyfill package for Flake8 plugins" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "flake8-polyfill-1.0.2.tar.gz", hash = "sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda"}, + {file = "flake8_polyfill-1.0.2-py2.py3-none-any.whl", hash = "sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9"}, +] + +[package.dependencies] +flake8 = "*" + +[[package]] +name = "flake8-pytest-style" +version = "1.7.2" +description = "A flake8 plugin checking common style issues or inconsistencies with pytest-based tests." +category = "dev" +optional = false +python-versions = ">=3.7.2,<4.0.0" +files = [ + {file = "flake8_pytest_style-1.7.2-py3-none-any.whl", hash = "sha256:f5d2aa3219163a052dd92226589d45fab8ea027a3269922f0c4029f548ea5cd1"}, + {file = "flake8_pytest_style-1.7.2.tar.gz", hash = "sha256:b924197c99b951315949920b0e5547f34900b1844348432e67a44ab191582109"}, +] + +[package.dependencies] +flake8-plugin-utils = ">=1.3.2,<2.0.0" + +[[package]] +name = "fonttools" +version = "4.42.1" +description = "Tools to manipulate font files" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fonttools-4.42.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ed1a13a27f59d1fc1920394a7f596792e9d546c9ca5a044419dca70c37815d7c"}, + {file = "fonttools-4.42.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9b1ce7a45978b821a06d375b83763b27a3a5e8a2e4570b3065abad240a18760"}, + {file = "fonttools-4.42.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f720fa82a11c0f9042376fd509b5ed88dab7e3cd602eee63a1af08883b37342b"}, + {file = "fonttools-4.42.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db55cbaea02a20b49fefbd8e9d62bd481aaabe1f2301dabc575acc6b358874fa"}, + {file = "fonttools-4.42.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a35981d90feebeaef05e46e33e6b9e5b5e618504672ca9cd0ff96b171e4bfff"}, + {file = "fonttools-4.42.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:68a02bbe020dc22ee0540e040117535f06df9358106d3775e8817d826047f3fd"}, + {file = "fonttools-4.42.1-cp310-cp310-win32.whl", hash = "sha256:12a7c247d1b946829bfa2f331107a629ea77dc5391dfd34fdcd78efa61f354ca"}, + {file = "fonttools-4.42.1-cp310-cp310-win_amd64.whl", hash = "sha256:a398bdadb055f8de69f62b0fc70625f7cbdab436bbb31eef5816e28cab083ee8"}, + {file = "fonttools-4.42.1-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:689508b918332fb40ce117131633647731d098b1b10d092234aa959b4251add5"}, + {file = "fonttools-4.42.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e36344e48af3e3bde867a1ca54f97c308735dd8697005c2d24a86054a114a71"}, + {file = "fonttools-4.42.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19b7db825c8adee96fac0692e6e1ecd858cae9affb3b4812cdb9d934a898b29e"}, + {file = "fonttools-4.42.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:113337c2d29665839b7d90b39f99b3cac731f72a0eda9306165a305c7c31d341"}, + {file = "fonttools-4.42.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:37983b6bdab42c501202500a2be3a572f50d4efe3237e0686ee9d5f794d76b35"}, + {file = "fonttools-4.42.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6ed2662a3d9c832afa36405f8748c250be94ae5dfc5283d668308391f2102861"}, + {file = "fonttools-4.42.1-cp311-cp311-win32.whl", hash = "sha256:179737095eb98332a2744e8f12037b2977f22948cf23ff96656928923ddf560a"}, + {file = "fonttools-4.42.1-cp311-cp311-win_amd64.whl", hash = "sha256:f2b82f46917d8722e6b5eafeefb4fb585d23babd15d8246c664cd88a5bddd19c"}, + {file = "fonttools-4.42.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:62f481ac772fd68901573956231aea3e4b1ad87b9b1089a61613a91e2b50bb9b"}, + {file = "fonttools-4.42.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f2f806990160d1ce42d287aa419df3ffc42dfefe60d473695fb048355fe0c6a0"}, + {file = "fonttools-4.42.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db372213d39fa33af667c2aa586a0c1235e88e9c850f5dd5c8e1f17515861868"}, + {file = "fonttools-4.42.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d18fc642fd0ac29236ff88ecfccff229ec0386090a839dd3f1162e9a7944a40"}, + {file = "fonttools-4.42.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8708b98c278012ad267ee8a7433baeb809948855e81922878118464b274c909d"}, + {file = "fonttools-4.42.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c95b0724a6deea2c8c5d3222191783ced0a2f09bd6d33f93e563f6f1a4b3b3a4"}, + {file = "fonttools-4.42.1-cp38-cp38-win32.whl", hash = "sha256:4aa79366e442dbca6e2c8595645a3a605d9eeabdb7a094d745ed6106816bef5d"}, + {file = "fonttools-4.42.1-cp38-cp38-win_amd64.whl", hash = "sha256:acb47f6f8680de24c1ab65ebde39dd035768e2a9b571a07c7b8da95f6c8815fd"}, + {file = "fonttools-4.42.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb289b7a815638a7613d46bcf324c9106804725b2bb8ad913c12b6958ffc4ec"}, + {file = "fonttools-4.42.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:53eb5091ddc8b1199330bb7b4a8a2e7995ad5d43376cadce84523d8223ef3136"}, + {file = "fonttools-4.42.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46a0ec8adbc6ff13494eb0c9c2e643b6f009ce7320cf640de106fb614e4d4360"}, + {file = "fonttools-4.42.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cc7d685b8eeca7ae69dc6416833fbfea61660684b7089bca666067cb2937dcf"}, + {file = "fonttools-4.42.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:be24fcb80493b2c94eae21df70017351851652a37de514de553435b256b2f249"}, + {file = "fonttools-4.42.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:515607ec756d7865f23070682622c49d922901943697871fc292277cf1e71967"}, + {file = "fonttools-4.42.1-cp39-cp39-win32.whl", hash = "sha256:0eb79a2da5eb6457a6f8ab904838454accc7d4cccdaff1fd2bd3a0679ea33d64"}, + {file = "fonttools-4.42.1-cp39-cp39-win_amd64.whl", hash = "sha256:7286aed4ea271df9eab8d7a9b29e507094b51397812f7ce051ecd77915a6e26b"}, 
+ {file = "fonttools-4.42.1-py3-none-any.whl", hash = "sha256:9398f244e28e0596e2ee6024f808b06060109e33ed38dcc9bded452fd9bbb853"}, + {file = "fonttools-4.42.1.tar.gz", hash = "sha256:c391cd5af88aacaf41dd7cfb96eeedfad297b5899a39e12f4c2c3706d0a3329d"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "scipy"] +lxml = ["lxml (>=4.0,<5)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.0.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "frictionless" +version = "5.15.10" +description = "Data management framework for Python that provides functionality to describe, extract, validate, and transform tabular data" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frictionless-5.15.10-py3-none-any.whl", hash = "sha256:aeb9648d78ea8cc1a6de22ec84dddd73d340ecc5d1b27a790e1b306672ac9da5"}, + {file = "frictionless-5.15.10.tar.gz", hash = "sha256:70a351f08e18031164e7745dfb8242d12fa5b9ed785c63927687dfe6b8949449"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +chardet = ">=3.0" +humanize = ">=4.2" +isodate = ">=0.6" +jinja2 = ">=3.0" +jsonschema = "<4.18" +marko = ">=1.0" +petl = ">=1.6" +pydantic = ">=2.0" +python-dateutil = ">=2.8" +python-slugify = ">=1.2" +pyyaml = ">=5.3" +requests = ">=2.10" +rfc3986 = ">=1.4" +simpleeval = ">=0.9.11" +stringcase = ">=1.2" +tabulate = ">=0.8.10" +typer = {version = ">=0.5", extras = ["all"]} +typing-extensions = ">=4.3" +validators = ">=0.18" + +[package.extras] +aws = ["boto3 (>=1.9)"] +bigquery = ["google-api-python-client (>=1.12.1)"] +ckan = ["frictionless-ckan-mapper (>=1.0)"] +datasette = ["datasette (>=0.64.2)"] +dev = ["black", "hatch", "httpx", "ipython", "isort", "livemark", "moto", "neovim", "oauth2client", "pyright", "pytest", "pytest-cov", "pytest-dotenv", "pytest-lazy-fixture", "pytest-mock", "pytest-only", "pytest-timeout", "pytest-vcr", "requests-mock", "ruff", "yattag"] +duckdb = ["duckdb (>=0.8)", "duckdb-engine (>=0.7)", "sqlalchemy (>=1.4)"] +excel = ["openpyxl (>=3.0)", "tableschema-to-template (>=0.0)", "xlrd (>=1.2)", "xlwt (>=1.2)"] +github = ["pygithub (>=1.50)"] +gsheets = ["pygsheets (>=2.0)"] +html = ["pyquery (>=1.4)"] +json = ["ijson (>=3.0)", "jsonlines (>=1.2)"] +mysql = ["pymysql (>=1.0)", "sqlalchemy (>=1.4)"] +ods = ["ezodf (>=0.3)", "lxml (>=4.0)"] +pandas = ["pandas (>=1.0)"] +parquet = ["fastparquet (>=0.8)"] +postgresql = ["psycopg (>=3.0)", "psycopg2 (>=2.9)", "sqlalchemy (>=1.4)"] +spss = ["savreaderwriter (>=3.0)"] +sql = ["sqlalchemy (>=1.4)"] +visidata = ["visidata (>=2.10)"] +wkt = ["grako (>=3.99)"] +zenodo = ["pyzenodo3 (>=1.0)"] + +[[package]] +name = "geographiclib" +version = "2.0" +description = "The geodesic routines from GeographicLib" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "geographiclib-2.0-py3-none-any.whl", hash = "sha256:6b7225248e45ff7edcee32becc4e0a1504c606ac5ee163a5656d482e0cd38734"}, + {file = "geographiclib-2.0.tar.gz", hash = "sha256:f7f41c85dc3e1c2d3d935ec86660dc3b2c848c83e17f9a9e51ba9d5146a15859"}, +] + +[[package]] +name = "geopandas" 
+version = "0.12.2" +description = "Geographic pandas extensions" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "geopandas-0.12.2-py3-none-any.whl", hash = "sha256:0a470e4bf6f5367e6fd83ab6b40405e0b805c8174665bbcb7c4077ed90202912"}, + {file = "geopandas-0.12.2.tar.gz", hash = "sha256:0acdacddefa176525e4da6d9aeeece225da26055c4becdc6e97cf40fa97c27f4"}, +] + +[package.dependencies] +fiona = ">=1.8" +packaging = "*" +pandas = ">=1.0.0" +pyproj = ">=2.6.1.post1" +shapely = ">=1.7" + +[[package]] +name = "geopy" +version = "2.4.0" +description = "Python Geocoding Toolbox" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "geopy-2.4.0-py3-none-any.whl", hash = "sha256:d2639a46d0ce4c091e9688b750ba94348a14b898a1e55c68f4b4a07e7d1afa20"}, + {file = "geopy-2.4.0.tar.gz", hash = "sha256:a59392bf17adb486b25dbdd71fbed27733bdf24a2dac588047a619de56695e36"}, +] + +[package.dependencies] +geographiclib = ">=1.52,<3" + +[package.extras] +aiohttp = ["aiohttp"] +dev = ["coverage", "flake8 (>=5.0,<5.1)", "isort (>=5.10.0,<5.11.0)", "pytest (>=3.10)", "pytest-asyncio (>=0.17)", "readme-renderer", "sphinx (<=4.3.2)", "sphinx-issues", "sphinx-rtd-theme (>=0.5.0)"] +dev-docs = ["readme-renderer", "sphinx (<=4.3.2)", "sphinx-issues", "sphinx-rtd-theme (>=0.5.0)"] +dev-lint = ["flake8 (>=5.0,<5.1)", "isort (>=5.10.0,<5.11.0)"] +dev-test = ["coverage", "pytest (>=3.10)", "pytest-asyncio (>=0.17)", "sphinx (<=4.3.2)"] +requests = ["requests (>=2.16.2)", "urllib3 (>=1.24.2)"] +timezone = ["pytz"] + +[[package]] +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + +[[package]] +name = "gitdb" +version = "4.0.10" +description = "Git Object Database" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, + {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.32" +description = "GitPython is a Python library used to interact with Git repositories" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.32-py3-none-any.whl", hash = "sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f"}, + {file = "GitPython-3.1.32.tar.gz", hash = "sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[[package]] +name = "greenlet" +version = "2.0.2" +description = "Lightweight in-process concurrent programming" +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" +files = [ + {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, + {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = 
"sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, + {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, + {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, + {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, + {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, + {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, + {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, + {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, + {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, + {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, + {file = 
"greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, + {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, + {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, + {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, + {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, + {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, + {file = 
"greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, + {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, + {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, + {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, + {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, + {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, +] + +[package.extras] +docs = ["Sphinx", "docutils (<0.18)"] +test = ["objgraph", "psutil"] + +[[package]] +name = "humanfriendly" +version = "10.0" +description = "Human friendly output for text interfaces using Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, + {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, +] + +[package.dependencies] +pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} + +[[package]] +name = "humanize" +version = "4.8.0" +description = "Python humanize utilities" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "humanize-4.8.0-py3-none-any.whl", hash = "sha256:8bc9e2bb9315e61ec06bf690151ae35aeb65651ab091266941edf97c90836404"}, + {file = "humanize-4.8.0.tar.gz", hash = "sha256:9783373bf1eec713a770ecaa7c2d7a7902c98398009dfa3d8a2df91eec9311e8"}, +] + +[package.extras] +tests = ["freezegun", "pytest", "pytest-cov"] + 
+[[package]] +name = "identify" +version = "2.5.27" +description = "File identification library for Python" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "identify-2.5.27-py2.py3-none-any.whl", hash = "sha256:fdb527b2dfe24602809b2201e033c2a113d7bdf716db3ca8e3243f735dcecaba"}, + {file = "identify-2.5.27.tar.gz", hash = "sha256:287b75b04a0e22d727bc9a41f0d4f3c1bcada97490fa6eabb5b28f0e9097e733"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "main" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "ijson" +version = "3.2.3" +description = "Iterative JSON parser with standard Python iterator interfaces" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0a4ae076bf97b0430e4e16c9cb635a6b773904aec45ed8dcbc9b17211b8569ba"}, + {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cfced0a6ec85916eb8c8e22415b7267ae118eaff2a860c42d2cc1261711d0d31"}, + {file = "ijson-3.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b9d1141cfd1e6d6643aa0b4876730d0d28371815ce846d2e4e84a2d4f471cf3"}, + {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0a27db6454edd6013d40a956d008361aac5bff375a9c04ab11fc8c214250b5"}, + {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0d526ccb335c3c13063c273637d8611f32970603dfb182177b232d01f14c23"}, + {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:545a30b3659df2a3481593d30d60491d1594bc8005f99600e1bba647bb44cbb5"}, + {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9680e37a10fedb3eab24a4a7e749d8a73f26f1a4c901430e7aa81b5da15f7307"}, + {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2a80c0bb1053055d1599e44dc1396f713e8b3407000e6390add72d49633ff3bb"}, + {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f05ed49f434ce396ddcf99e9fd98245328e99f991283850c309f5e3182211a79"}, + {file = "ijson-3.2.3-cp310-cp310-win32.whl", hash = "sha256:b4eb2304573c9fdf448d3fa4a4fdcb727b93002b5c5c56c14a5ffbbc39f64ae4"}, + {file = "ijson-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:923131f5153c70936e8bd2dd9dcfcff43c67a3d1c789e9c96724747423c173eb"}, + {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:904f77dd3d87736ff668884fe5197a184748eb0c3e302ded61706501d0327465"}, + {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0974444c1f416e19de1e9f567a4560890095e71e81623c509feff642114c1e53"}, + {file = "ijson-3.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1a4b8eb69b6d7b4e94170aa991efad75ba156b05f0de2a6cd84f991def12ff9"}, + {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d052417fd7ce2221114f8d3b58f05a83c1a2b6b99cafe0b86ac9ed5e2fc889df"}, + {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b8064a85ec1b0beda7dd028e887f7112670d574db606f68006c72dd0bb0e0e2"}, + {file = 
"ijson-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaac293853f1342a8d2a45ac1f723c860f700860e7743fb97f7b76356df883a8"}, + {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6c32c18a934c1dc8917455b0ce478fd7a26c50c364bd52c5a4fb0fc6bb516af7"}, + {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:713a919e0220ac44dab12b5fed74f9130f3480e55e90f9d80f58de129ea24f83"}, + {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"}, + {file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"}, + {file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"}, + {file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"}, + {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"}, + {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"}, + {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85afdb3f3a5d0011584d4fa8e6dccc5936be51c27e84cd2882fe904ca3bd04c5"}, + {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4fc35d569eff3afa76bfecf533f818ecb9390105be257f3f83c03204661ace70"}, + {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:455d7d3b7a6aacfb8ab1ebcaf697eedf5be66e044eac32508fccdc633d995f0e"}, + {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c63f3d57dbbac56cead05b12b81e8e1e259f14ce7f233a8cbe7fa0996733b628"}, + {file = "ijson-3.2.3-cp36-cp36m-win32.whl", hash = "sha256:a4d7fe3629de3ecb088bff6dfe25f77be3e8261ed53d5e244717e266f8544305"}, + {file = "ijson-3.2.3-cp36-cp36m-win_amd64.whl", hash = "sha256:96190d59f015b5a2af388a98446e411f58ecc6a93934e036daa75f75d02386a0"}, + {file = "ijson-3.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:35194e0b8a2bda12b4096e2e792efa5d4801a0abb950c48ade351d479cd22ba5"}, + {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1053fb5f0b010ee76ca515e6af36b50d26c1728ad46be12f1f147a835341083"}, + {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:211124cff9d9d139dd0dfced356f1472860352c055d2481459038b8205d7d742"}, + {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92dc4d48e9f6a271292d6079e9fcdce33c83d1acf11e6e12696fb05c5889fe74"}, + {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3dcc33ee56f92a77f48776014ddb47af67c33dda361e84371153c4f1ed4434e1"}, + {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98c6799925a5d1988da4cd68879b8eeab52c6e029acc45e03abb7921a4715c4b"}, + {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4252e48c95cd8ceefc2caade310559ab61c37d82dfa045928ed05328eb5b5f65"}, + {file = "ijson-3.2.3-cp37-cp37m-win32.whl", hash = "sha256:644f4f03349ff2731fd515afd1c91b9e439e90c9f8c28292251834154edbffca"}, + {file = "ijson-3.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ba33c764afa9ecef62801ba7ac0319268a7526f50f7601370d9f8f04e77fc02b"}, + {file = 
"ijson-3.2.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4b2ec8c2a3f1742cbd5f36b65e192028e541b5fd8c7fd97c1fc0ca6c427c704a"}, + {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dc357da4b4ebd8903e77dbcc3ce0555ee29ebe0747c3c7f56adda423df8ec89"}, + {file = "ijson-3.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bcc51c84bb220ac330122468fe526a7777faa6464e3b04c15b476761beea424f"}, + {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8d54b624629f9903005c58d9321a036c72f5c212701bbb93d1a520ecd15e370"}, + {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6ea7c7e3ec44742e867c72fd750c6a1e35b112f88a917615332c4476e718d40"}, + {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:916acdc5e504f8b66c3e287ada5d4b39a3275fc1f2013c4b05d1ab9933671a6c"}, + {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81815b4184b85ce124bfc4c446d5f5e5e643fc119771c5916f035220ada29974"}, + {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b49fd5fe1cd9c1c8caf6c59f82b08117dd6bea2ec45b641594e25948f48f4169"}, + {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:86b3c91fdcb8ffb30556c9669930f02b7642de58ca2987845b04f0d7fe46d9a8"}, + {file = "ijson-3.2.3-cp38-cp38-win32.whl", hash = "sha256:a729b0c8fb935481afe3cf7e0dadd0da3a69cc7f145dbab8502e2f1e01d85a7c"}, + {file = "ijson-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:d34e049992d8a46922f96483e96b32ac4c9cffd01a5c33a928e70a283710cd58"}, + {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9c2a12dcdb6fa28f333bf10b3a0f80ec70bc45280d8435be7e19696fab2bc706"}, + {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1844c5b57da21466f255a0aeddf89049e730d7f3dfc4d750f0e65c36e6a61a7c"}, + {file = "ijson-3.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ec3e5ff2515f1c40ef6a94983158e172f004cd643b9e4b5302017139b6c96e4"}, + {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46bafb1b9959872a1f946f8dd9c6f1a30a970fc05b7bfae8579da3f1f988e598"}, + {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab4db9fee0138b60e31b3c02fff8a4c28d7b152040553b6a91b60354aebd4b02"}, + {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4bc87e69d1997c6a55fff5ee2af878720801ff6ab1fb3b7f94adda050651e37"}, + {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e9fd906f0c38e9f0bfd5365e1bed98d649f506721f76bb1a9baa5d7374f26f19"}, + {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e84d27d1acb60d9102728d06b9650e5b7e5cb0631bd6e3dfadba8fb6a80d6c2f"}, + {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2cc04fc0a22bb945cd179f614845c8b5106c0b3939ee0d84ce67c7a61ac1a936"}, + {file = "ijson-3.2.3-cp39-cp39-win32.whl", hash = "sha256:e641814793a037175f7ec1b717ebb68f26d89d82cfd66f36e588f32d7e488d5f"}, + {file = "ijson-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:6bd3e7e91d031f1e8cea7ce53f704ab74e61e505e8072467e092172422728b22"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:06f9707da06a19b01013f8c65bf67db523662a9b4a4ff027e946e66c261f17f0"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be8495f7c13fa1f622a2c6b64e79ac63965b89caf664cc4e701c335c652d15f2"}, + {file = 
"ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7596b42f38c3dcf9d434dddd50f46aeb28e96f891444c2b4b1266304a19a2c09"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbac4e9609a1086bbad075beb2ceec486a3b138604e12d2059a33ce2cba93051"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:db2d6341f9cb538253e7fe23311d59252f124f47165221d3c06a7ed667ecd595"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fa8b98be298efbb2588f883f9953113d8a0023ab39abe77fe734b71b46b1220a"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:674e585361c702fad050ab4c153fd168dc30f5980ef42b64400bc84d194e662d"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd12e42b9cb9c0166559a3ffa276b4f9fc9d5b4c304e5a13668642d34b48b634"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31e0d771d82def80cd4663a66de277c3b44ba82cd48f630526b52f74663c639"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ce4c70c23521179d6da842bb9bc2e36bb9fad1e0187e35423ff0f282890c9ca"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39f551a6fbeed4433c85269c7c8778e2aaea2501d7ebcb65b38f556030642c17"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b14d322fec0de7af16f3ef920bf282f0dd747200b69e0b9628117f381b7775b"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7851a341429b12d4527ca507097c959659baf5106c7074d15c17c387719ffbcd"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3bf1b42191b5cc9b6441552fdcb3b583594cb6b19e90d1578b7cbcf80d0fae"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6f662dc44362a53af3084d3765bb01cd7b4734d1f484a6095cad4cb0cbfe5374"}, + {file = "ijson-3.2.3.tar.gz", hash = "sha256:10294e9bf89cb713da05bc4790bdff616610432db561964827074898e174f917"}, +] + +[[package]] +name = "importlib-metadata" +version = "6.8.0" +description = "Read metadata from Python packages" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"}, + {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "importlib-resources" +version = "6.0.1" +description = "Read resources from Python packages" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.0.1-py3-none-any.whl", hash = "sha256:134832a506243891221b88b4ae1213327eea96ceb4e407a00d790bb0626f45cf"}, + {file = "importlib_resources-6.0.1.tar.gz", hash = 
"sha256:4359457e42708462b9626a04657c6208ad799ceb41e5c58c57ffa0e6a098a5d4"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isodate" +version = "0.6.1" +description = "An ISO 8601 date/time/duration parser and formatter" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "isort" +version = "5.12.0" +description = "A Python utility / library to sort Python imports." +category = "dev" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"}, + {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"}, +] + +[package.extras] +colors = ["colorama (>=0.4.3)"] +pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] +plugins = ["setuptools"] +requirements-deprecated-finder = ["pip-api", "pipreqs"] + +[[package]] +name = "jinja2" +version = "3.0.3" +description = "A very fast and expressive template engine." 
+category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"}, + {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "jsonlines" +version = "3.1.0" +description = "Library with helpers for the jsonlines file format" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "jsonlines-3.1.0-py3-none-any.whl", hash = "sha256:632f5e38f93dfcb1ac8c4e09780b92af3a55f38f26e7c47ae85109d420b6ad39"}, + {file = "jsonlines-3.1.0.tar.gz", hash = "sha256:2579cb488d96f815b0eb81629e3e6b0332da0962a18fa3532958f7ba14a5c37f"}, +] + +[package.dependencies] +attrs = ">=19.2.0" + +[[package]] +name = "jsonpointer" +version = "1.14" +description = "Identify specific nodes in a JSON document (RFC 6901)" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "jsonpointer-1.14-py2.py3-none-any.whl", hash = "sha256:381b613fd1afd65376fb28948c4744f035e47ab049a9fdde0c48cc1c30b68559"}, + {file = "jsonpointer-1.14.tar.gz", hash = "sha256:c681af823545c731b7b38aedd5d4eee4c5eff87bc0f25e0ff25444a4189eac4d"}, +] + +[[package]] +name = "jsonschema" +version = "2.6.0" +description = "An implementation of JSON Schema validation for Python" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "jsonschema-2.6.0-py2.py3-none-any.whl", hash = "sha256:000e68abd33c972a5248544925a0cae7d1125f9bf6c58280d37546b946769a08"}, + {file = "jsonschema-2.6.0.tar.gz", hash = "sha256:6ff5f3180870836cae40f06fa10419f557208175f13ad7bc26caa77beb1f6e02"}, +] + +[package.extras] +format = ["rfc3987", "strict-rfc3339", "webcolors"] + +[[package]] +name = "jupyter-core" +version = "5.3.1" +description = "Jupyter core package. A base package on which Jupyter projects rely." 
+category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_core-5.3.1-py3-none-any.whl", hash = "sha256:ae9036db959a71ec1cac33081eeb040a79e681f08ab68b0883e9a676c7a90dce"}, + {file = "jupyter_core-5.3.1.tar.gz", hash = "sha256:5ba5c7938a7f97a6b0481463f7ff0dbac7c15ba48cf46fa4035ca6e838aa1aba"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "kiwisolver" +version = "1.4.5" +description = "A fast implementation of the Cassowary constraint solver" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", 
hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash 
= "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, + {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, +] + +[[package]] +name = "lazy-object-proxy" +version = "1.9.0" +description = "A fast and thorough lazy object proxy." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "lazy-object-proxy-1.9.0.tar.gz", hash = "sha256:659fb5809fa4629b8a1ac5106f669cfc7bef26fbb389dda53b3e010d1ac4ebae"}, + {file = "lazy_object_proxy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b40387277b0ed2d0602b8293b94d7257e17d1479e257b4de114ea11a8cb7f2d7"}, + {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8c6cfb338b133fbdbc5cfaa10fe3c6aeea827db80c978dbd13bc9dd8526b7d4"}, + {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:721532711daa7db0d8b779b0bb0318fa87af1c10d7fe5e52ef30f8eff254d0cd"}, + {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66a3de4a3ec06cd8af3f61b8e1ec67614fbb7c995d02fa224813cb7afefee701"}, + {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1aa3de4088c89a1b69f8ec0dcc169aa725b0ff017899ac568fe44ddc1396df46"}, + {file = "lazy_object_proxy-1.9.0-cp310-cp310-win32.whl", hash = "sha256:f0705c376533ed2a9e5e97aacdbfe04cecd71e0aa84c7c0595d02ef93b6e4455"}, + {file = "lazy_object_proxy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:ea806fd4c37bf7e7ad82537b0757999264d5f70c45468447bb2b91afdbe73a6e"}, + {file = "lazy_object_proxy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:946d27deaff6cf8452ed0dba83ba38839a87f4f7a9732e8f9fd4107b21e6ff07"}, + {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79a31b086e7e68b24b99b23d57723ef7e2c6d81ed21007b6281ebcd1688acb0a"}, + {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f699ac1c768270c9e384e4cbd268d6e67aebcfae6cd623b4d7c3bfde5a35db59"}, + {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bfb38f9ffb53b942f2b5954e0f610f1e721ccebe9cce9025a38c8ccf4a5183a4"}, + {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:189bbd5d41ae7a498397287c408617fe5c48633e7755287b21d741f7db2706a9"}, + {file = "lazy_object_proxy-1.9.0-cp311-cp311-win32.whl", hash = "sha256:81fc4d08b062b535d95c9ea70dbe8a335c45c04029878e62d744bdced5141586"}, + {file = "lazy_object_proxy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:f2457189d8257dd41ae9b434ba33298aec198e30adf2dcdaaa3a28b9994f6adb"}, + {file = "lazy_object_proxy-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9e25ef10a39e8afe59a5c348a4dbf29b4868ab76269f81ce1674494e2565a6e"}, + {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cbf9b082426036e19c6924a9ce90c740a9861e2bdc27a4834fd0a910742ac1e8"}, + {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f5fa4a61ce2438267163891961cfd5e32ec97a2c444e5b842d574251ade27d2"}, + {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8fa02eaab317b1e9e03f69aab1f91e120e7899b392c4fc19807a8278a07a97e8"}, + {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7c21c95cae3c05c14aafffe2865bbd5e377cfc1348c4f7751d9dc9a48ca4bda"}, + {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win32.whl", hash = "sha256:f12ad7126ae0c98d601a7ee504c1122bcef553d1d5e0c3bfa77b16b3968d2734"}, + {file = 
"lazy_object_proxy-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:edd20c5a55acb67c7ed471fa2b5fb66cb17f61430b7a6b9c3b4a1e40293b1671"}, + {file = "lazy_object_proxy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0daa332786cf3bb49e10dc6a17a52f6a8f9601b4cf5c295a4f85854d61de63"}, + {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cd077f3d04a58e83d04b20e334f678c2b0ff9879b9375ed107d5d07ff160171"}, + {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c94ea760b3ce47d1855a30984c78327500493d396eac4dfd8bd82041b22be"}, + {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:212774e4dfa851e74d393a2370871e174d7ff0ebc980907723bb67d25c8a7c30"}, + {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0117049dd1d5635bbff65444496c90e0baa48ea405125c088e93d9cf4525b11"}, + {file = "lazy_object_proxy-1.9.0-cp38-cp38-win32.whl", hash = "sha256:0a891e4e41b54fd5b8313b96399f8b0e173bbbfc03c7631f01efbe29bb0bcf82"}, + {file = "lazy_object_proxy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:9990d8e71b9f6488e91ad25f322898c136b008d87bf852ff65391b004da5e17b"}, + {file = "lazy_object_proxy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e7551208b2aded9c1447453ee366f1c4070602b3d932ace044715d89666899b"}, + {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f83ac4d83ef0ab017683d715ed356e30dd48a93746309c8f3517e1287523ef4"}, + {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7322c3d6f1766d4ef1e51a465f47955f1e8123caee67dd641e67d539a534d006"}, + {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18b78ec83edbbeb69efdc0e9c1cb41a3b1b1ed11ddd8ded602464c3fc6020494"}, + {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:09763491ce220c0299688940f8dc2c5d05fd1f45af1e42e636b2e8b2303e4382"}, + {file = "lazy_object_proxy-1.9.0-cp39-cp39-win32.whl", hash = "sha256:9090d8e53235aa280fc9239a86ae3ea8ac58eff66a705fa6aa2ec4968b95c821"}, + {file = "lazy_object_proxy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:db1c1722726f47e10e0b5fdbf15ac3b8adb58c091d12b3ab713965795036985f"}, +] + +[[package]] +name = "linear-tsv" +version = "1.1.0" +description = "Line-oriented, tab-separated value format" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "linear-tsv-1.1.0.tar.gz", hash = "sha256:b504d78f4075615ae75de86a16e5680701a441fc84da2a2cf9f94351ab1ccbf5"}, +] + +[package.dependencies] +six = "*" + +[package.extras] +develop = ["tox"] + +[[package]] +name = "lxml" +version = "4.9.3" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" +files = [ + {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"}, + {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"}, + {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"}, + {file = "lxml-4.9.3-cp27-cp27m-win32.whl", hash = "sha256:2c74524e179f2ad6d2a4f7caf70e2d96639c0954c943ad601a9e146c76408ed7"}, + {file = "lxml-4.9.3-cp27-cp27m-win_amd64.whl", hash = "sha256:4f1026bc732b6a7f96369f7bfe1a4f2290fb34dce00d8644bc3036fb351a4ca1"}, + {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"}, + {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"}, + {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"}, + {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"}, + {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"}, + {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"}, + {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"}, + {file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"}, + {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"}, + {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"}, + {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"}, + {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"}, + {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"}, + {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"}, + {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"}, + {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"}, + {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"}, + {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"}, + {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"}, + {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"}, + {file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"}, + {file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"}, + {file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"}, + {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"}, + {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"}, + {file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"}, + {file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"}, + {file = 
"lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"}, + {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"}, + {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"}, + {file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"}, + {file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"}, + {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"}, + {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"}, + {file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"}, + {file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"}, + {file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"}, + {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"}, + {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"}, + {file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"}, + {file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"}, + {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"}, + {file = 
"lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"}, +] + +[package.extras] +cssselect = ["cssselect (>=0.7)"] +html5 = ["html5lib"] +htmlsoup = ["BeautifulSoup4"] +source = ["Cython (>=0.29.35)"] + +[[package]] +name = "m2r2" +version = "0.3.3.post2" +description = "Markdown and reStructuredText in a single file." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "m2r2-0.3.3.post2-py3-none-any.whl", hash = "sha256:86157721eb6eabcd54d4eea7195890cc58fa6188b8d0abea633383cfbb5e11e3"}, + {file = "m2r2-0.3.3.post2.tar.gz", hash = "sha256:e62bcb0e74b3ce19cda0737a0556b04cf4a43b785072fcef474558f2c1482ca8"}, +] + +[package.dependencies] +docutils = ">=0.19" +mistune = "0.8.4" + +[[package]] +name = "markdown" +version = "3.4.4" +description = "Python implementation of John Gruber's Markdown." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Markdown-3.4.4-py3-none-any.whl", hash = "sha256:a4c1b65c0957b4bd9e7d86ddc7b3c9868fb9670660f6f99f6d1bca8954d5a941"}, + {file = "Markdown-3.4.4.tar.gz", hash = "sha256:225c6123522495d4119a90b3a3ba31a1e87a70369e03f14799ea9c0d7183a3d6"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.0)", "mkdocs-nature (>=0.4)"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markdown2" +version = "2.4.10" +description = "A fast and complete Python implementation of Markdown" +category = "main" +optional = false +python-versions = ">=3.5, <4" +files = [ + {file = "markdown2-2.4.10-py2.py3-none-any.whl", hash = "sha256:e6105800483783831f5dc54f827aa5b44eb137ecef5a70293d8ecfbb4109ecc6"}, + {file = "markdown2-2.4.10.tar.gz", hash = "sha256:cdba126d90dc3aef6f4070ac342f974d63f415678959329cc7909f96cc235d72"}, +] + +[package.extras] +all = ["pygments (>=2.7.3)", "wavedrom"] +code-syntax-highlighting = ["pygments (>=2.7.3)"] +wavedrom = ["wavedrom"] + +[[package]] +name = "marko" +version = "2.0.0" +description = "A markdown parser with high extensibility." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "marko-2.0.0-py3-none-any.whl", hash = "sha256:d5a8221e0d4a9369e662c33ccfe33dea7deb2bebcf1ac3d5a4b46d42ab309e8f"}, + {file = "marko-2.0.0.tar.gz", hash = "sha256:efc26460893250c13751025ae9202ebb1389887036fc0df9009c6ab875467030"}, +] + +[package.extras] +codehilite = ["pygments"] +toc = ["python-slugify"] + +[[package]] +name = "markupsafe" +version = "2.1.3" +description = "Safely add untrusted strings to HTML/XML markup." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = 
"sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = 
"MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[[package]] +name = "matplotlib" +version = "3.7.2" +description = "Python plotting package" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "matplotlib-3.7.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:2699f7e73a76d4c110f4f25be9d2496d6ab4f17345307738557d345f099e07de"}, + {file = "matplotlib-3.7.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a8035ba590658bae7562786c9cc6ea1a84aa49d3afab157e414c9e2ea74f496d"}, + {file = "matplotlib-3.7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f8e4a49493add46ad4a8c92f63e19d548b2b6ebbed75c6b4c7f46f57d36cdd1"}, + {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71667eb2ccca4c3537d9414b1bc00554cb7f91527c17ee4ec38027201f8f1603"}, + {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:152ee0b569a37630d8628534c628456b28686e085d51394da6b71ef84c4da201"}, + {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:070f8dddd1f5939e60aacb8fa08f19551f4b0140fab16a3669d5cd6e9cb28fc8"}, + {file = "matplotlib-3.7.2-cp310-cp310-win32.whl", hash = "sha256:fdbb46fad4fb47443b5b8ac76904b2e7a66556844f33370861b4788db0f8816a"}, + {file = "matplotlib-3.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:23fb1750934e5f0128f9423db27c474aa32534cec21f7b2153262b066a581fd1"}, + {file = "matplotlib-3.7.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:30e1409b857aa8a747c5d4f85f63a79e479835f8dffc52992ac1f3f25837b544"}, + {file = "matplotlib-3.7.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:50e0a55ec74bf2d7a0ebf50ac580a209582c2dd0f7ab51bc270f1b4a0027454e"}, + {file = "matplotlib-3.7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ac60daa1dc83e8821eed155796b0f7888b6b916cf61d620a4ddd8200ac70cd64"}, + {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:305e3da477dc8607336ba10bac96986d6308d614706cae2efe7d3ffa60465b24"}, + {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1c308b255efb9b06b23874236ec0f10f026673ad6515f602027cc8ac7805352d"}, + {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60c521e21031632aa0d87ca5ba0c1c05f3daacadb34c093585a0be6780f698e4"}, + {file = "matplotlib-3.7.2-cp311-cp311-win32.whl", hash = "sha256:26bede320d77e469fdf1bde212de0ec889169b04f7f1179b8930d66f82b30cbc"}, + {file = "matplotlib-3.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:af4860132c8c05261a5f5f8467f1b269bf1c7c23902d75f2be57c4a7f2394b3e"}, + {file = "matplotlib-3.7.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:a1733b8e84e7e40a9853e505fe68cc54339f97273bdfe6f3ed980095f769ddc7"}, + {file = "matplotlib-3.7.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d9881356dc48e58910c53af82b57183879129fa30492be69058c5b0d9fddf391"}, + {file = "matplotlib-3.7.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f081c03f413f59390a80b3e351cc2b2ea0205839714dbc364519bcf51f4b56ca"}, + {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cd120fca3407a225168238b790bd5c528f0fafde6172b140a2f3ab7a4ea63e9"}, + {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a2c1590b90aa7bd741b54c62b78de05d4186271e34e2377e0289d943b3522273"}, + {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d2ff3c984b8a569bc1383cd468fc06b70d7b59d5c2854ca39f1436ae8394117"}, + {file = "matplotlib-3.7.2-cp38-cp38-win32.whl", hash = "sha256:5dea00b62d28654b71ca92463656d80646675628d0828e08a5f3b57e12869e13"}, + {file = "matplotlib-3.7.2-cp38-cp38-win_amd64.whl", hash = "sha256:0f506a1776ee94f9e131af1ac6efa6e5bc7cb606a3e389b0ccb6e657f60bb676"}, + {file = "matplotlib-3.7.2-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:6515e878f91894c2e4340d81f0911857998ccaf04dbc1bba781e3d89cbf70608"}, + {file = "matplotlib-3.7.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:71f7a8c6b124e904db550f5b9fe483d28b896d4135e45c4ea381ad3b8a0e3256"}, + {file = "matplotlib-3.7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12f01b92ecd518e0697da4d97d163b2b3aa55eb3eb4e2c98235b3396d7dad55f"}, + {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7e28d6396563955f7af437894a36bf2b279462239a41028323e04b85179058b"}, + {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbcf59334ff645e6a67cd5f78b4b2cdb76384cdf587fa0d2dc85f634a72e1a3e"}, + {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:318c89edde72ff95d8df67d82aca03861240512994a597a435a1011ba18dbc7f"}, + {file = "matplotlib-3.7.2-cp39-cp39-win32.whl", hash = "sha256:ce55289d5659b5b12b3db4dc9b7075b70cef5631e56530f14b2945e8836f2d20"}, + {file = "matplotlib-3.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:2ecb5be2b2815431c81dc115667e33da0f5a1bcf6143980d180d09a717c4a12e"}, + {file = "matplotlib-3.7.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdcd28360dbb6203fb5219b1a5658df226ac9bebc2542a9e8f457de959d713d0"}, + {file = "matplotlib-3.7.2-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c3cca3e842b11b55b52c6fb8bd6a4088693829acbfcdb3e815fa9b7d5c92c1b"}, + {file = "matplotlib-3.7.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebf577c7a6744e9e1bd3fee45fc74a02710b214f94e2bde344912d85e0c9af7c"}, + {file = "matplotlib-3.7.2-pp38-pypy38_pp73-win_amd64.whl", hash = 
"sha256:936bba394682049919dda062d33435b3be211dc3dcaa011e09634f060ec878b2"}, + {file = "matplotlib-3.7.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bc221ffbc2150458b1cd71cdd9ddd5bb37962b036e41b8be258280b5b01da1dd"}, + {file = "matplotlib-3.7.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35d74ebdb3f71f112b36c2629cf32323adfbf42679e2751252acd468f5001c07"}, + {file = "matplotlib-3.7.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:717157e61b3a71d3d26ad4e1770dc85156c9af435659a25ee6407dc866cb258d"}, + {file = "matplotlib-3.7.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:20f844d6be031948148ba49605c8b96dfe7d3711d1b63592830d650622458c11"}, + {file = "matplotlib-3.7.2.tar.gz", hash = "sha256:a8cdb91dddb04436bd2f098b8fdf4b81352e68cf4d2c6756fcc414791076569b"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""} +kiwisolver = ">=1.0.1" +numpy = ">=1.20" +packaging = ">=20.0" +pillow = ">=6.2.0" +pyparsing = ">=2.3.1,<3.1" +python-dateutil = ">=2.7" + +[[package]] +name = "mccabe" +version = "0.6.1" +description = "McCabe checker, plugin for flake8" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, + {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mistune" +version = "0.8.4" +description = "The fastest markdown parser in pure Python" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "mistune-0.8.4-py2.py3-none-any.whl", hash = "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"}, + {file = "mistune-0.8.4.tar.gz", hash = "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e"}, +] + +[[package]] +name = "mkdocs" +version = "1.5.2" +description = "Project documentation with Markdown." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocs-1.5.2-py3-none-any.whl", hash = "sha256:60a62538519c2e96fe8426654a67ee177350451616118a41596ae7c876bb7eac"}, + {file = "mkdocs-1.5.2.tar.gz", hash = "sha256:70d0da09c26cff288852471be03c23f0f521fc15cf16ac89c7a3bfb9ae8d24f9"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} +jinja2 = ">=2.11.1" +markdown = ">=3.2.1" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +packaging = ">=20.5" +pathspec = ">=0.11.1" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pathspec (==0.11.1)", "platformdirs (==2.2.0)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-exclude" +version = "1.0.2" +description = "A mkdocs plugin that lets you exclude files or trees." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "mkdocs-exclude-1.0.2.tar.gz", hash = "sha256:ba6fab3c80ddbe3fd31d3e579861fd3124513708271180a5f81846da8c7e2a51"}, +] + +[package.dependencies] +mkdocs = "*" + +[[package]] +name = "mkdocs-exclude-search" +version = "0.6.5" +description = "A mkdocs plugin that lets you exclude selected files or sections from the search index." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mkdocs-exclude-search-0.6.5.tar.gz", hash = "sha256:6a126367653caf946c56e446ff30ffdec931438a3e2a8740feff3f8682d52a54"}, + {file = "mkdocs_exclude_search-0.6.5-py3-none-any.whl", hash = "sha256:475e372dd17195700acd00213bcc804280d63f39cb7a0c76a801aceefd2807a6"}, +] + +[package.dependencies] +mkdocs = ">=1.0.4" + +[[package]] +name = "mkdocs-include-markdown-plugin" +version = "4.0.4" +description = "Mkdocs Markdown includer plugin." +category = "main" +optional = false +python-versions = "<3.12,>=3.7" +files = [ + {file = "mkdocs_include_markdown_plugin-4.0.4-py3-none-any.whl", hash = "sha256:792caa91c2c46fa174ad44caa786656ac3f475f2cef5ad33ad28a106f7bc4086"}, + {file = "mkdocs_include_markdown_plugin-4.0.4.tar.gz", hash = "sha256:a5d569a653a610cbbe5b9c0ebb257ce7f94b453e93c3ae2c503b682e77465773"}, +] + +[[package]] +name = "mkdocs-material" +version = "9.2.6" +description = "Documentation that simply works" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocs_material-9.2.6-py3-none-any.whl", hash = "sha256:84bc7e79c1d0bae65a77123efd5ef74731b8c3671601c7962c5db8dba50a65ad"}, + {file = "mkdocs_material-9.2.6.tar.gz", hash = "sha256:3806c58dd112e7b9677225e2021035ddbe3220fbd29d9dc812aa7e01f70b5e0a"}, +] + +[package.dependencies] +babel = ">=2.10.3" +colorama = ">=0.4" +jinja2 = ">=3.0" +lxml = ">=4.6" +markdown = ">=3.2" +mkdocs = ">=1.5.2" +mkdocs-material-extensions = ">=1.1" +paginate = ">=0.5.6" +pygments = ">=2.14" +pymdown-extensions = ">=9.9.1" +readtime = ">=2.0" +regex = ">=2022.4.24" +requests = ">=2.26" + +[[package]] +name = "mkdocs-material-extensions" +version = "1.1.1" +description = "Extension pack for Python Markdown and MkDocs Material." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mkdocs_material_extensions-1.1.1-py3-none-any.whl", hash = "sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945"}, + {file = "mkdocs_material_extensions-1.1.1.tar.gz", hash = "sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93"}, +] + +[[package]] +name = "mkdocs-same-dir" +version = "0.1.2" +description = "MkDocs plugin to allow placing mkdocs.yml in the same directory as documentation" +category = "main" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "mkdocs-same-dir-0.1.2.tar.gz", hash = "sha256:dfb0d8e8b4930a8c7fc3f8ad558d647e8243cd4e1b6e6178e5fa8c1423b0ee45"}, + {file = "mkdocs_same_dir-0.1.2-py3-none-any.whl", hash = "sha256:627991894d51a9fcc54e7d3f0c48a2be3c0a248e015bbeb8e90d4f95f1ac59ba"}, +] + +[package.dependencies] +mkdocs = ">=1.0.3,<2.0.0" + +[[package]] +name = "munch" +version = "4.0.0" +description = "A dot-accessible dictionary (a la JavaScript objects)" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "munch-4.0.0-py2.py3-none-any.whl", hash = "sha256:71033c45db9fb677a0b7eb517a4ce70ae09258490e419b0e7f00d1e386ecb1b4"}, + {file = "munch-4.0.0.tar.gz", hash = "sha256:542cb151461263216a4e37c3fd9afc425feeaf38aaa3025cd2a981fadb422235"}, +] + +[package.extras] +testing = ["astroid (>=2.0)", "coverage", "pylint (>=2.3.1,<2.4.0)", "pytest"] +yaml = ["PyYAML (>=5.1.0)"] + +[[package]] +name = "mypy" +version = "1.5.1" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f33592ddf9655a4894aef22d134de7393e95fcbdc2d15c1ab65828eee5c66c70"}, + {file = "mypy-1.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:258b22210a4a258ccd077426c7a181d789d1121aca6db73a83f79372f5569ae0"}, + {file = "mypy-1.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9ec1f695f0c25986e6f7f8778e5ce61659063268836a38c951200c57479cc12"}, + {file = "mypy-1.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:abed92d9c8f08643c7d831300b739562b0a6c9fcb028d211134fc9ab20ccad5d"}, + {file = "mypy-1.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:a156e6390944c265eb56afa67c74c0636f10283429171018446b732f1a05af25"}, + {file = "mypy-1.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6ac9c21bfe7bc9f7f1b6fae441746e6a106e48fc9de530dea29e8cd37a2c0cc4"}, + {file = "mypy-1.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:51cb1323064b1099e177098cb939eab2da42fea5d818d40113957ec954fc85f4"}, + {file = "mypy-1.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:596fae69f2bfcb7305808c75c00f81fe2829b6236eadda536f00610ac5ec2243"}, + {file = "mypy-1.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:32cb59609b0534f0bd67faebb6e022fe534bdb0e2ecab4290d683d248be1b275"}, + {file = "mypy-1.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:159aa9acb16086b79bbb0016145034a1a05360626046a929f84579ce1666b315"}, + {file = "mypy-1.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f6b0e77db9ff4fda74de7df13f30016a0a663928d669c9f2c057048ba44f09bb"}, + {file = "mypy-1.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:26f71b535dfc158a71264e6dc805a9f8d2e60b67215ca0bfa26e2e1aa4d4d373"}, + {file = "mypy-1.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2fc3a600f749b1008cc75e02b6fb3d4db8dbcca2d733030fe7a3b3502902f161"}, + {file = "mypy-1.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:26fb32e4d4afa205b24bf645eddfbb36a1e17e995c5c99d6d00edb24b693406a"}, + {file = "mypy-1.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:82cb6193de9bbb3844bab4c7cf80e6227d5225cc7625b068a06d005d861ad5f1"}, + {file = "mypy-1.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4a465ea2ca12804d5b34bb056be3a29dc47aea5973b892d0417c6a10a40b2d65"}, + {file = "mypy-1.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9fece120dbb041771a63eb95e4896791386fe287fefb2837258925b8326d6160"}, + {file = "mypy-1.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d28ddc3e3dfeab553e743e532fb95b4e6afad51d4706dd22f28e1e5e664828d2"}, + {file = "mypy-1.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:57b10c56016adce71fba6bc6e9fd45d8083f74361f629390c556738565af8eeb"}, + {file = "mypy-1.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:ff0cedc84184115202475bbb46dd99f8dcb87fe24d5d0ddfc0fe6b8575c88d2f"}, + {file = "mypy-1.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8f772942d372c8cbac575be99f9cc9d9fb3bd95c8bc2de6c01411e2c84ebca8a"}, + {file = "mypy-1.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5d627124700b92b6bbaa99f27cbe615c8ea7b3402960f6372ea7d65faf376c14"}, + {file = "mypy-1.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:361da43c4f5a96173220eb53340ace68cda81845cd88218f8862dfb0adc8cddb"}, + {file = "mypy-1.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:330857f9507c24de5c5724235e66858f8364a0693894342485e543f5b07c8693"}, + {file = "mypy-1.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:c543214ffdd422623e9fedd0869166c2f16affe4ba37463975043ef7d2ea8770"}, + {file = "mypy-1.5.1-py3-none-any.whl", hash = "sha256:f757063a83970d67c444f6e01d9550a7402322af3557ce7630d3c957386fa8f5"}, + {file = "mypy-1.5.1.tar.gz", hash = "sha256:b031b9601f1060bf1281feab89697324726ba0c0bae9d7cd7ab4b690940f0b92"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.1.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nbformat" +version = "5.9.2" +description = "The Jupyter Notebook format" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "nbformat-5.9.2-py3-none-any.whl", hash = "sha256:1c5172d786a41b82bcfd0c23f9e6b6f072e8fb49c39250219e4acfff1efe89e9"}, + {file = "nbformat-5.9.2.tar.gz", hash = "sha256:5f98b5ba1997dff175e77e0c17d5c10a96eaed2cbd1de3533d1fc35d5e111192"}, +] + +[package.dependencies] +fastjsonschema = "*" +jsonschema = ">=2.6" +jupyter-core = "*" +traitlets = ">=5.1" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["pep440", "pre-commit", "pytest", "testpath"] + +[[package]] +name = "networkx" +version = "3.1" +description = "Python package for creating and manipulating graphs and networks" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, +] + +[package.extras] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "nodeenv" +version = "1.8.0" +description = "Node.js virtual environment builder" +category = "dev" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, + {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "nose" +version = "1.3.7" +description = "nose extends unittest to make testing easier" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "nose-1.3.7-py2-none-any.whl", hash = "sha256:dadcddc0aefbf99eea214e0f1232b94f2fa9bd98fa8353711dacb112bfcbbb2a"}, + {file = "nose-1.3.7-py3-none-any.whl", hash = "sha256:9ff7c6cc443f8c51994b34a667bbcf45afd6d945be7477b52e97516fd17c53ac"}, + {file = "nose-1.3.7.tar.gz", hash = "sha256:f1bffef9cbc82628f6e7d7b40d7e255aefaa1adb6a1b1d26c69a8b79e6208a98"}, +] + +[[package]] +name = "numpy" +version = "1.23.5" +description = "NumPy is the fundamental package for array computing with Python." 
+category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63"}, + {file = "numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d"}, + {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43"}, + {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1"}, + {file = "numpy-1.23.5-cp310-cp310-win32.whl", hash = "sha256:522e26bbf6377e4d76403826ed689c295b0b238f46c28a7251ab94716da0b280"}, + {file = "numpy-1.23.5-cp310-cp310-win_amd64.whl", hash = "sha256:dbee87b469018961d1ad79b1a5d50c0ae850000b639bcb1b694e9981083243b6"}, + {file = "numpy-1.23.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ce571367b6dfe60af04e04a1834ca2dc5f46004ac1cc756fb95319f64c095a96"}, + {file = "numpy-1.23.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56e454c7833e94ec9769fa0f86e6ff8e42ee38ce0ce1fa4cbb747ea7e06d56aa"}, + {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5039f55555e1eab31124a5768898c9e22c25a65c1e0037f4d7c495a45778c9f2"}, + {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f545efd1108e647604a1b5aa809591ccd2540f468a880bedb97247e72db387"}, + {file = "numpy-1.23.5-cp311-cp311-win32.whl", hash = "sha256:b2a9ab7c279c91974f756c84c365a669a887efa287365a8e2c418f8b3ba73fb0"}, + {file = "numpy-1.23.5-cp311-cp311-win_amd64.whl", hash = "sha256:0cbe9848fad08baf71de1a39e12d1b6310f1d5b2d0ea4de051058e6e1076852d"}, + {file = "numpy-1.23.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f063b69b090c9d918f9df0a12116029e274daf0181df392839661c4c7ec9018a"}, + {file = "numpy-1.23.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0aaee12d8883552fadfc41e96b4c82ee7d794949e2a7c3b3a7201e968c7ecab9"}, + {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c8c1e89a1f5028a4c6d9e3ccbe311b6ba53694811269b992c0b224269e2398"}, + {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d208a0f8729f3fb790ed18a003f3a57895b989b40ea4dce4717e9cf4af62c6bb"}, + {file = "numpy-1.23.5-cp38-cp38-win32.whl", hash = "sha256:06005a2ef6014e9956c09ba07654f9837d9e26696a0470e42beedadb78c11b07"}, + {file = "numpy-1.23.5-cp38-cp38-win_amd64.whl", hash = "sha256:ca51fcfcc5f9354c45f400059e88bc09215fb71a48d3768fb80e357f3b457e1e"}, + {file = "numpy-1.23.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8969bfd28e85c81f3f94eb4a66bc2cf1dbdc5c18efc320af34bffc54d6b1e38f"}, + {file = "numpy-1.23.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7ac231a08bb37f852849bbb387a20a57574a97cfc7b6cabb488a4fc8be176de"}, + {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf837dc63ba5c06dc8797c398db1e223a466c7ece27a1f7b5232ba3466aafe3d"}, + {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33161613d2269025873025b33e879825ec7b1d831317e68f4f2f0f84ed14c719"}, + {file = "numpy-1.23.5-cp39-cp39-win32.whl", hash = "sha256:af1da88f6bc3d2338ebbf0e22fe487821ea4d8e89053e25fa59d1d79786e7481"}, + {file = "numpy-1.23.5-cp39-cp39-win_amd64.whl", hash = 
"sha256:09b7847f7e83ca37c6e627682f145856de331049013853f344f37b0c9690e3df"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:abdde9f795cf292fb9651ed48185503a2ff29be87770c3b8e2a14b0cd7aa16f8"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d"}, + {file = "numpy-1.23.5.tar.gz", hash = "sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a"}, +] + +[[package]] +name = "oemof-network" +version = "0.4.0" +description = "The network/graph submodules of oemof." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "oemof.network-0.4.0-py2.py3-none-any.whl", hash = "sha256:afdfd4e1056746aa470bc5b72053c9819b48d56c4d2ec4176b5bce85e228548a"}, + {file = "oemof.network-0.4.0.tar.gz", hash = "sha256:5eee3b8f5e8b0a45f5f573b36f3b8c5b6d7460290e05ef72324bca7e9643a58b"}, +] + +[package.dependencies] +blinker = "*" +dill = "*" +networkx = "*" +pandas = "*" + +[package.extras] +dev = ["pytest"] + +[[package]] +name = "oemof-solph" +version = "0.4.5" +description = "A model generator for energy system modelling and optimisation." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "oemof.solph-0.4.5-py2.py3-none-any.whl", hash = "sha256:dc0b5e680a7cb61cabf711f7afcccac2bfde2f7a07195085a39a52ca269e2457"}, + {file = "oemof.solph-0.4.5.tar.gz", hash = "sha256:d0228afd565596fb6dba8c7282876ac749273325bc0a34d02c36726805450c26"}, +] + +[package.dependencies] +blinker = "*" +dill = "*" +networkx = "*" +numpy = "<1.24" +"oemof.network" = "*" +"oemof.tools" = "*" +pandas = "*" +pyomo = ">=5.7.0,<5.7.3" + +[package.extras] +dev = ["pytest", "sphinx", "sphinx-rtd-theme"] +dummy = ["oemof"] + +[[package]] +name = "oemof-tabular" +version = "0.0.3" +description = "Load oemof energy systems from tabular data sources." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "oemof.tabular-0.0.3-py2.py3-none-any.whl", hash = "sha256:86b97c2c684f0373683f3c194ff14eb3cb3afef7f793bf5dff4ddc7a203972e5"}, + {file = "oemof.tabular-0.0.3.tar.gz", hash = "sha256:871fdadf6d8508b0bd27cb654576cbd31475681bf68551c6569dd5d3244d0634"}, +] + +[package.dependencies] +datapackage = "1.5.1" +"oemof.solph" = "0.4.5" +pandas = ">=0.22" +paramiko = "*" +tableschema = "1.7.4" +toml = "*" + +[package.extras] +aggregation = ["tsam"] +cli = ["click"] +geometry = ["geojson", "pyproj", "pyshp", "scipy", "shapely"] +plots = ["matplotlib", "plotly"] + +[[package]] +name = "oemof-tools" +version = "0.4.2" +description = "Tiny tools of the oemof project." 
+category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "oemof.tools-0.4.2-py2.py3-none-any.whl", hash = "sha256:0b2db03c92ee25c986348a45d1ef845449bcdc23f0db0d6a54c2e4ea8a441574"}, + {file = "oemof.tools-0.4.2.tar.gz", hash = "sha256:10ce086dbc6f4fa4bef92866c72de3ce4e23e55c5bdda8469d44643601b66bd0"}, +] + +[package.extras] +dev = ["pytest", "sphinx", "sphinx-rtd-theme"] + +[[package]] +name = "oemoflex" +version = "0.0.1" +description = "" +category = "main" +optional = false +python-versions = "*" +files = [] +develop = false + +[package.dependencies] +dynaconf = "*" +frictionless = "*" +matplotlib = "*" +"oemof.tabular" = "0.0.3" +pandas = "*" +plotly = "*" +pyyaml = "*" + +[package.extras] +dev = ["black (==20.8b1)", "coverage", "flake8", "pytest"] + +[package.source] +type = "git" +url = "https://git@github.com/rl-institut/oemoflex" +reference = "0.0.1" +resolved_reference = "80ec9ee2c6a01c3fe6b777c409255329246dad53" + +[[package]] +name = "openpyxl" +version = "3.1.0" +description = "A Python library to read/write Excel 2010 xlsx/xlsm files" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "openpyxl-3.1.0-py2.py3-none-any.whl", hash = "sha256:24d7d361025d186ba91eff58135d50855cf035a84371b891e58fb6eb5125660f"}, + {file = "openpyxl-3.1.0.tar.gz", hash = "sha256:eccedbe1cdd8b2494057e73959b496821141038dbb7eb9266ea59e3f34208231"}, +] + +[package.dependencies] +et-xmlfile = "*" + +[[package]] +name = "packaging" +version = "23.1" +description = "Core utilities for Python packages" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] + +[[package]] +name = "paginate" +version = "0.5.6" +description = "Divides large result sets into pages for easier browsing" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "paginate-0.5.6.tar.gz", hash = "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"}, +] + +[[package]] +name = "pandas" +version = "1.5.3" +description = "Powerful data structures for data analysis, time series, and statistics" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"}, + {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"}, + {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"}, + {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"}, + {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"}, + {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"}, + {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"}, + {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, +] +python-dateutil = ">=2.8.1" +pytz = ">=2020.1" + +[package.extras] +test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] + +[[package]] +name = "paramiko" +version = "3.3.1" +description = "SSH2 protocol library" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "paramiko-3.3.1-py3-none-any.whl", hash = "sha256:b7bc5340a43de4287bbe22fe6de728aa2c22468b2a849615498dd944c2f275eb"}, + {file = 
"paramiko-3.3.1.tar.gz", hash = "sha256:6a3777a961ac86dbef375c5f5b8d50014a1a96d0fd7f054a43bc880134b0ff77"}, +] + +[package.dependencies] +bcrypt = ">=3.2" +cryptography = ">=3.3" +pynacl = ">=1.5" + +[package.extras] +all = ["gssapi (>=1.4.1)", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] +gssapi = ["gssapi (>=1.4.1)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] +invoke = ["invoke (>=2.0)"] + +[[package]] +name = "pathspec" +version = "0.11.2" +description = "Utility library for gitignore style pattern matching of file paths." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"}, + {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"}, +] + +[[package]] +name = "pbr" +version = "5.11.1" +description = "Python Build Reasonableness" +category = "dev" +optional = false +python-versions = ">=2.6" +files = [ + {file = "pbr-5.11.1-py2.py3-none-any.whl", hash = "sha256:567f09558bae2b3ab53cb3c1e2e33e726ff3338e7bae3db5dc954b3a44eef12b"}, + {file = "pbr-5.11.1.tar.gz", hash = "sha256:aefc51675b0b533d56bb5fd1c8c6c0522fe31896679882e1c4c63d5e4a0fccb3"}, +] + +[[package]] +name = "petl" +version = "1.7.14" +description = "A Python package for extracting, transforming and loading tables of data." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "petl-1.7.14.tar.gz", hash = "sha256:d4802e3c4804bf85f2267a0102fcad35c61e6a37c90d9e1a1674331f35a90a7f"}, +] + +[package.extras] +avro = ["fastavro (>=0.24.0)"] +bcolz = ["bcolz (>=1.2.1)"] +db = ["SQLAlchemy (>=1.3.6,<2.0)"] +hdf5 = ["cython (>=0.29.13)", "numexpr (>=2.6.9)", "numpy (>=1.16.4)", "tables (>=3.5.2)"] +http = ["aiohttp (>=3.6.2)", "requests"] +interval = ["intervaltree (>=3.0.2)"] +numpy = ["numpy (>=1.16.4)"] +pandas = ["pandas (>=0.24.2)"] +remote = ["fsspec (>=0.7.4)"] +smb = ["smbprotocol (>=1.0.1)"] +whoosh = ["whoosh"] +xls = ["xlrd (>=2.0.1)", "xlwt (>=1.3.0)"] +xlsx = ["openpyxl (>=2.6.2)"] +xpath = ["lxml (>=4.4.0)"] + +[[package]] +name = "pillow" +version = "10.0.0" +description = "Python Imaging Library (Fork)" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, + {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"}, + {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"}, + {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"}, + {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"}, + {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"}, + {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, + {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, + {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, + {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, + {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"}, + {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, + {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, + {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, + {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, + {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, + {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"}, + {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, + {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, + {file = 
"Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, + {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, + {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, + {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, + {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, + {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, + {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, + {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, + {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, + {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "plac" +version = "1.3.5" +description = "The smartest command line arguments parser in the world" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "plac-1.3.5-py2.py3-none-any.whl", hash = "sha256:a8933d21a40fe2cec177a2f96217425a4e889d275aa3e25ecf9a9640ab16d416"}, + {file = "plac-1.3.5.tar.gz", hash = "sha256:38bdd864d0450fb748193aa817b9c458a8f5319fbf97b2261151cfc0a5812090"}, +] + +[[package]] +name = "platformdirs" +version = "3.10.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.10.0-py3-none-any.whl", hash = "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"}, + {file = "platformdirs-3.10.0.tar.gz", hash = "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] + +[[package]] +name = "plotly" +version = "5.16.1" +description = "An open-source, interactive data visualization library for Python" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "plotly-5.16.1-py2.py3-none-any.whl", hash = "sha256:19cc34f339acd4e624177806c14df22f388f23fb70658b03aad959a0e650a0dc"}, + {file = "plotly-5.16.1.tar.gz", hash = "sha256:295ac25edeb18c893abb71dcadcea075b78fd6fdf07cee4217a4e1009667925b"}, +] + +[package.dependencies] +packaging = "*" +tenacity = ">=6.2.0" + +[[package]] +name = "pluggy" +version = "1.3.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, + {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "ply" +version = "3.11" +description = "Python Lex & Yacc" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, + {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"}, +] + +[[package]] +name = "pre-commit" +version = "2.21.0" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pre_commit-2.21.0-py2.py3-none-any.whl", hash = "sha256:e2f91727039fc39a92f58a588a25b87f936de6567eed4f0e673e0507edc75bad"}, + {file = "pre_commit-2.21.0.tar.gz", hash = "sha256:31ef31af7e474a8d8995027fefdfcf509b5c913ff31f2015b4ec4beb26a6f658"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "psutil" +version = "5.9.5" +description = "Cross-platform lib for process and system monitoring in Python." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"}, + {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"}, + {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"}, + {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"}, + {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"}, + {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"}, + {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"}, + {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"}, + {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"}, + {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"}, + {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"}, + {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"}, + {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, + {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, +] + +[package.extras] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] + +[[package]] +name = "pulp" +version = "2.7.0" +description = "PuLP is an LP modeler written in python. PuLP can generate MPS or LP files and call GLPK, COIN CLP/CBC, CPLEX, and GUROBI to solve linear problems." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "PuLP-2.7.0-py3-none-any.whl", hash = "sha256:b6de42c929e80325bf44cc7a2997f02535440800c376b9eb8cb7b4670ed53769"}, + {file = "PuLP-2.7.0.tar.gz", hash = "sha256:e73ee6b32d639c9b8cf4b4aded334ba158be5f8313544e056f796ace0a10ae63"}, +] + +[[package]] +name = "py" +version = "1.11.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, + {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, +] + +[[package]] +name = "pycodestyle" +version = "2.7.0" +description = "Python style guide checker" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"}, + {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"}, +] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pydantic" +version = "2.3.0" +description = "Data validation using Python type hints" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, + {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.6.3" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.6.3" +description = "" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, + {file = 
"pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, + {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, + {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, + {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, + {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, + {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, + {file = 
"pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, + {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, + {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, + {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, + {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, + {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, + {file = 
"pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, + {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, + {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, + {file = 
"pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, + {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, + {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pyflakes" +version = "2.3.1" +description = "passive checker of Python programs" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pyflakes-2.3.1-py2.py3-none-any.whl", hash = "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3"}, + {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"}, +] + +[[package]] +name = "pygments" +version = "2.16.1" +description = "Pygments is a syntax highlighting package written in Python." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, + {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"}, +] + +[package.extras] +plugins = ["importlib-metadata"] + +[[package]] +name = "pygraphviz" +version = "1.11" +description = "Python interface to Graphviz" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pygraphviz-1.11.zip", hash = "sha256:a97eb5ced266f45053ebb1f2c6c6d29091690503e3a5c14be7f908b37b06f2d4"}, +] + +[[package]] +name = "pylint" +version = "2.17.5" +description = "python code static checker" +category = "dev" +optional = false +python-versions = ">=3.7.2" +files = [ + {file = "pylint-2.17.5-py3-none-any.whl", hash = "sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413"}, + {file = "pylint-2.17.5.tar.gz", hash = "sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252"}, +] + +[package.dependencies] +astroid = ">=2.15.6,<=2.17.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\""}, +] +isort = ">=4.2.5,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pylint-django" +version = "2.5.3" +description = "A Pylint plugin to help Pylint understand the Django web framework" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pylint-django-2.5.3.tar.gz", hash = "sha256:0ac090d106c62fe33782a1d01bda1610b761bb1c9bf5035ced9d5f23a13d8591"}, + {file = "pylint_django-2.5.3-py3-none-any.whl", hash = "sha256:56b12b6adf56d548412445bd35483034394a1a94901c3f8571980a13882299d5"}, +] + +[package.dependencies] +pylint = ">=2.0,<3" +pylint-plugin-utils = ">=0.7" + +[package.extras] +for-tests = ["coverage", "django-tables2", "django-tastypie", "factory-boy", "pylint (>=2.13)", "pytest", "wheel"] +with-django = ["Django"] + +[[package]] +name = "pylint-plugin-utils" +version = "0.8.2" +description = "Utilities and helpers for writing Pylint plugins" +category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "pylint_plugin_utils-0.8.2-py3-none-any.whl", hash = "sha256:ae11664737aa2effbf26f973a9e0b6779ab7106ec0adc5fe104b0907ca04e507"}, + {file = "pylint_plugin_utils-0.8.2.tar.gz", hash = "sha256:d3cebf68a38ba3fba23a873809155562571386d4c1b03e5b4c4cc26c3eee93e4"}, +] + +[package.dependencies] +pylint = ">=1.7" + +[[package]] +name = "pymdown-extensions" +version = "10.2.1" +description = "Extension pack for Python Markdown." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pymdown_extensions-10.2.1-py3-none-any.whl", hash = "sha256:bded105eb8d93f88f2f821f00108cb70cef1269db6a40128c09c5f48bfc60ea4"}, + {file = "pymdown_extensions-10.2.1.tar.gz", hash = "sha256:d0c534b4a5725a4be7ccef25d65a4c97dba58b54ad7c813babf0eb5ba9c81591"}, +] + +[package.dependencies] +markdown = ">=3.2" +pyyaml = "*" + +[package.extras] +extra = ["pygments (>=2.12)"] + +[[package]] +name = "pynacl" +version = "1.5.0" +description = "Python binding to the Networking and Cryptography (NaCl) library" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, + {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, +] + +[package.dependencies] +cffi = ">=1.4.1" + +[package.extras] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] +tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] + +[[package]] +name = "pyomo" +version = "5.7.2" +description = "Pyomo: Python Optimization Modeling Objects" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Pyomo-5.7.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:86f122650fd4949507bd647ef49155d7ee5916e9a78dffae5d8ff2a97a0482aa"}, + {file = "Pyomo-5.7.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e67f25344eaea1a9c0d364e317e4ee1a26b2988ccb7a9bb559889e933d53278a"}, + {file = "Pyomo-5.7.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:968c7fb8db7dc47fb27895f565dfa916acba0c2499a2e49d1d4221ccc2845cab"}, + {file = "Pyomo-5.7.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:fd866ebf71ca41c5f4dea5662f15d110183dad31b5a5ef831c016af005d01cb4"}, + {file = "Pyomo-5.7.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:ace76a9bf0b89e8462c12b166511709c065f5c2dac3a78260adc81c22fe6264d"}, + {file = "Pyomo-5.7.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3600e2b63722569244f35432e310c39215342576d6707abf89bc6e43c9b10236"}, + {file = "Pyomo-5.7.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = 
"sha256:da60346b3c7f096e72776312fab058fc8463efc1b37a565de1a462356e5a9e31"}, + {file = "Pyomo-5.7.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:c088c1c1748a07d25bde5268057077bf6957780cfa926bac079f5084bccfb701"}, + {file = "Pyomo-5.7.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:498ba9a3294da3a9188dfaf2089c7d91984866d5a618d6864910b677cd966025"}, + {file = "Pyomo-5.7.2-cp36-cp36m-win_amd64.whl", hash = "sha256:051c782b136a182c881197110c668a42582e5b6c950975cfd3268c686b04eb32"}, + {file = "Pyomo-5.7.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:f24e87c98ae31e0c2c1a3bc86420d1dc26462946651499fe7b135b1faaa04504"}, + {file = "Pyomo-5.7.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:879ea064e1ecb2c47262d79efe3769725687aa2bad3f296b2e99cf7abe9bb7cd"}, + {file = "Pyomo-5.7.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f07933b98398aa06080ec58b2d044b58ba539064a69c3295530dca718f3abd5a"}, + {file = "Pyomo-5.7.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6e3ee758c9b58fb04500a82e967ebb29e0b5278343583996fd6f766caa5c427c"}, + {file = "Pyomo-5.7.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:59984b5d438ae6450888189df43c74dd544ad0eb5a6b5000254a30afdda8f6d1"}, + {file = "Pyomo-5.7.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:075ef81be5d29e0be1733f86981416d7c9695c0e5f477a55e5824ab07e22dfa9"}, + {file = "Pyomo-5.7.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:c89a481fbddf735ceb4f09a6d6f8973dd97efcf143097b87bdde4bc98eb98d3f"}, + {file = "Pyomo-5.7.2-cp38-cp38-win_amd64.whl", hash = "sha256:9c619fe04bad636cbaa28487961cfde525f205d08668217008951d95fed76d91"}, + {file = "Pyomo-5.7.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:e3df7290c017ecc4b8e84dd4b307d60d5d5ab5fc2480086c464375c0882094da"}, + {file = "Pyomo-5.7.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7c65185a57643821847874ab35c1bebd0703e88118fca8153672f70413b7f47e"}, + {file = "Pyomo-5.7.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:7907f380c1fd1e2b13730087fdef57230ba6638d6c499605a105b3a048c61530"}, + {file = "Pyomo-5.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:68ee8ce0b8a941efaa7108ab3dbc73d940ebd51406f8a1bdc0beeb6aaaad5ecd"}, + {file = "Pyomo-5.7.2.tar.gz", hash = "sha256:f10ada18ade84b16225dc519ef1788dd6d5f22cb22d0ea44db64c96d14cb7bb0"}, +] + +[package.dependencies] +ply = "*" +PyUtilib = ">=6.0.0" +six = ">=1.4" + +[[package]] +name = "pyparsing" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +category = "main" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pyproj" +version = "3.5.0" +description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyproj-3.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6475ce653880938468a1a1b7321267243909e34b972ba9e53d5982c41d555918"}, + {file = "pyproj-3.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61e4ad57d89b03a7b173793b31bca8ee110112cde1937ef0f42a70b9120c827d"}, + {file = "pyproj-3.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7bdd2021bb6f7f346bfe1d2a358aa109da017d22c4704af2d994e7c7ee0a7a53"}, + {file = "pyproj-3.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5674923351e76222e2c10c58b5e1ac119d7a46b270d822c463035971b06f724b"}, + {file = "pyproj-3.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd5e2b6aa255023c4acd0b977590f1f7cc801ba21b4d806fcf6dfac3474ebb83"}, + {file = "pyproj-3.5.0-cp310-cp310-win32.whl", hash = "sha256:6f316a66031a14e9c5a88c91f8b77aa97f5454895674541ed6ab630b682be35d"}, + {file = "pyproj-3.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:f7c2f4d9681e810cf40239caaca00079930a6d9ee6591139b88d592d36051d82"}, + {file = "pyproj-3.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7572983134e310e0ca809c63f1722557a040fe9443df5f247bf11ba887eb1229"}, + {file = "pyproj-3.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eccb417b91d0be27805dfc97550bfb8b7db94e9fe1db5ebedb98f5b88d601323"}, + {file = "pyproj-3.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:621d78a9d8bf4d06e08bef2471021fbcb1a65aa629ad4a20c22e521ce729cc20"}, + {file = "pyproj-3.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9a024370e917c899bff9171f03ea6079deecdc7482a146a2c565f3b9df134ea"}, + {file = "pyproj-3.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b7c2113c4d11184a238077ec85e31eda1dcc58ffeb9a4429830e0a7036e787d"}, + {file = "pyproj-3.5.0-cp311-cp311-win32.whl", hash = "sha256:a730f5b4c98c8a0f312437873e6e34dbd4cc6dc23d5afd91a6691c62724b1f68"}, + {file = "pyproj-3.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:e97573de0ab3bbbcb4c7748bc41f4ceb6da10b45d35b1a294b5820701e7c25f0"}, + {file = "pyproj-3.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2b708fd43453b985642b737d4a6e7f1d6a0ab1677ffa4e14cc258537b49224b0"}, + {file = "pyproj-3.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b60d93a200639e8367c6542a964fd0aa2dbd152f256c1831dc18cd5aa470fb8a"}, + {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38862fe07316ae12b79d82d298e390973a4f00b684f3c2d037238e20e00610ba"}, + {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71b65f2a38cd9e16883dbb0f8ae82bdf8f6b79b1b02975c78483ab8428dbbf2f"}, + {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b752b7d9c4b08181c7e8c0d9c7f277cbefff42227f34d3310696a87c863d9dd3"}, + {file = "pyproj-3.5.0-cp38-cp38-win32.whl", hash = "sha256:b937215bfbaf404ec8f03ca741fc3f9f2c4c2c5590a02ccddddd820ae3c71331"}, + {file = "pyproj-3.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:97ed199033c2c770e7eea2ef80ff5e6413426ec2d7ec985b869792f04ab95d05"}, + {file = "pyproj-3.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:052c49fce8b5d55943a35c36ccecb87350c68b48ba95bc02a789770c374ef819"}, + {file = "pyproj-3.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1507138ea28bf2134d31797675380791cc1a7156a3aeda484e65a78a4aba9b62"}, + {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02742ef3d846401861a878a61ef7ad911ea7539d6cc4619ddb52dbdf7b45aee"}, + {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:385b0341861d3ebc8cad98337a738821dcb548d465576527399f4955ca24b6ed"}, + {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fe6bb1b68a35d07378d38be77b5b2f8dd2bea5910c957bfcc7bee55988d3910"}, + 
{file = "pyproj-3.5.0-cp39-cp39-win32.whl", hash = "sha256:5c4b85ac10d733c42d73a2e6261c8d6745bf52433a31848dd1b6561c9a382da3"}, + {file = "pyproj-3.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:1798ff7d65d9057ebb2d017ffe8403268b8452f24d0428b2140018c25c7fa1bc"}, + {file = "pyproj-3.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d711517a8487ef3245b08dc82f781a906df9abb3b6cb0ce0486f0eeb823ca570"}, + {file = "pyproj-3.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:788a5dadb532644a64efe0f5f01bf508c821eb7e984f13a677d56002f1e8a67a"}, + {file = "pyproj-3.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73f7960a97225812f9b1d7aeda5fb83812f38de9441e3476fcc8abb3e2b2f4de"}, + {file = "pyproj-3.5.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fde5ece4d2436b5a57c8f5f97b49b5de06a856d03959f836c957d3e609f2de7e"}, + {file = "pyproj-3.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e08db25b61cf024648d55973cc3d1c3f1d0818fabf594d5f5a8e2318103d2aa0"}, + {file = "pyproj-3.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a87b419a2a352413fbf759ecb66da9da50bd19861c8f26db6a25439125b27b9"}, + {file = "pyproj-3.5.0.tar.gz", hash = "sha256:9859d1591c1863414d875ae0759e72c2cffc01ab989dc64137fbac572cc81bf6"}, +] + +[package.dependencies] +certifi = "*" + +[[package]] +name = "pyquery" +version = "2.0.0" +description = "A jquery-like library for python" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "pyquery-2.0.0-py3-none-any.whl", hash = "sha256:8dfc9b4b7c5f877d619bbae74b1898d5743f6ca248cfd5d72b504dd614da312f"}, + {file = "pyquery-2.0.0.tar.gz", hash = "sha256:963e8d4e90262ff6d8dec072ea97285dc374a2f69cad7776f4082abcf6a1d8ae"}, +] + +[package.dependencies] +cssselect = ">=1.2.0" +lxml = ">=2.1" + +[package.extras] +test = ["pytest", "pytest-cov", "requests", "webob", "webtest"] + +[[package]] +name = "pyreadline3" +version = "3.4.1" +description = "A python implementation of GNU readline." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, + {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, +] + +[[package]] +name = "pytest" +version = "6.2.5" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, + {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, +] + +[package.dependencies] +atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +py = ">=1.8.2" +toml = "*" + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-slugify" +version = "8.0.1" +description = "A Python slugify application that also handles Unicode" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "python-slugify-8.0.1.tar.gz", hash = "sha256:ce0d46ddb668b3be82f4ed5e503dbc33dd815d83e2eb6824211310d3fb172a27"}, + {file = "python_slugify-8.0.1-py2.py3-none-any.whl", hash = "sha256:70ca6ea68fe63ecc8fa4fcf00ae651fc8a5d02d93dcd12ae6d4fc7ca46c4d395"}, +] + +[package.dependencies] +text-unidecode = ">=1.3" + +[package.extras] +unidecode = ["Unidecode (>=1.1.1)"] + +[[package]] +name = "pytz" +version = "2023.3" +description = "World timezone definitions, modern and historical" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, + {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, +] + +[[package]] +name = "pyutilib" +version = "6.0.0" +description = "PyUtilib: A collection of Python utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "PyUtilib-6.0.0-py2.py3-none-any.whl", hash = "sha256:f1f82d05ad8c42baeef915c8d3d97c0a3cbed6c506c857ab0ab7694dea50ebd8"}, + {file = "PyUtilib-6.0.0.tar.gz", hash = "sha256:d3c14f8ed9028a831b2bf51b8ab7776eba87e66cfc58a06b99c359aaa640f040"}, +] + +[package.dependencies] +nose = "*" +six = "*" + +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = 
"pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "pyxlsb" +version = "1.0.10" +description = "Excel 2007-2010 Binary Workbook (xlsb) parser" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "pyxlsb-1.0.10-py2.py3-none-any.whl", hash = "sha256:87c122a9a622e35ca5e741d2e541201d28af00fb46bec492cfa9586890b120b4"}, + {file = "pyxlsb-1.0.10.tar.gz", hash = "sha256:8062d1ea8626d3f1980e8b1cfe91a4483747449242ecb61013bc2df85435f685"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = 
"PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "pyyaml-env-tag" +version = "0.1" +description = "A custom YAML tag for referencing environment variables in YAML files. 
" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, + {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, +] + +[package.dependencies] +pyyaml = "*" + +[[package]] +name = "rasterio" +version = "1.3.8" +description = "Fast and direct raster I/O for use with Numpy and SciPy" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rasterio-1.3.8-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:fea5db183fd1c85e7f41651af5b474af09a68a5e12ce37cad8cc7708843f1ea4"}, + {file = "rasterio-1.3.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e2207003854af60a879cdd87da033cbf86a53585dbf2a49045f66decc3bbb01"}, + {file = "rasterio-1.3.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ec3bfc55c793d1dc7437f3d8b55116db5ea1cf4e0c25c96999fd99daf1ae6f"}, + {file = "rasterio-1.3.8-cp310-cp310-win_amd64.whl", hash = "sha256:ce5c3193b141d23fe081e6b97665f072af48556622da6ae4d45d3a2192e38c3f"}, + {file = "rasterio-1.3.8-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:d5ccc8e6d30534d510ce5099d4a35616611cadcae79aa1216150c2696e03ddde"}, + {file = "rasterio-1.3.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3b8410be847e8fd96cbe744e28e484437b370830052b5dcc7b11efc8c73fffc"}, + {file = "rasterio-1.3.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8e1b456f58b9ae023026730320424091af504ef066418ddcd296b9014845ee"}, + {file = "rasterio-1.3.8-cp311-cp311-win_amd64.whl", hash = "sha256:0323332ed1bfad522e53a3da45e0d3453e603862c3d2c08d8a639a7be76853fb"}, + {file = "rasterio-1.3.8-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:d177157a9a033a0642b3102d9f9e169bada56f1e25c982d2549359a3f397dcff"}, + {file = "rasterio-1.3.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:969b943d3746bad7cc9e2433deb4d21a6f0b21a5b46daeb95530b79fe010910b"}, + {file = "rasterio-1.3.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eefa29a8d5dfd6537fee6e7f28be7b78ceedb85026851c58759563ef7541dc0c"}, + {file = "rasterio-1.3.8-cp38-cp38-win_amd64.whl", hash = "sha256:d184ad8d29c9c8a04d265cdc229d446c59b142d36228926a17767c1758469c8a"}, + {file = "rasterio-1.3.8-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4c9451f51e175940223ad2d3b205a303a810f5d396fbaf0f17fbde6ee1b74737"}, + {file = "rasterio-1.3.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f57d73b5713d8096ec2fc9bb929bf3ce995f3f34d95e32f30192d180921d341a"}, + {file = "rasterio-1.3.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6c466b2b7a49f8ab3ee0ad0974185b806b7c19427dbf3e1cf4372ce0d52b2ee"}, + {file = "rasterio-1.3.8-cp39-cp39-win_amd64.whl", hash = "sha256:3b654e9fd64ad1f68699376a25bd1b403f8b023a75af42e3b26effda990428df"}, + {file = "rasterio-1.3.8.tar.gz", hash = "sha256:ffdd18e78efdf8ad5861065fd812a66dd34264293317ff6540a078ea891cdef8"}, +] + +[package.dependencies] +affine = "*" +attrs = "*" +certifi = "*" +click = ">=4.0" +click-plugins = "*" +cligj = ">=0.5" +numpy = ">=1.18" +setuptools = "*" +snuggs = ">=1.4.1" + +[package.extras] +all = ["boto3 (>=1.2.4)", "ghp-import", "hypothesis", "ipython (>=2.0)", "matplotlib", "numpydoc", "packaging", "pytest (>=2.8.2)", "pytest-cov (>=2.2.0)", "shapely", "sphinx", "sphinx-rtd-theme"] +docs = ["ghp-import", "numpydoc", "sphinx", "sphinx-rtd-theme"] +ipython = 
["ipython (>=2.0)"] +plot = ["matplotlib"] +s3 = ["boto3 (>=1.2.4)"] +test = ["boto3 (>=1.2.4)", "hypothesis", "packaging", "pytest (>=2.8.2)", "pytest-cov (>=2.2.0)", "shapely"] + +[[package]] +name = "rasterstats" +version = "0.18.0" +description = "Summarize geospatial raster datasets based on vector geometries" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "rasterstats-0.18.0-py3-none-any.whl", hash = "sha256:00014f422160f6047d3431c9132ebe3d356021ca8e22c6f40d04764f98a60795"}, + {file = "rasterstats-0.18.0.tar.gz", hash = "sha256:29389bfcbeac1a4206aba6e1d795058ec8a64d560efad48156c27fad97c2e09a"}, +] + +[package.dependencies] +affine = "<3.0" +click = ">7.1" +cligj = ">=0.4" +fiona = "<1.9" +numpy = ">=1.9" +rasterio = ">=1.0" +shapely = "*" +simplejson = "*" + +[package.extras] +dev = ["numpydoc", "rasterstats[test]", "twine"] +test = ["coverage", "pyshp (>=1.1.4)", "pytest (>=4.6)", "pytest-cov (>=2.2.0)", "simplejson"] + +[[package]] +name = "readtime" +version = "3.0.0" +description = "Calculates the time some text takes the average human to read, based on Medium's read time forumula" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "readtime-3.0.0.tar.gz", hash = "sha256:76c5a0d773ad49858c53b42ba3a942f62fbe20cc8c6f07875797ac7dc30963a9"}, +] + +[package.dependencies] +beautifulsoup4 = ">=4.0.1" +markdown2 = ">=2.4.3" +pyquery = ">=1.2" + +[[package]] +name = "regex" +version = "2023.8.8" +description = "Alternative regular expression module, to replace re." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "regex-2023.8.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:88900f521c645f784260a8d346e12a1590f79e96403971241e64c3a265c8ecdb"}, + {file = "regex-2023.8.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3611576aff55918af2697410ff0293d6071b7e00f4b09e005d614686ac4cd57c"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8a0ccc8f2698f120e9e5742f4b38dc944c38744d4bdfc427616f3a163dd9de5"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c662a4cbdd6280ee56f841f14620787215a171c4e2d1744c9528bed8f5816c96"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf0633e4a1b667bfe0bb10b5e53fe0d5f34a6243ea2530eb342491f1adf4f739"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:551ad543fa19e94943c5b2cebc54c73353ffff08228ee5f3376bd27b3d5b9800"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54de2619f5ea58474f2ac211ceea6b615af2d7e4306220d4f3fe690c91988a61"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ec4b3f0aebbbe2fc0134ee30a791af522a92ad9f164858805a77442d7d18570"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ae646c35cb9f820491760ac62c25b6d6b496757fda2d51be429e0e7b67ae0ab"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca339088839582d01654e6f83a637a4b8194d0960477b9769d2ff2cfa0fa36d2"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:d9b6627408021452dcd0d2cdf8da0534e19d93d070bfa8b6b4176f99711e7f90"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:bd3366aceedf274f765a3a4bc95d6cd97b130d1dda524d8f25225d14123c01db"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7aed90a72fc3654fba9bc4b7f851571dcc368120432ad68b226bd593f3f6c0b7"}, + {file = "regex-2023.8.8-cp310-cp310-win32.whl", hash = "sha256:80b80b889cb767cc47f31d2b2f3dec2db8126fbcd0cff31b3925b4dc6609dcdb"}, + {file = "regex-2023.8.8-cp310-cp310-win_amd64.whl", hash = "sha256:b82edc98d107cbc7357da7a5a695901b47d6eb0420e587256ba3ad24b80b7d0b"}, + {file = "regex-2023.8.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1e7d84d64c84ad97bf06f3c8cb5e48941f135ace28f450d86af6b6512f1c9a71"}, + {file = "regex-2023.8.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ce0f9fbe7d295f9922c0424a3637b88c6c472b75eafeaff6f910494a1fa719ef"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06c57e14ac723b04458df5956cfb7e2d9caa6e9d353c0b4c7d5d54fcb1325c46"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7a9aaa5a1267125eef22cef3b63484c3241aaec6f48949b366d26c7250e0357"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b7408511fca48a82a119d78a77c2f5eb1b22fe88b0d2450ed0756d194fe7a9a"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14dc6f2d88192a67d708341f3085df6a4f5a0c7b03dec08d763ca2cd86e9f559"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48c640b99213643d141550326f34f0502fedb1798adb3c9eb79650b1ecb2f177"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0085da0f6c6393428bf0d9c08d8b1874d805bb55e17cb1dfa5ddb7cfb11140bf"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:964b16dcc10c79a4a2be9f1273fcc2684a9eedb3906439720598029a797b46e6"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7ce606c14bb195b0e5108544b540e2c5faed6843367e4ab3deb5c6aa5e681208"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:40f029d73b10fac448c73d6eb33d57b34607f40116e9f6e9f0d32e9229b147d7"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3b8e6ea6be6d64104d8e9afc34c151926f8182f84e7ac290a93925c0db004bfd"}, + {file = "regex-2023.8.8-cp311-cp311-win32.whl", hash = "sha256:942f8b1f3b223638b02df7df79140646c03938d488fbfb771824f3d05fc083a8"}, + {file = "regex-2023.8.8-cp311-cp311-win_amd64.whl", hash = "sha256:51d8ea2a3a1a8fe4f67de21b8b93757005213e8ac3917567872f2865185fa7fb"}, + {file = "regex-2023.8.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e951d1a8e9963ea51efd7f150450803e3b95db5939f994ad3d5edac2b6f6e2b4"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704f63b774218207b8ccc6c47fcef5340741e5d839d11d606f70af93ee78e4d4"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22283c769a7b01c8ac355d5be0715bf6929b6267619505e289f792b01304d898"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91129ff1bb0619bc1f4ad19485718cc623a2dc433dff95baadbf89405c7f6b57"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de35342190deb7b866ad6ba5cbcccb2d22c0487ee0cbb251efef0843d705f0d4"}, + {file = 
"regex-2023.8.8-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b993b6f524d1e274a5062488a43e3f9f8764ee9745ccd8e8193df743dbe5ee61"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3026cbcf11d79095a32d9a13bbc572a458727bd5b1ca332df4a79faecd45281c"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:293352710172239bf579c90a9864d0df57340b6fd21272345222fb6371bf82b3"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d909b5a3fff619dc7e48b6b1bedc2f30ec43033ba7af32f936c10839e81b9217"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3d370ff652323c5307d9c8e4c62efd1956fb08051b0e9210212bc51168b4ff56"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:b076da1ed19dc37788f6a934c60adf97bd02c7eea461b73730513921a85d4235"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e9941a4ada58f6218694f382e43fdd256e97615db9da135e77359da257a7168b"}, + {file = "regex-2023.8.8-cp36-cp36m-win32.whl", hash = "sha256:a8c65c17aed7e15a0c824cdc63a6b104dfc530f6fa8cb6ac51c437af52b481c7"}, + {file = "regex-2023.8.8-cp36-cp36m-win_amd64.whl", hash = "sha256:aadf28046e77a72f30dcc1ab185639e8de7f4104b8cb5c6dfa5d8ed860e57236"}, + {file = "regex-2023.8.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:423adfa872b4908843ac3e7a30f957f5d5282944b81ca0a3b8a7ccbbfaa06103"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ae594c66f4a7e1ea67232a0846649a7c94c188d6c071ac0210c3e86a5f92109"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e51c80c168074faa793685656c38eb7a06cbad7774c8cbc3ea05552d615393d8"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09b7f4c66aa9d1522b06e31a54f15581c37286237208df1345108fcf4e050c18"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e73e5243af12d9cd6a9d6a45a43570dbe2e5b1cdfc862f5ae2b031e44dd95a8"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:941460db8fe3bd613db52f05259c9336f5a47ccae7d7def44cc277184030a116"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f0ccf3e01afeb412a1a9993049cb160d0352dba635bbca7762b2dc722aa5742a"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2e9216e0d2cdce7dbc9be48cb3eacb962740a09b011a116fd7af8c832ab116ca"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5cd9cd7170459b9223c5e592ac036e0704bee765706445c353d96f2890e816c8"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4873ef92e03a4309b3ccd8281454801b291b689f6ad45ef8c3658b6fa761d7ac"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:239c3c2a339d3b3ddd51c2daef10874410917cd2b998f043c13e2084cb191684"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1005c60ed7037be0d9dea1f9c53cc42f836188227366370867222bda4c3c6bd7"}, + {file = "regex-2023.8.8-cp37-cp37m-win32.whl", hash = "sha256:e6bd1e9b95bc5614a7a9c9c44fde9539cba1c823b43a9f7bc11266446dd568e3"}, + {file = "regex-2023.8.8-cp37-cp37m-win_amd64.whl", hash 
= "sha256:9a96edd79661e93327cfeac4edec72a4046e14550a1d22aa0dd2e3ca52aec921"}, + {file = "regex-2023.8.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f2181c20ef18747d5f4a7ea513e09ea03bdd50884a11ce46066bb90fe4213675"}, + {file = "regex-2023.8.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a2ad5add903eb7cdde2b7c64aaca405f3957ab34f16594d2b78d53b8b1a6a7d6"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9233ac249b354c54146e392e8a451e465dd2d967fc773690811d3a8c240ac601"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:920974009fb37b20d32afcdf0227a2e707eb83fe418713f7a8b7de038b870d0b"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd2b6c5dfe0929b6c23dde9624483380b170b6e34ed79054ad131b20203a1a63"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96979d753b1dc3b2169003e1854dc67bfc86edf93c01e84757927f810b8c3c93"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ae54a338191e1356253e7883d9d19f8679b6143703086245fb14d1f20196be9"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2162ae2eb8b079622176a81b65d486ba50b888271302190870b8cc488587d280"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c884d1a59e69e03b93cf0dfee8794c63d7de0ee8f7ffb76e5f75be8131b6400a"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf9273e96f3ee2ac89ffcb17627a78f78e7516b08f94dc435844ae72576a276e"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:83215147121e15d5f3a45d99abeed9cf1fe16869d5c233b08c56cdf75f43a504"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f7454aa427b8ab9101f3787eb178057c5250478e39b99540cfc2b889c7d0586"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0640913d2c1044d97e30d7c41728195fc37e54d190c5385eacb52115127b882"}, + {file = "regex-2023.8.8-cp38-cp38-win32.whl", hash = "sha256:0c59122ceccb905a941fb23b087b8eafc5290bf983ebcb14d2301febcbe199c7"}, + {file = "regex-2023.8.8-cp38-cp38-win_amd64.whl", hash = "sha256:c12f6f67495ea05c3d542d119d270007090bad5b843f642d418eb601ec0fa7be"}, + {file = "regex-2023.8.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:82cd0a69cd28f6cc3789cc6adeb1027f79526b1ab50b1f6062bbc3a0ccb2dbc3"}, + {file = "regex-2023.8.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bb34d1605f96a245fc39790a117ac1bac8de84ab7691637b26ab2c5efb8f228c"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:987b9ac04d0b38ef4f89fbc035e84a7efad9cdd5f1e29024f9289182c8d99e09"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9dd6082f4e2aec9b6a0927202c85bc1b09dcab113f97265127c1dc20e2e32495"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7eb95fe8222932c10d4436e7a6f7c99991e3fdd9f36c949eff16a69246dee2dc"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7098c524ba9f20717a56a8d551d2ed491ea89cbf37e540759ed3b776a4f8d6eb"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:4b694430b3f00eb02c594ff5a16db30e054c1b9589a043fe9174584c6efa8033"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2aeab3895d778155054abea5238d0eb9a72e9242bd4b43f42fd911ef9a13470"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:988631b9d78b546e284478c2ec15c8a85960e262e247b35ca5eaf7ee22f6050a"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:67ecd894e56a0c6108ec5ab1d8fa8418ec0cff45844a855966b875d1039a2e34"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:14898830f0a0eb67cae2bbbc787c1a7d6e34ecc06fbd39d3af5fe29a4468e2c9"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f2200e00b62568cfd920127782c61bc1c546062a879cdc741cfcc6976668dfcf"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9691a549c19c22d26a4f3b948071e93517bdf86e41b81d8c6ac8a964bb71e5a6"}, + {file = "regex-2023.8.8-cp39-cp39-win32.whl", hash = "sha256:6ab2ed84bf0137927846b37e882745a827458689eb969028af8032b1b3dac78e"}, + {file = "regex-2023.8.8-cp39-cp39-win_amd64.whl", hash = "sha256:5543c055d8ec7801901e1193a51570643d6a6ab8751b1f7dd9af71af467538bb"}, + {file = "regex-2023.8.8.tar.gz", hash = "sha256:fcbdc5f2b0f1cd0f6a56cdb46fe41d2cce1e644e3b68832f3eeebc5fb0f7712e"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "reretry" +version = "0.11.8" +description = "An easy to use, but functional decorator for retrying on exceptions." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "reretry-0.11.8-py2.py3-none-any.whl", hash = "sha256:5ec1084cd9644271ee386d34cd5dd24bdb3e91d55961b076d1a31d585ad68a79"}, + {file = "reretry-0.11.8.tar.gz", hash = "sha256:f2791fcebe512ea2f1d153a2874778523a8064860b591cd90afc21a8bed432e3"}, +] + +[[package]] +name = "rfc3986" +version = "2.0.0" +description = "Validating URI References per RFC 3986" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, + {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, +] + +[package.extras] +idna2008 = ["idna"] + +[[package]] +name = "rich" +version = "13.5.2" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +category = "main" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.5.2-py3-none-any.whl", hash = "sha256:146a90b3b6b47cac4a73c12866a499e9817426423f57c5a66949c086191a8808"}, + {file = "rich-13.5.2.tar.gz", hash = "sha256:fb9d6c0a0f643c99eed3875b5377a184132ba9be4d61516a55273d3554d75a39"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rtree" +version = "1.0.1" +description = "R-Tree spatial index for Python GIS" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Rtree-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9855b8f11cdad99c56eb361b7b632a4fbd3d8cbe3f2081426b445f0cfb7fdca9"}, + {file = "Rtree-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:18ce7e4d04b85c48f2d364835620b3b20e38e199639746e7b12f07a2303e18ff"}, + {file = "Rtree-1.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:784efa6b7be9e99b33613ae8495931032689441eabb6120c9b3eb91188c33794"}, + {file = "Rtree-1.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:157207191aebdacbbdbb369e698cfbfebce53bc97114e96c8af5bed3126475f1"}, + {file = "Rtree-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5fb3671a8d440c24b1dd29ec621d4345ced7185e26f02abe98e85a6629fcb50"}, + {file = "Rtree-1.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:11d16f51cf9205cd6995af36e24efe8f184270f667fb49bb69b09fc46b97e7d4"}, + {file = "Rtree-1.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6db6a0a93e41594ffc14b053f386dd414ab5a82535bbd9aedafa6ac8dc0650d8"}, + {file = "Rtree-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6e29e5eb3083ad12ac5c1ce6e37465ea3428d894d3466cc9c9e2ee4bf768e53"}, + {file = "Rtree-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:656b148589c0b5bab4a7db4d033634329f42a5feaac10ca40aceeca109d83c1f"}, + {file = "Rtree-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7b2c15f9373ba314c83a8df5cb6d99b4e3af23c376c6b1317add995432dd0970"}, + {file = "Rtree-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93c5e0bf31e76b4f92a6eec3d2891e938408774c75a8ed6ac3d2c8db04a2be33"}, + {file = "Rtree-1.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6792de0e3c2fd3ad7e069445027603bec7a47000432f49c80246886311f4f152"}, + {file = 
"Rtree-1.0.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:004e131b570dc360a49e7f3b60e7bc6517943a54df056587964d1cb903889e7e"}, + {file = "Rtree-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:becd711fe97c2e09b1b7969e83080a3c8012bce2d30f6db879aade255fcba5c1"}, + {file = "Rtree-1.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:015df09e1bc55ddf7c88799bf1515d058cd0ee78eacf4cd443a32876d3b3a863"}, + {file = "Rtree-1.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c2973b76f61669a85e160b4ad09879c4089fc0e3f20fd99adf161ca298fe8374"}, + {file = "Rtree-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e4335e131a58952635560a003458011d97f9ea6f3c010dc24906050b42ee2c03"}, + {file = "Rtree-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:e7ca5d743f6a1dc62653dfac8ee7ce2e1ba91be7cf97916a7f60b7cbe48fb48d"}, + {file = "Rtree-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2ee7165e9872a026ccb868c021711eba39cedf7d1820763c9de52d5324691a92"}, + {file = "Rtree-1.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8de99f28af0f1783eefb80918959903b4b18112f6a12b48f296ecb162804e69d"}, + {file = "Rtree-1.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a94e2f4bf74bd202ea8b67ea3d7c71e763ad41f79be1d6b72aa2c8d5a8e92c4"}, + {file = "Rtree-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5120da3a1b96f3a7a17dd6af0afdd4e6f3cc9baa87e9ee0a272882f01f980bb"}, + {file = "Rtree-1.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7e3d5f0e7b28250afbb290ab88b49aa0f121c9714d0da2080581783690347507"}, + {file = "Rtree-1.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:296203e933b6ec0dd07f6a7456c4f1492def95b6993f20cc61c92b0fee0aecc5"}, + {file = "Rtree-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:77908cd7acdd519a731979ebf5baff8afd102109c2f52864c1e6ee75d3ea2d87"}, + {file = "Rtree-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:1a213e5d385278ca7668bc5b27083f8d6e39996a9bd59b6528f3a30009dae4ed"}, + {file = "Rtree-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cfa8cffec5cb9fed494c4bb335ebdb69b3c26178b0b685f67f79296c6b3d800c"}, + {file = "Rtree-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b31fd22d214160859d038da7cb2aaa27acb71efc24a7bcc75c84b5e502721549"}, + {file = "Rtree-1.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d68a81ad419d5c2ea5fecc677e6c178666c057e2c7b24100a6c48392196f1e9"}, + {file = "Rtree-1.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62f38020af47b765adc6b0bc7c4e810c6c3d1eab44ba339b592ff25a4c0dc0a7"}, + {file = "Rtree-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50b658a6707f215a0056d52e9f83a97148c0af62dea07cf29b3789a2c429e78a"}, + {file = "Rtree-1.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3573cbb0de872f54d0a0c29596a84e8ac3939c47ca3bece4a82e92775730a0d0"}, + {file = "Rtree-1.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5abe5a19d943a88bea14901970e4c53e4579fc2662404cdea6163bf4c04d49a"}, + {file = "Rtree-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1e894112cef4de6c518bdea0b43eada65f12888c3645cc437c3a677aa023039f"}, + {file = "Rtree-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:582854252b8fd5c8472478af060635434931fb55edd269bac128cbf2eef43620"}, + {file = "Rtree-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:b54057e8a8ad92c1d8e9eaa5cf32aad70dde454abbf9b638e9d6024520a52c02"}, + {file = "Rtree-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:698de8ce6c62e159d93b35bacf64bcf3619077b5367bc88cd2cff5e0bc36169b"}, + {file = "Rtree-1.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:273ee61783de3a1664e5f868feebf5eea4629447137751bfa4087b0f82093082"}, + {file = "Rtree-1.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16900ee02cf5c198a42b03635268a80f606aa102f3f7618b89f75023d406da1c"}, + {file = "Rtree-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ce4a6fdb63254a4c1efebe7a4f7a59b1c333c703bde4ae715d9ad88c833e10b"}, + {file = "Rtree-1.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5b20f69e040a05503b22297af223f336fe7047909b57e4b207b98292f33a229f"}, + {file = "Rtree-1.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:57128293dd625cb1f07726f32208097953e8854d70ab1fc55d6858733618b9ed"}, + {file = "Rtree-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e898d7409ab645c25e06d4e058f99271182601d70b2887aba3351bf08e09a0c6"}, + {file = "Rtree-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:ad9912faeddb1ddcec5e26b33089166d58a107af6862d8b7f1bb2b7c0002ab39"}, + {file = "Rtree-1.0.1.tar.gz", hash = "sha256:222121699c303a64065d849bf7038b1ecabc37b65c7fa340bedb38ef0e805429"}, +] + +[[package]] +name = "ruff" +version = "0.0.257" +description = "An extremely fast Python linter, written in Rust." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.0.257-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:7280640690c1d0046b20e0eb924319a89d8e22925d7d232180ce31196e7478f8"}, + {file = "ruff-0.0.257-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:4582b73da61ab410ffda35b2987a6eacb33f18263e1c91810f0b9779ec4f41a9"}, + {file = "ruff-0.0.257-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5acae9878f1136893e266348acdb9d30dfae23c296d3012043816432a5abdd51"}, + {file = "ruff-0.0.257-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d9f0912d045eee15e8e02e335c16d7a7f9fb6821aa5eb1628eeb5bbfa3d88908"}, + {file = "ruff-0.0.257-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a9542c34ee5298b31be6c6ba304f14b672dcf104846ee65adb2466d3e325870"}, + {file = "ruff-0.0.257-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3464f1ad4cea6c4b9325da13ae306bd22bf15d226e18d19c52db191b1f4355ac"}, + {file = "ruff-0.0.257-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a54bfd559e558ee0df2a2f3756423fe6a9de7307bc290d807c3cdf351cb4c24"}, + {file = "ruff-0.0.257-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3438fd38446e1a0915316f4085405c9feca20fe00a4b614995ab7034dbfaa7ff"}, + {file = "ruff-0.0.257-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:358cc2b547bd6451dcf2427b22a9c29a2d9c34e66576c693a6381c5f2ed3011d"}, + {file = "ruff-0.0.257-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:783390f1e94a168c79d7004426dae3e4ae2999cc85f7d00fdd86c62262b71854"}, + {file = "ruff-0.0.257-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:aaa3b5b6929c63a854b6bcea7a229453b455ab26337100b2905fae4523ca5667"}, + {file = "ruff-0.0.257-py3-none-musllinux_1_2_i686.whl", hash = "sha256:4ecd7a84db4816df2dcd0f11c5365a9a2cf4fa70a19b3ac161b7b0bfa592959d"}, + {file = "ruff-0.0.257-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:3db8d77d5651a2c0d307102d717627a025d4488d406f54c2764b21cfbe11d822"}, + {file = "ruff-0.0.257-py3-none-win32.whl", hash = "sha256:d2c8755fa4f6c5e5ec032ad341ca3beeecd16786e12c3f26e6b0cc40418ae998"}, + {file = "ruff-0.0.257-py3-none-win_amd64.whl", hash = "sha256:3cec07d6fecb1ebbc45ea8eeb1047b929caa2f7dfb8dd4b0e1869ff789326da5"}, + {file = "ruff-0.0.257-py3-none-win_arm64.whl", hash = "sha256:352f1bdb9b433b3b389aee512ffb0b82226ae1e25b3d92e4eaf0e7be6b1b6f6a"}, + {file = "ruff-0.0.257.tar.gz", hash = "sha256:fedfd06a37ddc17449203c3e38fc83fb68de7f20b5daa0ee4e60d3599b38bab0"}, +] + +[[package]] +name = "s3transfer" +version = "0.6.2" +description = "An Amazon S3 Transfer Manager" +category = "main" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "s3transfer-0.6.2-py3-none-any.whl", hash = "sha256:b014be3a8a2aab98cfe1abc7229cc5a9a0cf05eb9c1f2b86b230fd8df3f78084"}, + {file = "s3transfer-0.6.2.tar.gz", hash = "sha256:cab66d3380cca3e70939ef2255d01cd8aece6a4907a9528740f668c4b0611861"}, +] + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] + +[[package]] +name = "setuptools" +version = "68.1.2" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-68.1.2-py3-none-any.whl", hash = "sha256:3d8083eed2d13afc9426f227b24fd1659489ec107c0e86cec2ffdde5c92e790b"}, + {file = "setuptools-68.1.2.tar.gz", hash = "sha256:3d4dfa6d95f1b101d695a6160a7626e15583af71a5f52176efa5d39a054d475d"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5,<=7.1.2)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "shapely" +version = "2.0.1" +description = "Manipulation and analysis of geometric objects" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shapely-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b06d031bc64149e340448fea25eee01360a58936c89985cf584134171e05863f"}, + {file = "shapely-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9a6ac34c16f4d5d3c174c76c9d7614ec8fe735f8f82b6cc97a46b54f386a86bf"}, + {file = "shapely-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:865bc3d7cc0ea63189d11a0b1120d1307ed7a64720a8bfa5be2fde5fc6d0d33f"}, + {file = "shapely-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45b4833235b90bc87ee26c6537438fa77559d994d2d3be5190dd2e54d31b2820"}, + {file = "shapely-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ce88ec79df55430e37178a191ad8df45cae90b0f6972d46d867bf6ebbb58cc4d"}, + {file = "shapely-2.0.1-cp310-cp310-win32.whl", hash = "sha256:01224899ff692a62929ef1a3f5fe389043e262698a708ab7569f43a99a48ae82"}, + {file = "shapely-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:da71de5bf552d83dcc21b78cc0020e86f8d0feea43e202110973987ffa781c21"}, + {file = "shapely-2.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:502e0a607f1dcc6dee0125aeee886379be5242c854500ea5fd2e7ac076b9ce6d"}, + {file = "shapely-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7d3bbeefd8a6a1a1017265d2d36f8ff2d79d0162d8c141aa0d37a87063525656"}, + {file = "shapely-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f470a130d6ddb05b810fc1776d918659407f8d025b7f56d2742a596b6dffa6c7"}, + {file = "shapely-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4641325e065fd3e07d55677849c9ddfd0cf3ee98f96475126942e746d55b17c8"}, + {file = "shapely-2.0.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:90cfa4144ff189a3c3de62e2f3669283c98fb760cfa2e82ff70df40f11cadb39"}, + {file = "shapely-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70a18fc7d6418e5aea76ac55dce33f98e75bd413c6eb39cfed6a1ba36469d7d4"}, + {file = "shapely-2.0.1-cp311-cp311-win32.whl", hash = "sha256:09d6c7763b1bee0d0a2b84bb32a4c25c6359ad1ac582a62d8b211e89de986154"}, + {file = "shapely-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d8f55f355be7821dade839df785a49dc9f16d1af363134d07eb11e9207e0b189"}, + {file = "shapely-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:83a8ec0ee0192b6e3feee9f6a499d1377e9c295af74d7f81ecba5a42a6b195b7"}, + {file = "shapely-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a529218e72a3dbdc83676198e610485fdfa31178f4be5b519a8ae12ea688db14"}, + {file = "shapely-2.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91575d97fd67391b85686573d758896ed2fc7476321c9d2e2b0c398b628b961c"}, + {file = "shapely-2.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8b0d834b11be97d5ab2b4dceada20ae8e07bcccbc0f55d71df6729965f406ad"}, + {file = "shapely-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:b4f0711cc83734c6fad94fc8d4ec30f3d52c1787b17d9dca261dc841d4731c64"}, + {file = "shapely-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:05c51a29336e604c084fb43ae5dbbfa2c0ef9bd6fedeae0a0d02c7b57a56ba46"}, + {file = "shapely-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b519cf3726ddb6c67f6a951d1bb1d29691111eaa67ea19ddca4d454fbe35949c"}, + {file = "shapely-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:193a398d81c97a62fc3634a1a33798a58fd1dcf4aead254d080b273efbb7e3ff"}, + {file = "shapely-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e55698e0ed95a70fe9ff9a23c763acfe0bf335b02df12142f74e4543095e9a9b"}, + {file = "shapely-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f32a748703e7bf6e92dfa3d2936b2fbfe76f8ce5f756e24f49ef72d17d26ad02"}, + {file = "shapely-2.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a34a23d6266ca162499e4a22b79159dc0052f4973d16f16f990baa4d29e58b6"}, + {file = "shapely-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d173d24e85e51510e658fb108513d5bc11e3fd2820db6b1bd0522266ddd11f51"}, + {file = "shapely-2.0.1-cp38-cp38-win32.whl", hash = "sha256:3cb256ae0c01b17f7bc68ee2ffdd45aebf42af8992484ea55c29a6151abe4386"}, + {file = 
"shapely-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:c7eed1fb3008a8a4a56425334b7eb82651a51f9e9a9c2f72844a2fb394f38a6c"}, + {file = "shapely-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ac1dfc397475d1de485e76de0c3c91cc9d79bd39012a84bb0f5e8a199fc17bef"}, + {file = "shapely-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33403b8896e1d98aaa3a52110d828b18985d740cc9f34f198922018b1e0f8afe"}, + {file = "shapely-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2569a4b91caeef54dd5ae9091ae6f63526d8ca0b376b5bb9fd1a3195d047d7d4"}, + {file = "shapely-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a70a614791ff65f5e283feed747e1cc3d9e6c6ba91556e640636bbb0a1e32a71"}, + {file = "shapely-2.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c43755d2c46b75a7b74ac6226d2cc9fa2a76c3263c5ae70c195c6fb4e7b08e79"}, + {file = "shapely-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad81f292fffbd568ae71828e6c387da7eb5384a79db9b4fde14dd9fdeffca9a"}, + {file = "shapely-2.0.1-cp39-cp39-win32.whl", hash = "sha256:b50c401b64883e61556a90b89948297f1714dbac29243d17ed9284a47e6dd731"}, + {file = "shapely-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:bca57b683e3d94d0919e2f31e4d70fdfbb7059650ef1b431d9f4e045690edcd5"}, + {file = "shapely-2.0.1.tar.gz", hash = "sha256:66a6b1a3e72ece97fc85536a281476f9b7794de2e646ca8a4517e2e3c1446893"}, +] + +[package.dependencies] +numpy = ">=1.14" + +[package.extras] +docs = ["matplotlib", "numpydoc (>=1.1.0,<1.2.0)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "shellingham" +version = "1.5.3" +description = "Tool to Detect Surrounding Shell" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.3-py2.py3-none-any.whl", hash = "sha256:419c6a164770c9c7cfcaeddfacb3d31ac7a8db0b0f3e9c1287679359734107e9"}, + {file = "shellingham-1.5.3.tar.gz", hash = "sha256:cb4a6fec583535bc6da17b647dd2330cf7ef30239e05d547d99ae3705fd0f7f8"}, +] + +[[package]] +name = "simpleeval" +version = "0.9.13" +description = "A simple, safe single expression evaluator library." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "simpleeval-0.9.13-py2.py3-none-any.whl", hash = "sha256:22a2701a5006e4188d125d34accf2405c2c37c93f6b346f2484b6422415ae54a"}, + {file = "simpleeval-0.9.13.tar.gz", hash = "sha256:4a30f9cc01825fe4c719c785e3762623e350c4840d5e6855c2a8496baaa65fac"}, +] + +[[package]] +name = "simplejson" +version = "3.19.1" +description = "Simple, fast, extensible JSON encoder/decoder for Python" +category = "main" +optional = false +python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "simplejson-3.19.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:412e58997a30c5deb8cab5858b8e2e5b40ca007079f7010ee74565cc13d19665"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e765b1f47293dedf77946f0427e03ee45def2862edacd8868c6cf9ab97c8afbd"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:3231100edee292da78948fa0a77dee4e5a94a0a60bcba9ed7a9dc77f4d4bb11e"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:081ea6305b3b5e84ae7417e7f45956db5ea3872ec497a584ec86c3260cda049e"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:f253edf694ce836631b350d758d00a8c4011243d58318fbfbe0dd54a6a839ab4"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:5db86bb82034e055257c8e45228ca3dbce85e38d7bfa84fa7b2838e032a3219c"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:69a8b10a4f81548bc1e06ded0c4a6c9042c0be0d947c53c1ed89703f7e613950"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:58ee5e24d6863b22194020eb62673cf8cc69945fcad6b283919490f6e359f7c5"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:73d0904c2471f317386d4ae5c665b16b5c50ab4f3ee7fd3d3b7651e564ad74b1"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:66d780047c31ff316ee305c3f7550f352d87257c756413632303fc59fef19eac"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd4d50a27b065447c9c399f0bf0a993bd0e6308db8bbbfbc3ea03b41c145775a"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c16ec6a67a5f66ab004190829eeede01c633936375edcad7cbf06d3241e5865"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17a963e8dd4d81061cc05b627677c1f6a12e81345111fbdc5708c9f088d752c9"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7e78d79b10aa92f40f54178ada2b635c960d24fc6141856b926d82f67e56d169"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad071cd84a636195f35fa71de2186d717db775f94f985232775794d09f8d9061"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e7c70f19405e5f99168077b785fe15fcb5f9b3c0b70b0b5c2757ce294922c8c"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54fca2b26bcd1c403146fd9461d1da76199442297160721b1d63def2a1b17799"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:48600a6e0032bed17c20319d91775f1797d39953ccfd68c27f83c8d7fc3b32cb"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:93f5ac30607157a0b2579af59a065bcfaa7fadeb4875bf927a8f8b6739c8d910"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b79642a599740603ca86cf9df54f57a2013c47e1dd4dd2ae4769af0a6816900"}, + {file = "simplejson-3.19.1-cp310-cp310-win32.whl", hash = "sha256:d9f2c27f18a0b94107d57294aab3d06d6046ea843ed4a45cae8bd45756749f3a"}, + {file = "simplejson-3.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:5673d27806085d2a413b3be5f85fad6fca4b7ffd31cfe510bbe65eea52fff571"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:79c748aa61fd8098d0472e776743de20fae2686edb80a24f0f6593a77f74fe86"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:390f4a8ca61d90bcf806c3ad644e05fa5890f5b9a72abdd4ca8430cdc1e386fa"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d61482b5d18181e6bb4810b4a6a24c63a490c3a20e9fbd7876639653e2b30a1a"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2541fdb7467ef9bfad1f55b6c52e8ea52b3ce4a0027d37aff094190a955daa9d"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46133bc7dd45c9953e6ee4852e3de3d5a9a4a03b068bd238935a5c72f0a1ce34"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f96def94576f857abf58e031ce881b5a3fc25cbec64b2bc4824824a8a4367af9"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f14ecca970d825df0d29d5c6736ff27999ee7bdf5510e807f7ad8845f7760ce"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:66389b6b6ee46a94a493a933a26008a1bae0cfadeca176933e7ff6556c0ce998"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:22b867205cd258050c2625325fdd9a65f917a5aff22a23387e245ecae4098e78"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c39fa911e4302eb79c804b221ddec775c3da08833c0a9120041dd322789824de"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:65dafe413b15e8895ad42e49210b74a955c9ae65564952b0243a18fb35b986cc"}, + {file = "simplejson-3.19.1-cp311-cp311-win32.whl", hash = "sha256:f05d05d99fce5537d8f7a0af6417a9afa9af3a6c4bb1ba7359c53b6257625fcb"}, + {file = "simplejson-3.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:b46aaf0332a8a9c965310058cf3487d705bf672641d2c43a835625b326689cf4"}, + {file = "simplejson-3.19.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b438e5eaa474365f4faaeeef1ec3e8d5b4e7030706e3e3d6b5bee6049732e0e6"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa9d614a612ad02492f704fbac636f666fa89295a5d22b4facf2d665fc3b5ea9"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46e89f58e4bed107626edce1cf098da3664a336d01fc78fddcfb1f397f553d44"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96ade243fb6f3b57e7bd3b71e90c190cd0f93ec5dce6bf38734a73a2e5fa274f"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed18728b90758d171f0c66c475c24a443ede815cf3f1a91e907b0db0ebc6e508"}, + {file = 
"simplejson-3.19.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:6a561320485017ddfc21bd2ed5de2d70184f754f1c9b1947c55f8e2b0163a268"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:2098811cd241429c08b7fc5c9e41fcc3f59f27c2e8d1da2ccdcf6c8e340ab507"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:8f8d179393e6f0cf6c7c950576892ea6acbcea0a320838c61968ac7046f59228"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:eff87c68058374e45225089e4538c26329a13499bc0104b52b77f8428eed36b2"}, + {file = "simplejson-3.19.1-cp36-cp36m-win32.whl", hash = "sha256:d300773b93eed82f6da138fd1d081dc96fbe53d96000a85e41460fe07c8d8b33"}, + {file = "simplejson-3.19.1-cp36-cp36m-win_amd64.whl", hash = "sha256:37724c634f93e5caaca04458f267836eb9505d897ab3947b52f33b191bf344f3"}, + {file = "simplejson-3.19.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:74bf802debe68627227ddb665c067eb8c73aa68b2476369237adf55c1161b728"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70128fb92932524c89f373e17221cf9535d7d0c63794955cc3cd5868e19f5d38"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8090e75653ea7db75bc21fa5f7bcf5f7bdf64ea258cbbac45c7065f6324f1b50"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a755f7bfc8adcb94887710dc70cc12a69a454120c6adcc6f251c3f7b46ee6aac"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ccb2c1877bc9b25bc4f4687169caa925ffda605d7569c40e8e95186e9a5e58b"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:919bc5aa4d8094cf8f1371ea9119e5d952f741dc4162810ab714aec948a23fe5"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e333c5b62e93949f5ac27e6758ba53ef6ee4f93e36cc977fe2e3df85c02f6dc4"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3a4480e348000d89cf501b5606415f4d328484bbb431146c2971123d49fd8430"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cb502cde018e93e75dc8fc7bb2d93477ce4f3ac10369f48866c61b5e031db1fd"}, + {file = "simplejson-3.19.1-cp37-cp37m-win32.whl", hash = "sha256:f41915a4e1f059dfad614b187bc06021fefb5fc5255bfe63abf8247d2f7a646a"}, + {file = "simplejson-3.19.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3844305bc33d52c4975da07f75b480e17af3558c0d13085eaa6cc2f32882ccf7"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1cb19eacb77adc5a9720244d8d0b5507421d117c7ed4f2f9461424a1829e0ceb"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:926957b278de22797bfc2f004b15297013843b595b3cd7ecd9e37ccb5fad0b72"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b0e9a5e66969f7a47dc500e3dba8edc3b45d4eb31efb855c8647700a3493dd8a"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79d46e7e33c3a4ef853a1307b2032cfb7220e1a079d0c65488fbd7118f44935a"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344a5093b71c1b370968d0fbd14d55c9413cb6f0355fdefeb4a322d602d21776"}, + {file = 
"simplejson-3.19.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23fbb7b46d44ed7cbcda689295862851105c7594ae5875dce2a70eeaa498ff86"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3025e7e9ddb48813aec2974e1a7e68e63eac911dd5e0a9568775de107ac79a"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:87b190e6ceec286219bd6b6f13547ca433f977d4600b4e81739e9ac23b5b9ba9"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc935d8322ba9bc7b84f99f40f111809b0473df167bf5b93b89fb719d2c4892b"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3b652579c21af73879d99c8072c31476788c8c26b5565687fd9db154070d852a"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6aa7ca03f25b23b01629b1c7f78e1cd826a66bfb8809f8977a3635be2ec48f1a"}, + {file = "simplejson-3.19.1-cp38-cp38-win32.whl", hash = "sha256:08be5a241fdf67a8e05ac7edbd49b07b638ebe4846b560673e196b2a25c94b92"}, + {file = "simplejson-3.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:ca56a6c8c8236d6fe19abb67ef08d76f3c3f46712c49a3b6a5352b6e43e8855f"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6424d8229ba62e5dbbc377908cfee9b2edf25abd63b855c21f12ac596cd18e41"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:547ea86ca408a6735335c881a2e6208851027f5bfd678d8f2c92a0f02c7e7330"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:889328873c35cb0b2b4c83cbb83ec52efee5a05e75002e2c0c46c4e42790e83c"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cdb4e544134f305b033ad79ae5c6b9a32e7c58b46d9f55a64e2a883fbbba01"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc2b3f06430cbd4fac0dae5b2974d2bf14f71b415fb6de017f498950da8159b1"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d125e754d26c0298715bdc3f8a03a0658ecbe72330be247f4b328d229d8cf67f"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:476c8033abed7b1fd8db62a7600bf18501ce701c1a71179e4ce04ac92c1c5c3c"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:199a0bcd792811c252d71e3eabb3d4a132b3e85e43ebd93bfd053d5b59a7e78b"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a79b439a6a77649bb8e2f2644e6c9cc0adb720fc55bed63546edea86e1d5c6c8"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:203412745fed916fc04566ecef3f2b6c872b52f1e7fb3a6a84451b800fb508c1"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ca922c61d87b4c38f37aa706520328ffe22d7ac1553ef1cadc73f053a673553"}, + {file = "simplejson-3.19.1-cp39-cp39-win32.whl", hash = "sha256:3e0902c278243d6f7223ba3e6c5738614c971fd9a887fff8feaa8dcf7249c8d4"}, + {file = "simplejson-3.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:d396b610e77b0c438846607cd56418bfc194973b9886550a98fd6724e8c6cfec"}, + {file = "simplejson-3.19.1-py3-none-any.whl", hash = "sha256:4710806eb75e87919b858af0cba4ffedc01b463edc3982ded7b55143f39e41e1"}, + {file = "simplejson-3.19.1.tar.gz", hash = 
"sha256:6277f60848a7d8319d27d2be767a7546bc965535b28070e310b3a9af90604a4c"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smart-open" +version = "6.3.0" +description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)" +category = "main" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "smart_open-6.3.0-py3-none-any.whl", hash = "sha256:b4c9ae193ad6d3e7add50944b86afa0d150bd821ab8ec21edb26d9a06b66f6a8"}, + {file = "smart_open-6.3.0.tar.gz", hash = "sha256:d5238825fe9a9340645fac3d75b287c08fbb99fb2b422477de781c9f5f09e019"}, +] + +[package.extras] +all = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "paramiko", "requests"] +azure = ["azure-common", "azure-core", "azure-storage-blob"] +gcs = ["google-cloud-storage (>=2.6.0)"] +http = ["requests"] +s3 = ["boto3"] +ssh = ["paramiko"] +test = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "moto[server]", "paramiko", "pytest", "pytest-rerunfailures", "requests", "responses"] +webhdfs = ["requests"] + +[[package]] +name = "smmap" +version = "5.0.0" +description = "A pure Python implementation of a sliding window memory map manager" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, + {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, +] + +[[package]] +name = "snakemake" +version = "7.32.0" +description = "Workflow management system to create reproducible and scalable data analyses" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "snakemake-7.32.0-py3-none-any.whl", hash = "sha256:e0ec7012320808e769dd4277ce7af12fe7e2726878b85f98a10987413aaecb8c"}, + {file = "snakemake-7.32.0.tar.gz", hash = "sha256:f9506a7160f9410eb398927bf0c73269c9d6857f40213c34cb7ac54bea8f53a5"}, +] + +[package.dependencies] +appdirs = "*" +configargparse = "*" +connection-pool = ">=0.0.3" +datrie = "*" +docutils = "*" +gitpython = "*" +humanfriendly = "*" +jinja2 = ">=3.0,<4.0" +jsonschema = "*" +nbformat = "*" +packaging = "*" +psutil = "*" +pulp = ">=2.0" +pyyaml = "*" +requests = "*" +reretry = "*" +smart-open = ">=3.0" +stopit = "*" +tabulate = "*" +throttler = "*" +toposort = ">=1.10" +wrapt = "*" +yte = ">=1.5.1,<2.0" + +[package.extras] +azure = ["azure-batch", "azure-core", "azure-identity", "azure-mgmt-batch", "azure-storage-blob"] +google-cloud = ["google-api-python-client", "google-cloud-storage", "google-crc32c", "oauth2client"] +messaging = ["slacker"] +pep = ["eido", "peppy"] +reports = ["pygments"] + +[[package]] +name = "snuggs" +version = "1.4.7" +description = "Snuggs are s-expressions for Numpy" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "snuggs-1.4.7-py3-none-any.whl", hash = "sha256:988dde5d4db88e9d71c99457404773dabcc7a1c45971bfbe81900999942d9f07"}, + {file = "snuggs-1.4.7.tar.gz", hash = 
"sha256:501cf113fe3892e14e2fee76da5cd0606b7e149c411c271898e6259ebde2617b"}, +] + +[package.dependencies] +numpy = "*" +pyparsing = ">=2.1.6" + +[package.extras] +test = ["hypothesis", "pytest"] + +[[package]] +name = "soupsieve" +version = "2.4.1" +description = "A modern CSS selector implementation for Beautiful Soup." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, + {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.20" +description = "Database Abstraction Library" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.20-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759b51346aa388c2e606ee206c0bc6f15a5299f6174d1e10cadbe4530d3c7a98"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1506e988ebeaaf316f183da601f24eedd7452e163010ea63dbe52dc91c7fc70e"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5768c268df78bacbde166b48be788b83dddaa2a5974b8810af422ddfe68a9bc8"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3f0dd6d15b6dc8b28a838a5c48ced7455c3e1fb47b89da9c79cc2090b072a50"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:243d0fb261f80a26774829bc2cee71df3222587ac789b7eaf6555c5b15651eed"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6eb6d77c31e1bf4268b4d61b549c341cbff9842f8e115ba6904249c20cb78a61"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-win32.whl", hash = "sha256:bcb04441f370cbe6e37c2b8d79e4af9e4789f626c595899d94abebe8b38f9a4d"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-win_amd64.whl", hash = "sha256:d32b5ffef6c5bcb452723a496bad2d4c52b346240c59b3e6dba279f6dcc06c14"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd81466bdbc82b060c3c110b2937ab65ace41dfa7b18681fdfad2f37f27acdd7"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6fe7d61dc71119e21ddb0094ee994418c12f68c61b3d263ebaae50ea8399c4d4"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4e571af672e1bb710b3cc1a9794b55bce1eae5aed41a608c0401885e3491179"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3364b7066b3c7f4437dd345d47271f1251e0cfb0aba67e785343cdbdb0fff08c"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1be86ccea0c965a1e8cd6ccf6884b924c319fcc85765f16c69f1ae7148eba64b"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1d35d49a972649b5080557c603110620a86aa11db350d7a7cb0f0a3f611948a0"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-win32.whl", hash = "sha256:27d554ef5d12501898d88d255c54eef8414576f34672e02fe96d75908993cf53"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-win_amd64.whl", hash = "sha256:411e7f140200c02c4b953b3dbd08351c9f9818d2bd591b56d0fa0716bd014f1e"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3c6aceebbc47db04f2d779db03afeaa2c73ea3f8dcd3987eb9efdb987ffa09a3"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7d3f175410a6db0ad96b10bfbb0a5530ecd4fcf1e2b5d83d968dd64791f810ed"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea8186be85da6587456c9ddc7bf480ebad1a0e6dcbad3967c4821233a4d4df57"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c3d99ba99007dab8233f635c32b5cd24fb1df8d64e17bc7df136cedbea427897"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:76fdfc0f6f5341987474ff48e7a66c3cd2b8a71ddda01fa82fedb180b961630a"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-win32.whl", hash = "sha256:d3793dcf5bc4d74ae1e9db15121250c2da476e1af8e45a1d9a52b1513a393459"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-win_amd64.whl", hash = "sha256:79fde625a0a55220d3624e64101ed68a059c1c1f126c74f08a42097a72ff66a9"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:599ccd23a7146e126be1c7632d1d47847fa9f333104d03325c4e15440fc7d927"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1a58052b5a93425f656675673ef1f7e005a3b72e3f2c91b8acca1b27ccadf5f4"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79543f945be7a5ada9943d555cf9b1531cfea49241809dd1183701f94a748624"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63e73da7fb030ae0a46a9ffbeef7e892f5def4baf8064786d040d45c1d6d1dc5"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3ce5e81b800a8afc870bb8e0a275d81957e16f8c4b62415a7b386f29a0cb9763"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb0d3e94c2a84215532d9bcf10229476ffd3b08f481c53754113b794afb62d14"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-win32.whl", hash = "sha256:8dd77fd6648b677d7742d2c3cc105a66e2681cc5e5fb247b88c7a7b78351cf74"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-win_amd64.whl", hash = "sha256:6f8a934f9dfdf762c844e5164046a9cea25fabbc9ec865c023fe7f300f11ca4a"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:26a3399eaf65e9ab2690c07bd5cf898b639e76903e0abad096cd609233ce5208"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4cde2e1096cbb3e62002efdb7050113aa5f01718035ba9f29f9d89c3758e7e4e"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b09ba72e4e6d341bb5bdd3564f1cea6095d4c3632e45dc69375a1dbe4e26ec"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b74eeafaa11372627ce94e4dc88a6751b2b4d263015b3523e2b1e57291102f0"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:77d37c1b4e64c926fa3de23e8244b964aab92963d0f74d98cbc0783a9e04f501"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:eefebcc5c555803065128401a1e224a64607259b5eb907021bf9b175f315d2a6"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-win32.whl", hash = "sha256:3423dc2a3b94125094897118b52bdf4d37daf142cbcf26d48af284b763ab90e9"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-win_amd64.whl", hash = "sha256:5ed61e3463021763b853628aef8bc5d469fe12d95f82c74ef605049d810f3267"}, + {file = "SQLAlchemy-2.0.20-py3-none-any.whl", hash = "sha256:63a368231c53c93e2b67d0c5556a9836fdcd383f7e3026a39602aad775b14acf"}, + {file = "SQLAlchemy-2.0.20.tar.gz", hash = "sha256:ca8a5ff2aa7f3ade6c498aaafce25b1eaeabe4e42b73e25519183e4566a16fc6"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", 
markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.2.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx-oracle (>=7)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3-binary"] + +[[package]] +name = "stevedore" +version = "5.1.0" +description = "Manage dynamic plugins for Python applications" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "stevedore-5.1.0-py3-none-any.whl", hash = "sha256:8cc040628f3cea5d7128f2e76cf486b2251a4e543c7b938f58d9a377f6694a2d"}, + {file = "stevedore-5.1.0.tar.gz", hash = "sha256:a54534acf9b89bc7ed264807013b505bf07f74dbe4bcfa37d32bd063870b087c"}, +] + +[package.dependencies] +pbr = ">=2.0.0,<2.1.0 || >2.1.0" + +[[package]] +name = "stopit" +version = "1.1.2" +description = "Timeout control decorator and context managers, raise any exception in another thread" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "stopit-1.1.2.tar.gz", hash = "sha256:f7f39c583fd92027bd9d06127b259aee7a5b7945c1f1fa56263811e1e766996d"}, +] + +[[package]] +name = "stringcase" +version = "1.2.0" +description = "String case converter." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "stringcase-1.2.0.tar.gz", hash = "sha256:48a06980661908efe8d9d34eab2b6c13aefa2163b3ced26972902e3bdfd87008"}, +] + +[[package]] +name = "tableschema" +version = "1.7.4" +description = "A utility library for working with Table Schema in Python" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "tableschema-1.7.4-py2.py3-none-any.whl", hash = "sha256:6edea82839bfef161eacdbadca0a2c96a3b376070fecf5ebd81c020806e77104"}, + {file = "tableschema-1.7.4.tar.gz", hash = "sha256:f3f38c1143f881e3d34b96439859fd00e5688dc6fd0dc292c41d5812f140b669"}, +] + +[package.dependencies] +click = ">=3.3" +isodate = ">=0.5.4" +jsonschema = ">=2.5" +python-dateutil = ">=2.4" +requests = ">=2.5" +rfc3986 = ">=1.1.0" +six = ">=1.9" +tabulator = ">=1.20" +unicodecsv = ">=0.14" + +[package.extras] +develop = ["mock", "pylama", "pytest", "pytest-cov", "tox"] + +[[package]] +name = "tabula-py" +version = "2.7.0" +description = "Simple wrapper for tabula-java, read tables from PDF into DataFrame" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tabula-py-2.7.0.tar.gz", hash = "sha256:e95d8ff49fe2f86534006ae8bf38d54a9894ae70042831d70d5760800519bae8"}, + {file = "tabula_py-2.7.0-py3-none-any.whl", hash = "sha256:7151077fedd4a6976a1a4388e36774e87d7a421a4dd0775cc3da25f2fd43ebbe"}, +] + +[package.dependencies] +distro = "*" +numpy = "*" +pandas = ">=0.25.3" + +[package.extras] +dev = ["Flake8-pyproject", "black", "flake8", "isort", "mypy", "pytest"] +doc = ["sphinx", "sphinx-rtd-theme"] +test = ["pytest"] + +[[package]] +name = "tabulate" +version = "0.9.0" +description = "Pretty-print tabular data" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[package.extras] +widechars = ["wcwidth"] + +[[package]] +name = "tabulator" +version = "1.53.5" +description = "Consistent interface for stream reading and writing tabular data (csv/xls/json/etc)" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "tabulator-1.53.5-py2.py3-none-any.whl", hash = "sha256:6de83026f9dc8ac34dbc9e60cced1785ed2f4022ae0250eeaa59e92b5090f477"}, + {file = "tabulator-1.53.5.tar.gz", hash = "sha256:0e2887d0b36483a292e75b118d7f03e710c1337435553b5f62d2a7a891b7fe55"}, +] + +[package.dependencies] +boto3 = ">=1.9" +chardet = ">=3.0" +click = ">=6.0" +ijson = ">=3.0.3" +jsonlines = ">=1.1" +linear-tsv = ">=1.0" +openpyxl = ">=2.6" +requests = ">=2.8" +six = ">=1.9" +sqlalchemy = ">=0.9.6" +unicodecsv = ">=0.14" +xlrd = ">=1.0" + +[package.extras] +cchardet = ["cchardet (>=2.0)"] +datapackage = ["datapackage (>=1.12)"] +develop = ["mock", "moto[server]", "pylama", "pytest", "pytest-cov"] +html = ["pyquery (<1.4.2)"] +ods = ["ezodf (>=0.3)", "lxml (>=3.0)"] + +[[package]] +name = "tenacity" +version = "8.2.3" +description = "Retry code until it succeeds" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, + {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, +] + +[package.extras] +doc = ["reno", "sphinx", 
"tornado (>=4.5)"] + +[[package]] +name = "text-unidecode" +version = "1.3" +description = "The most basic Text::Unidecode port" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, + {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, +] + +[[package]] +name = "throttler" +version = "1.2.2" +description = "Zero-dependency Python package for easy throttling with asyncio support" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "throttler-1.2.2-py3-none-any.whl", hash = "sha256:fc6ae612a2529e01110b32335af40375258b98e3b81232ec77cd07f51bf71392"}, + {file = "throttler-1.2.2.tar.gz", hash = "sha256:d54db406d98e1b54d18a9ba2b31ab9f093ac64a0a59d730c1cf7bb1cdfc94a58"}, +] + +[package.extras] +dev = ["aiohttp (>=3.8)", "codecov (>=2.1)", "flake8 (>=4.0)", "pytest (>=7.0)", "pytest-asyncio (>=0.16)", "pytest-cov (>=3.0)"] + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + +[[package]] +name = "tomli" +version = "1.2.3" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, + {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, +] + +[[package]] +name = "tomlkit" +version = "0.12.1" +description = "Style preserving TOML library" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomlkit-0.12.1-py3-none-any.whl", hash = "sha256:712cbd236609acc6a3e2e97253dfc52d4c2082982a88f61b640ecf0817eab899"}, + {file = "tomlkit-0.12.1.tar.gz", hash = "sha256:38e1ff8edb991273ec9f6181244a6a391ac30e9f5098e7535640ea6be97a7c86"}, +] + +[[package]] +name = "toposort" +version = "1.10" +description = "Implements a topological sort algorithm." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "toposort-1.10-py3-none-any.whl", hash = "sha256:cbdbc0d0bee4d2695ab2ceec97fe0679e9c10eab4b2a87a9372b929e70563a87"}, + {file = "toposort-1.10.tar.gz", hash = "sha256:bfbb479c53d0a696ea7402601f4e693c97b0367837c8898bc6471adfca37a6bd"}, +] + +[[package]] +name = "traitlets" +version = "5.9.0" +description = "Traitlets Python configuration system" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"}, + {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] + +[[package]] +name = "typer" +version = "0.9.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
+category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, + {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, +] + +[package.dependencies] +click = ">=7.1.1,<9.0.0" +colorama = {version = ">=0.4.3,<0.5.0", optional = true, markers = "extra == \"all\""} +rich = {version = ">=10.11.0,<14.0.0", optional = true, markers = "extra == \"all\""} +shellingham = {version = ">=1.3.0,<2.0.0", optional = true, markers = "extra == \"all\""} +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] +doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.11" +description = "Typing stubs for PyYAML" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"}, + {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"}, +] + +[[package]] +name = "types-requests" +version = "2.31.0.2" +description = "Typing stubs for requests" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"}, + {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"}, +] + +[package.dependencies] +types-urllib3 = "*" + +[[package]] +name = "types-urllib3" +version = "1.26.25.14" +description = "Typing stubs for urllib3" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, + {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, +] + +[[package]] +name = "typing-extensions" +version = "4.7.1" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, +] + +[[package]] +name = "unicodecsv" +version = "0.14.1" +description = "Python2's stdlib csv module is nice, but it doesn't support unicode. This module is a drop-in replacement which *does*." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "unicodecsv-0.14.1.tar.gz", hash = "sha256:018c08037d48649a0412063ff4eda26eaa81eff1546dbffa51fa5293276ff7fc"}, +] + +[[package]] +name = "urllib3" +version = "1.26.16" +description = "HTTP library with thread-safe connection pooling, file post, and more." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.16-py2.py3-none-any.whl", hash = "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f"}, + {file = "urllib3-1.26.16.tar.gz", hash = "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "validators" +version = "0.21.2" +description = "Python Data Validation for Humans™" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "validators-0.21.2-py3-none-any.whl", hash = "sha256:6ad95131005a9d4c734a69dd4ef08cf66961e61222e60da25a9b5137cecd6fd4"}, + {file = "validators-0.21.2.tar.gz", hash = "sha256:002ba1552076535176824e43149c18c06f6b611bc8b597ddbcf8770bcf5f9f5c"}, +] + +[package.extras] +docs-offline = ["myst-parser (>=2.0.0)", "pypandoc-binary (>=1.11)", "sphinx (>=7.1.1)"] +docs-online = ["mkdocs (>=1.5.2)", "mkdocs-material (>=9.1.21)", "mkdocstrings[python] (>=0.22.0)", "pyaml (>=23.7.0)"] +hooks = ["pre-commit (>=3.3.3)"] +runner = ["tox (>=4.6.4)"] +sast = ["bandit[toml] (>=1.7.5)"] +testing = ["pytest (>=7.4.0)"] +tooling = ["black (>=23.7.0)", "pyright (>=1.1.320)", "ruff (>=0.0.280)"] +tooling-extras = ["pyaml (>=23.7.0)", "pypandoc-binary (>=1.11)", "pytest (>=7.4.0)"] + +[[package]] +name = "virtualenv" +version = "20.24.4" +description = "Virtual Python Environment builder" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.24.4-py3-none-any.whl", hash = "sha256:29c70bb9b88510f6414ac3e55c8b413a1f96239b6b789ca123437d5e892190cb"}, + {file = "virtualenv-20.24.4.tar.gz", hash = "sha256:772b05bfda7ed3b8ecd16021ca9716273ad9f4467c801f27e83ac73430246dca"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<4" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + +[[package]] +name = "watchdog" +version = "3.0.0" +description = "Filesystem events monitoring" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, + {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, + {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, + {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, + {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, + {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, + {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, + {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, + {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, + {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, + {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, + {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, + {file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, + {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, + {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, + {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, + {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, + {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, + {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, + {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, + {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, +] + +[package.extras] +watchmedo = 
["PyYAML (>=3.10)"] + +[[package]] +name = "wrapt" +version = "1.15.0" +description = "Module for decorators, wrappers and monkey patching." +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, + {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, + {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, + {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, + {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, + {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, + {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, + {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, + {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, + {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, + {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, + {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, + {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, + {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, + {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, + {file = 
"wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, + {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, + {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, + {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, + {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, +] + +[[package]] +name = "xlrd" +version = "2.0.1" +description = "Library for developers to extract data from Microsoft Excel (tm) .xls spreadsheet files" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "xlrd-2.0.1-py2.py3-none-any.whl", hash = "sha256:6a33ee89877bd9abc1158129f6e94be74e2679636b8a205b43b85206c3f0bbdd"}, + {file = "xlrd-2.0.1.tar.gz", hash = "sha256:f72f148f54442c6b056bf931dbc34f986fd0c3b0b6b5a58d013c9aef274d0c88"}, +] + +[package.extras] +build = ["twine", "wheel"] +docs = ["sphinx"] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "yte" +version = "1.5.1" +description = "A YAML template engine with Python expressions" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yte-1.5.1-py3-none-any.whl", hash = "sha256:fd646bc47c355f202f14b7476996de4a31501cf1e107ac7ad8e19edcd786d30b"}, + {file = "yte-1.5.1.tar.gz", hash = "sha256:6d0b315b78af83276d78f5f67c107c84238f772a76d74f4fc77905b46f3731f5"}, +] + +[package.dependencies] +dpath = ">=2.0,<3.0" +plac = ">=1.3.4,<2.0.0" +pyyaml = ">=6.0,<7.0" + +[[package]] +name = "zipp" +version = "3.16.2" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.16.2-py3-none-any.whl", hash = "sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0"}, + {file = "zipp-3.16.2.tar.gz", hash = "sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.8,<3.12" +content-hash = "720714634109f6f723e75aef2252e9d00a6aeaeecaa57da495d12b9c6df5eb0b" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..43008eae --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,117 @@ +[tool.poetry] +name = "digipipe" +version = "1.0.0" +description = "Pipeline for data and energy system in the Digiplan project." 
+authors = [
+    "Jonathan Amme ",
+    "Marie-Claire Gering ",
+    "David Starzl ",
+    "Aaron Schilling ",
+    "Hedwig Bartels ",
+]
+license = 'GNU AGPLv3'
+classifiers = [ # Optional
+    # How mature is this project? Common values are
+    # 3 - Alpha
+    # 4 - Beta
+    # 5 - Production/Stable
+    "Development Status :: 5 - Production/Stable",
+    # Indicate who your project is intended for
+    "Intended Audience :: Developers",
+    "Topic :: Software Development :: Build Tools",
+    # Pick your license as you wish
+    "License :: OSI Approved :: AGPL License",
+    # Specify the Python versions you support here. In particular, ensure
+    # that you indicate whether you support Python 2, Python 3 or both.
+    # These classifiers are *not* checked by 'pip install'. See instead
+    # 'python_requires' below.
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.6",
+    "Programming Language :: Python :: 3.7",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+]
+readme = "README.md"
+repository = "https://github.com/rl-institut/digipipe"
+
+[tool.poetry.urls]
+"Bug Tracker" = "https://github.com/rl-institut/digipipe/issues"
+
+[tool.poetry.dependencies]
+python = ">=3.8,<3.12"
+pandas = "^1.5.3"
+geopandas = "^0.12.2"
+shapely = "^2.0.1"
+snakemake = "7.32.0"
+matplotlib = "^3.7.0"
+pyyaml = "^6.0"
+descartes = "^1.1.0"
+fiona = "<=1.9"
+rtree = "^1.0.1"
+openpyxl = "3.1.0"
+plotly = "^5.13.1"
+rasterstats = "^0.18.0"
+networkx = "^3.0"
+pygraphviz = "^1.10"
+pygments = "^2.14.0"
+rasterio = "^1.3.6"
+jinja2 = "3.0.3"
+geopy = "^2.3.0"
+oemoflex = { git = "https://git@github.com/rl-institut/oemoflex", tag = "0.0.1"}
+ruff = "^0.0.257"
+types-pyyaml = "^6.0.12.9"
+pyxlsb = "^1.0.10"
+tabula-py = "^2.7.0"
+requests = "^2.30.0"
+types-requests = "^2.30.0.0"
+mkdocs = "^1.4.3"
+mkdocs-material = "^9.1.12"
+mkdocs-include-markdown-plugin = "^4.0.4"
+mkdocs-same-dir = "^0.1.2"
+mkdocs-exclude = "^1.0.2"
+mkdocs-exclude-search = "^0.6.5"
+boto3 = "1.26.125"
+
+[tool.poetry.dev-dependencies]
+m2r2 = "^0.3.3"
+
+
+# Testing
+# ------------------------------------------------------------------------------
+# mypy = "^0.812" # https://github.com/python/mypy
+pytest = "^6.2.4" # https://github.com/pytest-dev/pytest
+
+
+# Code quality
+# ------------------------------------------------------------------------------
+flake8 = "^3.9.2" # https://github.com/PyCQA/flake8
+pylint-django = "^2.3.0" # https://github.com/PyCQA/pylint-django # downgraded because https://github.com/PyCQA/pylint-django/issues/309
+pre-commit = "^2.13.0" # https://github.com/pre-commit/pre-commit
+flake8-bandit = "^3.0.0"
+flake8-bugbear = "^22.4.25"
+flake8-builtins = "^1.5.3"
+flake8-comprehensions = "^3.10.0"
+darglint = "^1.8.1"
+flake8-eradicate = "^1.2.1"
+flake8-isort = "^4.1.1"
+flake8-pytest-style = "^1.6.0"
+black = "^21.12b0"
+mypy = "^1.0.1"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.black]
+line-length = 80
+
+[tool.isort]
+profile = "black"
+line_length = 80
+
+[tool.ruff]
+line-length = 80
+ignore = [
+    "F821", # Undefined name
+]
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 9a53b4b7..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-pandas
-geopandas
-shapely
-snakemake
-matplotlib
-pyyaml
-descartes
-fiona
-rtree
-plotly
-rasterstats
-networkx
-pygraphviz
-pygments
-rasterio
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 70727073..00000000
--- a/setup.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from setuptools import setup, find_packages
-from os import path
-
-here = path.abspath(path.dirname(__file__))
-
-# Get the long description from the README file
-with open(path.join(here, "README.md"), encoding="utf-8") as f:
-    long_description = f.read()
-
-with open('requirements.txt') as f:
-    requirements = f.read().splitlines()
-
-setup(
-    name="digipipe",
-    version="0.0.0",
-    description="Pipeline for data and energy system in the Digiplan project.",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url="https://github.com/rl-institut/digipipe/",
-    author="Reiner Lemoine Institut",
-    author_email='jonathan.amme@rl-institut.de',
-    license='GNU AGPLv3',
-    classifiers=[ # Optional
-        # How mature is this project? Common values are
-        # 3 - Alpha
-        # 4 - Beta
-        # 5 - Production/Stable
-        "Development Status :: 3 - Alpha",
-        # Indicate who your project is intended for
-        "Intended Audience :: Developers",
-        "Topic :: Software Development :: Build Tools",
-        # Pick your license as you wish
-        "License :: OSI Approved :: AGPL License",
-        # Specify the Python versions you support here. In particular, ensure
-        # that you indicate whether you support Python 2, Python 3 or both.
-        # These classifiers are *not* checked by 'pip install'. See instead
-        # 'python_requires' below.
-        "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.6",
-        "Programming Language :: Python :: 3.7",
-        "Programming Language :: Python :: 3.8",
-    ],
-    packages=find_packages(),
-    python_requires=">=3, <4",
-    install_requires=requirements,
-    #extras_require={"dev": [], "test": []}, # Optional
-    project_urls={ # Optional
-        "Bug Reports": "https://github.com/rl-institut/digipipe/issues",
-        "Source": "https://github.com/rl-institut/digipipe",
-    },
-)
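Note: with requirements.txt and setup.py removed, installation is handled entirely by Poetry via pyproject.toml and poetry.lock. A minimal sketch of the replacement workflow, assuming Poetry is installed locally; only standard Poetry CLI commands are shown, nothing project-specific:

    # install the dependencies pinned in poetry.lock into the project environment
    poetry install

    # re-resolve dependencies and rewrite poetry.lock after editing [tool.poetry.dependencies]
    poetry lock

    # run any command inside the Poetry-managed environment
    poetry run python --version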