From 60bbd11a3e67567d7c211dffeb1b0ec0e701ad3e Mon Sep 17 00:00:00 2001 From: Kamran Date: Sat, 2 Mar 2024 10:03:07 -0800 Subject: [PATCH 1/5] update test yaml structure --- workflow/tests/test_yaml_structure.py | 35 ++++++++++++++++++--------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/workflow/tests/test_yaml_structure.py b/workflow/tests/test_yaml_structure.py index 099593d8..958da1d8 100644 --- a/workflow/tests/test_yaml_structure.py +++ b/workflow/tests/test_yaml_structure.py @@ -6,33 +6,34 @@ def load_yaml_file(filepath): return yaml.safe_load(file) -def compare_structures(data1, data2, path=""): +def compare_structures(data1, data2, path="", yaml_name=""): if type(data1) != type(data2): print( - f"Type mismatch at {path}: {type(data1).__name__} vs {type(data2).__name__}", + f"Type mismatch at {path} in {yaml_name}: {type(data1).__name__} vs {type(data2).__name__}", ) return False if isinstance(data1, dict): for key in data1: if key not in data2: - print(f"Missing key in second structure at {path}: {key}") + print(f"Missing key '{key}' in second structure at {path} in {yaml_name}") continue compare_structures( data1[key], data2[key], path=f"{path}.{key}" if path else key, + yaml_name=yaml_name, ) for key in data2: if key not in data1: - print(f"Missing key in first structure at {path}: {key}") + print(f"Missing key '{key}' in first structure at {path} in {yaml_name}") return True elif isinstance(data1, list): # For simplicity, just compare the first item if it exists, assuming homogeneous lists if data1 and data2: - compare_structures(data1[0], data2[0], path=f"{path}[0]") + compare_structures(data1[0], data2[0], path=f"{path}[0]", yaml_name=yaml_name) elif not data1 and data2 or data1 and not data2: - print(f"List length mismatch or one is empty at {path}") + print(f"List length mismatch or one is empty at {path} in {yaml_name}") return True else: # This part ignores values if they are not container types @@ -44,14 +45,26 @@ def test_yaml_structure(filepath1, filepath2): data2 = load_yaml_file(filepath2) print("Comparing structure...") - if compare_structures(data1, data2): - print("The structures match.") + if compare_structures(data1, data2, yaml_name=filepath1): + print(f"The structures in {filepath1} and {filepath2} match.") else: - print("The structures do not match.") + print(f"The structures in {filepath1} and {filepath2} do not match.") - -# Example usage +print("\n") test_yaml_structure( "../config/tests/config.test_simple.yaml", "../config/config.default.yaml", ) + +print("\n") +test_yaml_structure( + "../config/tests/config.test.yaml", + "../config/config.default.yaml", +) + +print("\n") +test_yaml_structure( + "../config/tests/config.validation.yaml", + "../config/config.default.yaml", +) +print("\n") \ No newline at end of file From 352a120d6bd1b9d330b1eb20ec0fc108055fdf0b Mon Sep 17 00:00:00 2001 From: Kamran Date: Sat, 2 Mar 2024 10:03:39 -0800 Subject: [PATCH 2/5] update validation config --- workflow/config/tests/config.validation.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/workflow/config/tests/config.validation.yaml b/workflow/config/tests/config.validation.yaml index 23883785..5ccf4b0b 100644 --- a/workflow/config/tests/config.validation.yaml +++ b/workflow/config/tests/config.validation.yaml @@ -10,8 +10,8 @@ run: # docs : scenario: - interconnect: [western, texas] #"usa|texas|western|eastern" - clusters: [40] + interconnect: [western] #"usa|texas|western|eastern" + clusters: [300] opts: [Co2L2.0] ll: [v1.0] 
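  # ll v1.0 caps total line volume at 1.0x today's level, i.e. no net transmission expansion (standard PyPSA-Eur {ll} wildcard semantics)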
scope: "total" # "urban", "rural", or "total" @@ -73,6 +73,7 @@ electricity: co2base: 226.86e+6 #base_from_2020 Locations of the 250 MMmt of CO2 emissions from the WECC 2021. gaslimit: false # global gas usage limit of X MWh_th retirement: economic # "economic" or "technical" + SAFE_reservemargin: 0.15 operational_reserve: activate: false @@ -270,7 +271,7 @@ clustering: ramp_limit_down: max focus_weights: - # California: 0.5 + California: 0.5 # docs : solving: From f8077098a55773aa62d47dd7bb1a7021413be0a3 Mon Sep 17 00:00:00 2001 From: Kamran Date: Mon, 4 Mar 2024 18:36:30 -0800 Subject: [PATCH 3/5] Update default config --- workflow/Snakefile | 4 ++-- workflow/config/config.default.yaml | 6 +++--- workflow/rules/solve_electricity.smk | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/workflow/Snakefile b/workflow/Snakefile index 8642b5e5..3361abc8 100644 --- a/workflow/Snakefile +++ b/workflow/Snakefile @@ -64,8 +64,8 @@ wildcard_constraints: # Merge subworkflow configs and main config # configfile: "config/tests/config.validation.yaml" -configfile: "config/tests/config.test_simple.yaml" -# configfile: "config/config.default.yaml" +# configfile: "config/tests/config.test_simple.yaml" +configfile: "config/config.default.yaml" configfile: "config/config.cluster.yaml" configfile: "config/config.osw.yaml" configfile: "config/config.plotting.yaml" diff --git a/workflow/config/config.default.yaml b/workflow/config/config.default.yaml index 2fa91862..2974413f 100644 --- a/workflow/config/config.default.yaml +++ b/workflow/config/config.default.yaml @@ -12,8 +12,8 @@ run: scenario: interconnect: western #"usa|texas|western|eastern" clusters: [100] - opts: [RCo2L-SAFER-CCL] - ll: [v1.15] + opts: [RCo2L-SAFER-RPS] + ll: [v1.0, v1.05] scope: "total" # "urban", "rural", or "total" sector: "" # G planning_horizons: @@ -263,7 +263,7 @@ clustering: algorithm: kmeans # choose from: [hac, kmeans] feature: solar+onwind-time # only for hac. choose from: [solar+onwind-time, solar+onwind-cap, solar-time, solar-cap, solar+offwind-cap] etc. 
cluster_network:
-    algorithm: kmeans
+    algorithm: hac
 # choose from: [hac, kmeans]
     feature: solar+onwind-time
   aggregation_zones: 'state' # [balancing_area, state]
   exclude_carriers: []
diff --git a/workflow/rules/solve_electricity.smk b/workflow/rules/solve_electricity.smk
index a447978c..9835770e 100644
--- a/workflow/rules/solve_electricity.smk
+++ b/workflow/rules/solve_electricity.smk
@@ -28,7 +28,7 @@ rule solve_network:
         BENCHMARKS
         + "solve_network/{interconnect}/elec_s_{clusters}_ec_l{ll}_{opts}_{sector}"
     )
-    threads: 4
+    threads: 8
     resources:
         mem_mb=memory,
         walltime=config["solving"].get("walltime", "12:00:00"),

From 9eb0edf3082d9ae27d4532a5f016bd398b046a8a Mon Sep 17 00:00:00 2001
From: ktehranchi <83722342+ktehranchi@users.noreply.github.com>
Date: Mon, 4 Mar 2024 19:37:31 -0800
Subject: [PATCH 4/5] update docs

---
 docs/source/config-wildcards.md   | 37 ++++++++++++++++---------------
 docs/source/configtables/opts.csv | 29 ++++++++++++------------
 2 files changed, 34 insertions(+), 32 deletions(-)

diff --git a/docs/source/config-wildcards.md b/docs/source/config-wildcards.md
index 03c2b91c..7dd5b8c0 100644
--- a/docs/source/config-wildcards.md
+++ b/docs/source/config-wildcards.md
@@ -26,24 +26,6 @@ A visual representation of each `{interconnect}` is shown below:
 :scale: 100 %
 ```

(cutout_wc)=
## The `{cutout}` wildcard

The `{cutout}` wildcard facilitates running the rule :mod:`build_cutout`
for all cutout configurations specified under `atlite: cutouts:`. Each cutout
is described in the form `{dataset}_{year}`. These cutouts will be stored in a
folder specified by `{cutout}`.

Valid dataset names include: `era5`
Valid years can be from `1940` to `2022`

```{note}
Data for `era5_2019` has been prepared for the user and will be automatically downloaded
during the workflow. If other years are needed, the user will need to prepare the
cutout themselves.
```

@@ -104,6 +86,7 @@ currently:
:file: configtables/opts.csv
```

+
(sector)=
## The `{sector}` wildcard

@@ -127,3 +110,21 @@ Used in the following rules:
- `build_heat_demands`
- `build_temperature_profiles`
- `build_solar_thermal_profiles`
+
+
+(cutout_wc)=
+## The `{cutout}` wildcard
+
+The `{cutout}` wildcard facilitates running the rule :mod:`build_cutout`
+for all cutout configurations specified under `atlite: cutouts:`. Each cutout
+is described in the form `{dataset}_{year}`. These cutouts will be stored in a
+folder specified by `{cutout}`.
+
+Valid dataset names include: `era5`
+Valid years can be from `1940` to `2022`
+
+```{note}
+Data for `era5_2019` has been prepared for the user and will be automatically downloaded
+during the workflow. If other years are needed, the user will need to prepare the
+cutout themselves.
+```
diff --git a/docs/source/configtables/opts.csv b/docs/source/configtables/opts.csv
index 8da8c456..36ae207b 100644
--- a/docs/source/configtables/opts.csv
+++ b/docs/source/configtables/opts.csv
@@ -1,14 +1,15 @@
-Trigger, Description, Definition, Status,
-``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `__), In active use,
-``nSEG``; e.g. 
``4380SEG``," ""Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables"," hydro inflow and load.""", ``prepare_network``: apply_time_segmentation(), In active use -``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use, -``RCo2L``, Add regional carbon-dioxide emissions limits configured in ``electricity: regional_Co2_limits``. These can be specified in the file linked at ``electricity: regional_Co2_limits`` in the configuration. File defaults to ``repo_data/regional_co2_limits.csv``., ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use, -``RPS``,"Add Renewable Portfolio Standard (RPS) constraints to shares of generator production (MWh) per carrier for individual countries. Standards can be set for single carriers or groups of carriers. Each constraint can be designated for a specified planning horizon in multi-period models. Opts and path for portfolio_standards.csv must be defined", ``solve_network``, In active use, -``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `__, In active use, -``CCL``,"Add minimum or maximum levels of generator nominal capacity per carrier for individual countries. Each constraint can be designated for a specified planning horizon in multi-period models. Opts and path for agg_p_nom_minmax.csv must be defined", ``solve_network``, In active use, -``EQ``," ""Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.""", ``solve_network``, In active use, -``ATK``," ""Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.""", ``prepare_network``, In active use, -``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested, -``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() `__, Untested, -``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use, -``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use, +Trigger, Description, Definition, Status +``nH``; i.e. 
``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `__), In active use
+``nSEG``; e.g. ``4380SEG``,"Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(),In active use
+``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use
+``RCo2L``, Add regional carbon-dioxide emissions limits configured in ``electricity: regional_Co2_limits``. These can be specified in the file linked at ``electricity: regional_Co2_limits`` in the configuration. File defaults to ``repo_data/regional_co2_limits.csv``., ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use
+``RPS``,Add Renewable Portfolio Standard (RPS) constraints to shares of generator production (MWh) per carrier for individual countries. Standards can be set for single carriers or groups of carriers. Each constraint can be designated for a specified planning horizon in multi-period models. Opts and path for portfolio_standards.csv must be defined, ``solve_network``, In active use
+``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() `__,In active use
+``SAFER``,"Adds a regional capacity reserve margin (SAFE) constraint for a defined region, set as a percentage above peak demand to which renewables and storage do not contribute.", ``solve_network``,In active use
+``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `__, In active use
+``CCL``,Add minimum or maximum levels of generator nominal capacity per carrier for individual countries. Each constraint can be designated for a specified planning horizon in multi-period models. Opts and path for agg_p_nom_minmax.csv must be defined, ``solve_network``, In active use
+``EQ``," ""Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.""", ``solve_network``, In active use
+``ATK``," ""Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.""", ``prepare_network``, In active use
+``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested
+``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. 
Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use +``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use \ No newline at end of file From 5e4bb678b0f4dde7bc9d4097fdf0676202ea8e9f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 5 Mar 2024 03:39:25 +0000 Subject: [PATCH 5/5] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .gitignore | 1 - .test_sh | 2 +- docs/source/configtables/opts.csv | 2 +- docs/source/rules-retrieving-data.md | 2 +- workflow/Snakefile | 1 + workflow/config/config.default.yaml | 2 +- .../policy_constraints/SAFE_regional_prm.csv | 24 ++-- .../policy_constraints/agg_p_nom_minmax.csv | 4 +- .../portfolio_standards.csv | 2 +- .../regional_Co2_limits.csv | 2 +- workflow/config/tests/config.test.yaml | 2 +- workflow/config/tests/config.test_simple.yaml | 2 +- workflow/config/tests/config.validation.yaml | 6 +- workflow/rules/build_electricity.smk | 3 +- workflow/rules/postprocess.smk | 1 - workflow/rules/retrieve.smk | 9 +- workflow/scripts/add_electricity.py | 36 ++++-- workflow/scripts/build_fuel_prices.py | 65 ++++++---- .../scripts/plot_validation_production.py | 36 ++++-- workflow/scripts/prepare_network.py | 48 ++++--- workflow/scripts/retrieve_caiso_data.py | 98 ++++++++++----- workflow/scripts/retrieve_eia_data.py | 1 - workflow/scripts/solve_network_local.py | 119 +++++++++++------- workflow/tests/test_yaml_structure.py | 15 ++- 24 files changed, 304 insertions(+), 179 deletions(-) diff --git a/.gitignore b/.gitignore index 472176b9..fb78b2c6 100644 --- a/.gitignore +++ b/.gitignore @@ -268,4 +268,3 @@ connect.sh config/config.cluster.yaml /workflow/repo_data/dag.png !.pre-commit-config.yaml - diff --git a/.test_sh b/.test_sh index 127d92b8..d8516250 100644 --- a/.test_sh +++ b/.test_sh @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors # ADAPTED FOR PyPSA-USA # SPDX-License-Identifier: CC0-1.0 diff --git a/docs/source/configtables/opts.csv b/docs/source/configtables/opts.csv index 36ae207b..f49fa119 100644 --- a/docs/source/configtables/opts.csv +++ b/docs/source/configtables/opts.csv @@ -12,4 +12,4 @@ Trigger, Description, Definition, Status ``ATK``," ""Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.""", ``prepare_network``, In active use ``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested ``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use -``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. 
``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use
\ No newline at end of file
+``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh thermal)", ``prepare_network``: ``add_gaslimit()``, In active use
diff --git a/docs/source/rules-retrieving-data.md b/docs/source/rules-retrieving-data.md
index 9a81a951..853a8f2b 100644
--- a/docs/source/rules-retrieving-data.md
+++ b/docs/source/rules-retrieving-data.md
@@ -158,4 +158,4 @@ Documentation of the configuration file ``config/config.yaml`` at
 ## Rule `retrieve_caiso_data`
 ```{eval-rst}
 .. automodule:: retrieve_caiso_data
-```
\ No newline at end of file
+```
diff --git a/workflow/Snakefile b/workflow/Snakefile
index 3361abc8..bc70fd99 100644
--- a/workflow/Snakefile
+++ b/workflow/Snakefile
@@ -92,6 +92,7 @@ include: "rules/solve_electricity.smk"
 include: "rules/postprocess.smk"
 include: "rules/validate.smk"

+
 if "E" not in config["scenario"]["sector"]:
     if not config["scenario"]["sector"]:
         config["scenario"]["sector"] = "E"
diff --git a/workflow/config/config.default.yaml b/workflow/config/config.default.yaml
index 2974413f..51b3eb68 100644
--- a/workflow/config/config.default.yaml
+++ b/workflow/config/config.default.yaml
@@ -75,7 +75,7 @@ electricity:
   retirement: economic # "economic" or "technical"
   SAFE_reservemargin: 0.14
   regional_Co2_limits: 'config/policy_constraints/regional_Co2_limits.csv'
-  agg_p_nom_limits: 'config/policy_constraints/agg_p_nom_minmax.csv' 
+  agg_p_nom_limits: 'config/policy_constraints/agg_p_nom_minmax.csv'
   portfolio_standards: 'config/policy_constraints/portfolio_standards.csv'
   SAFE_regional_reservemargins: 'config/policy_constraints/SAFE_regional_prm.csv'
diff --git a/workflow/config/policy_constraints/SAFE_regional_prm.csv b/workflow/config/policy_constraints/SAFE_regional_prm.csv
index 6bcf429f..598bb0ec 100644
--- a/workflow/config/policy_constraints/SAFE_regional_prm.csv
+++ b/workflow/config/policy_constraints/SAFE_regional_prm.csv
@@ -1,12 +1,12 @@
-region, prm
-California, 0.14
-Oregon, 0.14
-Washington, 0.14
-Colorado, 0.10
-New Mexico, 0.14
-Arizona, 0.14
-Utah, 0.19
-Wyoming, 0.14
-Idaho, 0.14
-Texas, 0.14
-Montana, 0.14
\ No newline at end of file
+region, prm
+California, 0.14
+Oregon, 0.14
+Washington, 0.14
+Colorado, 0.10
+New Mexico, 0.14
+Arizona, 0.14
+Utah, 0.19
+Wyoming, 0.14
+Idaho, 0.14
+Texas, 0.14
+Montana, 0.14
diff --git a/workflow/config/policy_constraints/agg_p_nom_minmax.csv b/workflow/config/policy_constraints/agg_p_nom_minmax.csv
index d40016a4..8f9c0890 100644
--- a/workflow/config/policy_constraints/agg_p_nom_minmax.csv
+++ b/workflow/config/policy_constraints/agg_p_nom_minmax.csv
@@ -1,2 +1,2 @@
-planning_horizon,region,carrier,min,max
-2030,California,onwind,0,
\ No newline at end of file
+planning_horizon,region,carrier,min,max
+2030,California,onwind,0,
diff --git a/workflow/config/policy_constraints/portfolio_standards.csv b/workflow/config/policy_constraints/portfolio_standards.csv
index db9b7c56..21ccb497 100644
--- a/workflow/config/policy_constraints/portfolio_standards.csv
+++ b/workflow/config/policy_constraints/portfolio_standards.csv
@@ -12,4 +12,4 @@ planning_horizon,region,carrier,pct
 2040,New Mexico,"onwind, solar, offwind_floating, geothermal, offwind, hydro",0.5
 2040,Nevada,"onwind, solar, offwind_floating, geothermal, 
offwind, hydro",0.5 2040,Oregon,"onwind, solar, offwind_floating, geothermal, offwind, hydro",0.93 -2040,Washington,"onwind, solar, offwind_floating, geothermal, offwind, hydro",1 \ No newline at end of file +2040,Washington,"onwind, solar, offwind_floating, geothermal, offwind, hydro",1 diff --git a/workflow/config/policy_constraints/regional_Co2_limits.csv b/workflow/config/policy_constraints/regional_Co2_limits.csv index 67ba38db..35c0bdbb 100644 --- a/workflow/config/policy_constraints/regional_Co2_limits.csv +++ b/workflow/config/policy_constraints/regional_Co2_limits.csv @@ -1,3 +1,3 @@ region,planning_horizon,limit,import_emissions_factor California,2030,30000000,0.428 -California,2040,18000000,0.428 \ No newline at end of file +California,2040,18000000,0.428 diff --git a/workflow/config/tests/config.test.yaml b/workflow/config/tests/config.test.yaml index b9c3da9e..f5de24b0 100644 --- a/workflow/config/tests/config.test.yaml +++ b/workflow/config/tests/config.test.yaml @@ -78,7 +78,7 @@ electricity: retirement: economic # "economic" or "technical" SAFE_reservemargin: 0.14 regional_Co2_limits: 'config/policy_constraints/regional_Co2_limits.csv' - agg_p_nom_limits: 'config/policy_constraints/agg_p_nom_minmax.csv' + agg_p_nom_limits: 'config/policy_constraints/agg_p_nom_minmax.csv' portfolio_standards: 'config/policy_constraints/portfolio_standards.csv' SAFE_regional_reservemargins: 'config/policy_constraints/SAFE_regional_prm.csv' diff --git a/workflow/config/tests/config.test_simple.yaml b/workflow/config/tests/config.test_simple.yaml index 29441f5e..b9362533 100644 --- a/workflow/config/tests/config.test_simple.yaml +++ b/workflow/config/tests/config.test_simple.yaml @@ -74,7 +74,7 @@ electricity: retirement: economic # "economic" or "technical" SAFE_reservemargin: 0.14 regional_Co2_limits: 'config/policy_constraints/regional_Co2_limits.csv' - agg_p_nom_limits: 'config/policy_constraints/agg_p_nom_minmax.csv' + agg_p_nom_limits: 'config/policy_constraints/agg_p_nom_minmax.csv' portfolio_standards: 'config/policy_constraints/portfolio_standards.csv' SAFE_regional_reservemargins: 'config/policy_constraints/SAFE_regional_prm.csv' diff --git a/workflow/config/tests/config.validation.yaml b/workflow/config/tests/config.validation.yaml index 85ea2c3d..c0c6ed15 100644 --- a/workflow/config/tests/config.validation.yaml +++ b/workflow/config/tests/config.validation.yaml @@ -75,7 +75,7 @@ electricity: retirement: economic # "economic" or "technical" SAFE_reservemargin: 0.14 regional_Co2_limits: 'config/policy_constraints/regional_Co2_limits.csv' - agg_p_nom_limits: 'config/policy_constraints/agg_p_nom_minmax.csv' + agg_p_nom_limits: 'config/policy_constraints/agg_p_nom_minmax.csv' portfolio_standards: '../config/policy_constraints/portfolio_standards.csv' SAFE_regional_reservemargins: 'config/policy_constraints/SAFE_regional_prm.csv' @@ -91,8 +91,8 @@ electricity: extendable_carriers: Generator: [] - StorageUnit: [] - Store: [] + StorageUnit: [] + Store: [] Link: [] demand: #EFS used for given planning_horizons year diff --git a/workflow/rules/build_electricity.smk b/workflow/rules/build_electricity.smk index b737c7f7..636ae6c2 100644 --- a/workflow/rules/build_electricity.smk +++ b/workflow/rules/build_electricity.smk @@ -241,6 +241,7 @@ rule build_demand: script: "../scripts/build_demand.py" + rule build_fuel_prices: params: snapshots=config["snapshots"], @@ -262,7 +263,6 @@ rule build_fuel_prices: "../scripts/build_fuel_prices.py" - rule add_electricity: params: 
length_factor=config["lines"]["length_factor"], @@ -313,7 +313,6 @@ rule add_electricity: demand=RESOURCES + "{interconnect}/demand.csv", fuel_costs="repo_data/eia_mappings/fuelCost22.csv", ng_electric_power_price=RESOURCES + "{interconnect}/ng_fuel_prices.csv", - output: RESOURCES + "{interconnect}/elec_base_network_l_pp.nc", log: diff --git a/workflow/rules/postprocess.smk b/workflow/rules/postprocess.smk index a37d0741..f3598fe2 100644 --- a/workflow/rules/postprocess.smk +++ b/workflow/rules/postprocess.smk @@ -47,4 +47,3 @@ rule plot_figures: mem_mb=5000, script: "../scripts/plot_figures.py" - diff --git a/workflow/rules/retrieve.smk b/workflow/rules/retrieve.smk index c814c8ff..4c03aab9 100644 --- a/workflow/rules/retrieve.smk +++ b/workflow/rules/retrieve.smk @@ -191,17 +191,18 @@ rule retrieve_cost_data_usa: script: "../scripts/retrieve_cost_data_usa.py" + rule retrieve_caiso_data: params: - fuel_year= config['costs']['ng_fuel_year'] + fuel_year=config["costs"]["ng_fuel_year"], input: - fuel_regions = 'repo_data/wecc_fuelregions.xlsx' + fuel_regions="repo_data/wecc_fuelregions.xlsx", output: - fuel_prices=DATA + "costs/ng_caiso_prices.csv" + fuel_prices=DATA + "costs/ng_caiso_prices.csv", log: LOGS + "retrieve_caiso_data.log", shadow: - "minimal", + "minimal" resources: mem_mb=2000, script: diff --git a/workflow/scripts/add_electricity.py b/workflow/scripts/add_electricity.py index baba942a..3ed16fbf 100755 --- a/workflow/scripts/add_electricity.py +++ b/workflow/scripts/add_electricity.py @@ -2,9 +2,9 @@ """ **Description** -This module integrates data produced by `build_renewable_profiles`, `build_demand`, `build_cost_data`, `build_fuel_prices`, and `build_base_network` to create a network model that includes generators, demand, and costs. The module attaches generators, storage units, and loads to the network created by `build_base_network`. Each generator is assigned regional capital costs, and regional and daily or monthly marginal costs. +This module integrates data produced by `build_renewable_profiles`, `build_demand`, `build_cost_data`, `build_fuel_prices`, and `build_base_network` to create a network model that includes generators, demand, and costs. The module attaches generators, storage units, and loads to the network created by `build_base_network`. Each generator is assigned regional capital costs, and regional and daily or monthly marginal costs. -Extendable generators are assigned a maximum capacity based on land-use constraints defined in `build_renewable_profiles`. +Extendable generators are assigned a maximum capacity based on land-use constraints defined in `build_renewable_profiles`. 
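+For example, a gas generator attached to a CISO bus receives the daily CISO fuel-price series from `build_fuel_prices`, divided by its efficiency to give a marginal cost in USD per MWh of electricity.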
**Relevant Settings** @@ -35,7 +35,6 @@ **Outputs** - ``networks/elec_base_network_l_pp.nc`` - """ @@ -328,15 +327,32 @@ def update_marginal_costs( missed = [] for fuel_region_type in ["balancing_area", "state"]: - + # map generators to fuel_region_type (state or BA) bus_region_mapper = n.buses.to_dict()[fuel_region_type] - gen = n.generators[n.generators.carrier == carrier].copy() if fuel_region_type == "balancing_area" else missed - gen[f'{fuel_region_type}'] = gen.bus.map(bus_region_mapper) - gen[f'{fuel_region_type}'] = gen[f'{fuel_region_type}'].replace({"CISO-PGAE": "CISO", "CISO-SCE": "CISO", "CISO-SDGE":"CISO","CISO-VEA":"CISO", "Arizona": "AZPS", "NYISO": "NYISO", "CAISO": "CAISO", "BANC":"BANCSMUD"}) + gen = ( + n.generators[n.generators.carrier == carrier].copy() + if fuel_region_type == "balancing_area" + else missed + ) + gen[f"{fuel_region_type}"] = gen.bus.map(bus_region_mapper) + gen[f"{fuel_region_type}"] = gen[f"{fuel_region_type}"].replace( + { + "CISO-PGAE": "CISO", + "CISO-SCE": "CISO", + "CISO-SDGE": "CISO", + "CISO-VEA": "CISO", + "Arizona": "AZPS", + "NYISO": "NYISO", + "CAISO": "CAISO", + "BANC": "BANCSMUD", + } + ) missed = gen[~gen[fuel_region_type].isin(fuel_costs.columns.unique())] - gen = gen[gen[fuel_region_type].isin(fuel_costs.columns.unique())] #Filter for BAs which we have the fuel price data for + gen = gen[ + gen[fuel_region_type].isin(fuel_costs.columns.unique()) + ] # Filter for BAs which we have the fuel price data for # Can add block here that pulls in the state level data for Missing CAISO data. @@ -350,7 +366,9 @@ def update_marginal_costs( # fuel_costs.set_index(fuel_region_type, inplace=True) for fuel_region in gen[fuel_region_type].unique(): gens_in_region = gen[gen[fuel_region_type] == fuel_region].index.to_list() - dfs.append(pd.DataFrame({gen_: fuel_costs[fuel_region] for gen_ in gens_in_region}),) + dfs.append( + pd.DataFrame({gen_: fuel_costs[fuel_region] for gen_ in gens_in_region}) + ) df = pd.concat(dfs, axis=1) # apply efficiency of each generator to know fuel burn rate diff --git a/workflow/scripts/build_fuel_prices.py b/workflow/scripts/build_fuel_prices.py index b35bdaa4..69f915ab 100644 --- a/workflow/scripts/build_fuel_prices.py +++ b/workflow/scripts/build_fuel_prices.py @@ -19,15 +19,16 @@ **Outputs** - ''data/ng_fuel_prices.csv'': A CSV file containing the hourly fuel prices for each Balancing Authority and State. - """ import pandas as pd import constants as const from _helpers import mock_snakemake, configure_logging + def prepare_eia(eia_fn: str, snapshots: pd.DatetimeIndex = None): - """Cleans EIA fuel price data. + """ + Cleans EIA fuel price data. 
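+    Monthly state-level prices in USD/MCF are converted to USD/MWh_th,
+    reindexed to the hourly snapshot index, and gaps are back-/forward-filled.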
returns: fuel_costs: pd.DataFrame @@ -35,32 +36,44 @@ def prepare_eia(eia_fn: str, snapshots: pd.DatetimeIndex = None): """ fuel_prices = pd.read_csv(eia_fn) fuel_prices["dol_mwh_th"] = fuel_prices["value"] / const.NG_MCF_2_MWH - - fuel_prices['period'] = pd.to_datetime(fuel_prices.period, format="%Y-%m-%d") - fuel_prices["month"] = fuel_prices['period'].dt.month - fuel_prices.drop(columns=['series-description','period', 'units' , 'value'],inplace=True) + fuel_prices["period"] = pd.to_datetime(fuel_prices.period, format="%Y-%m-%d") + fuel_prices["month"] = fuel_prices["period"].dt.month + fuel_prices.drop( + columns=["series-description", "period", "units", "value"], inplace=True + ) year = snapshots[0].year - fuel_prices['month'] = pd.to_datetime(fuel_prices['month'].astype(str) + '-' + str(year), format='%m-%Y').map(lambda dt: dt.replace(year=year)) + fuel_prices["month"] = pd.to_datetime( + fuel_prices["month"].astype(str) + "-" + str(year), format="%m-%Y" + ).map(lambda dt: dt.replace(year=year)) fuel_prices = fuel_prices.rename(columns={"month": "timestep"}) - fuel_prices = fuel_prices.pivot(index='timestep', columns="state", values='dol_mwh_th') + fuel_prices = fuel_prices.pivot( + index="timestep", columns="state", values="dol_mwh_th" + ) fuel_prices = fuel_prices.reindex(snapshots) - fuel_prices = fuel_prices.fillna(method='bfill').fillna(method='ffill') + fuel_prices = fuel_prices.fillna(method="bfill").fillna(method="ffill") return fuel_prices -def prepare_caiso(caiso_fn: str, snapshots: pd.DatetimeIndex= None): +def prepare_caiso(caiso_fn: str, snapshots: pd.DatetimeIndex = None): caiso_ng = pd.read_csv(caiso_fn) caiso_ng["PRC"] = caiso_ng["PRC"] * const.NG_Dol_MMBTU_2_MWH - caiso_ng.rename(columns={"PRC": "dol_mwh_th","Balancing Authority":"balancing_area"}, inplace=True) - + caiso_ng.rename( + columns={"PRC": "dol_mwh_th", "Balancing Authority": "balancing_area"}, + inplace=True, + ) + year = snapshots[0].year - caiso_ng.day_of_year = pd.to_datetime(caiso_ng.day_of_year, format='%j').map(lambda dt: dt.replace(year=year)) + caiso_ng.day_of_year = pd.to_datetime(caiso_ng.day_of_year, format="%j").map( + lambda dt: dt.replace(year=year) + ) caiso_ng = caiso_ng.rename(columns={"day_of_year": "timestep"}) - caiso_ng = caiso_ng.pivot(index='timestep', columns="balancing_area", values='dol_mwh_th') + caiso_ng = caiso_ng.pivot( + index="timestep", columns="balancing_area", values="dol_mwh_th" + ) caiso_ng = caiso_ng.reindex(snapshots) - caiso_ng = caiso_ng.fillna(method='bfill').fillna(method='ffill') + caiso_ng = caiso_ng.fillna(method="bfill").fillna(method="ffill") return caiso_ng @@ -71,26 +84,26 @@ def main(snakemake): sns_end = pd.to_datetime(snapshot_config["end"]) sns_inclusive = snapshot_config["inclusive"] - snapshots= pd.date_range( - freq="h", - start=sns_start, - end=sns_end, - inclusive=sns_inclusive, - ) + snapshots = pd.date_range( + freq="h", + start=sns_start, + end=sns_end, + inclusive=sns_inclusive, + ) - fuel_prices_caiso= prepare_caiso(snakemake.input.caiso_ng_prices, snapshots) + fuel_prices_caiso = prepare_caiso(snakemake.input.caiso_ng_prices, snapshots) fuel_prices_eia = prepare_eia(snakemake.input.eia_ng_prices, snapshots) fuel_prices = pd.concat([fuel_prices_caiso, fuel_prices_eia], axis=1) - fuel_prices.to_csv(snakemake.output.ng_fuel_prices, index=False) - + fuel_prices.to_csv(snakemake.output.ng_fuel_prices, index=False) -if __name__ == '__main__': +if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake + 
snakemake = mock_snakemake("build_fuel_prices", interconnect="western") configure_logging(snakemake) - main(snakemake) \ No newline at end of file + main(snakemake) diff --git a/workflow/scripts/plot_validation_production.py b/workflow/scripts/plot_validation_production.py index 8c1fbb14..ae266144 100644 --- a/workflow/scripts/plot_validation_production.py +++ b/workflow/scripts/plot_validation_production.py @@ -65,8 +65,12 @@ def plot_graphs(n, csv_path_1, csv_path_2, save1, save2, save3): historic, order = historic_df(csv_path_1, csv_path_2, buses) optimized = optimized_df(n, order) fig, axes = plt.subplots(3, 1, figsize=(9, 9)) - optimized.resample("1D").mean().plot.area(ax=axes[0], **kwargs, legend=False, title="Optimized") - historic.resample("1D").mean().plot.area(ax=axes[1], **kwargs,legend=False, title="Historic") + optimized.resample("1D").mean().plot.area( + ax=axes[0], **kwargs, legend=False, title="Optimized" + ) + historic.resample("1D").mean().plot.area( + ax=axes[1], **kwargs, legend=False, title="Historic" + ) diff = (optimized - historic).fillna(0).resample("1D").mean() diff.clip(lower=0).plot.area( @@ -148,7 +152,10 @@ def historic_df(csv_path_1, csv_path_2, buses): date_format="%m/%d/%Y %I:%M:%S %p", usecols=selected_cols, ) - historic_first = historic_first[historic_first.Region.map(EIA_930_REGION_MAPPER) == snakemake.wildcards.interconnect] + historic_first = historic_first[ + historic_first.Region.map(EIA_930_REGION_MAPPER) + == snakemake.wildcards.interconnect + ] historic_second = pd.read_csv( csv_path_2, @@ -158,21 +165,22 @@ def historic_df(csv_path_1, csv_path_2, buses): date_format="%m/%d/%Y %I:%M:%S %p", usecols=selected_cols, ) - historic_second = historic_second[historic_second.Region.map(EIA_930_REGION_MAPPER) == snakemake.wildcards.interconnect] + historic_second = historic_second[ + historic_second.Region.map(EIA_930_REGION_MAPPER) + == snakemake.wildcards.interconnect + ] # Clean the data read from csv historic_first_df = ( - historic_first - .fillna(0) + historic_first.fillna(0) .replace({",": ""}, regex=True) - .drop(columns= "Region", axis=1) + .drop(columns="Region", axis=1) .astype(float) ) historic_second_df = ( - historic_second - .fillna(0) + historic_second.fillna(0) .replace({",": ""}, regex=True) - .drop(columns= "Region", axis=1) + .drop(columns="Region", axis=1) .astype(float) ) historic = ( @@ -182,10 +190,12 @@ def historic_df(csv_path_1, csv_path_2, buses): ) historic = historic.rename(columns=rename_his) - historic[historic<0] = 0 # remove negative values for plotting (low impact on results) + historic[historic < 0] = ( + 0 # remove negative values for plotting (low impact on results) + ) order = (historic.diff().abs().sum() / historic.sum()).sort_values().index historic = historic.reindex(order, axis=1, level=1) - historic = historic/ 1e3 + historic = historic / 1e3 return historic, order @@ -201,7 +211,7 @@ def get_regions(n): if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake( #use Validation config + snakemake = mock_snakemake( # use Validation config "plot_validation_figures", interconnect="western", clusters=40, diff --git a/workflow/scripts/prepare_network.py b/workflow/scripts/prepare_network.py index 97b5133e..de10f762 100644 --- a/workflow/scripts/prepare_network.py +++ b/workflow/scripts/prepare_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors - Adapted for PyPSA-USA # # SPDX-License-Identifier: MIT @@ -81,6 +80,7 @@ 
def add_co2limit(n, co2limit, Nyears=1.0): constant=co2limit * Nyears, ) + def add_regional_co2limit(n, config): """ Add CCL (country & carrier limit) constraint to the network. @@ -101,6 +101,7 @@ def add_regional_co2limit(n, config): electricity: agg_p_nom_limits: data/agg_p_nom_minmax.csv """ + def get_period(n, glc, sns): period = slice(None) if n._multi_invest and not np.isnan(glc["investment_period"]): @@ -108,7 +109,7 @@ def get_period(n, glc, sns): if period not in sns.unique("period"): logger.warning( "Optimized snapshots do not contain the investment " - f"period required for global constraint `{glc.name}`." + f"period required for global constraint `{glc.name}`.", ) return period @@ -118,12 +119,14 @@ def get_period(n, glc, sns): from pypsa.linopt import get_var from pypsa.descriptors import get_switchable_as_dense as get_as_dense - + emissions = n.carriers.co2_emissions gens = n.generators.query("carrier in @emissions.index") if not gens.empty: - efficiency = get_as_dense(n, "Generator", "efficiency", inds=gens.index) #mw_electrical/mw_th - em_pu = gens.carrier.map(emissions) / efficiency #kg_co2/mw_electrical + efficiency = get_as_dense( + n, "Generator", "efficiency", inds=gens.index + ) # mw_electrical/mw_th + em_pu = gens.carrier.map(emissions) / efficiency # kg_co2/mw_electrical em_pu = em_pu.multiply(weightings.generators, axis=0) p = get_var(n, "Generator", "p").loc[sns, gens.index] @@ -132,7 +135,7 @@ def get_period(n, glc, sns): # storage units sus = n.storage_units.query( - "carrier in @emissions.index and " "not cyclic_state_of_charge" + "carrier in @emissions.index and " "not cyclic_state_of_charge", ) sus_i = sus.index if not sus.empty: @@ -167,11 +170,9 @@ def get_period(n, glc, sns): spec=name, ) - - - agg_p_nom_minmax = pd.read_csv( - config["electricity"]["agg_p_nom_limits"], index_col=[0, 1] + config["electricity"]["agg_p_nom_limits"], + index_col=[0, 1], ) logger.info("Adding co2 constraints per state") p_nom = n.model["Generator-p_nom"] @@ -184,16 +185,19 @@ def get_period(n, glc, sns): index = minimum.indexes["group"].intersection(lhs.indexes["group"]) if not index.empty: n.model.add_constraints( - lhs.sel(group=index) >= minimum.loc[index], name="agg_p_nom_min" + lhs.sel(group=index) >= minimum.loc[index], + name="agg_p_nom_min", ) maximum = xr.DataArray(agg_p_nom_minmax["max"].dropna()).rename(dim_0="group") index = maximum.indexes["group"].intersection(lhs.indexes["group"]) if not index.empty: n.model.add_constraints( - lhs.sel(group=index) <= maximum.loc[index], name="agg_p_nom_max" + lhs.sel(group=index) <= maximum.loc[index], + name="agg_p_nom_max", ) + def add_gaslimit(n, gaslimit, Nyears=1.0): sel = n.carriers.index.intersection(["OCGT", "CCGT", "CHP"]) n.carriers.loc[sel, "gas_usage"] = 1.0 @@ -309,7 +313,7 @@ def apply_time_segmentation(n, segments, solver_name="cbc"): import tsam.timeseriesaggregation as tsam except: raise ModuleNotFoundError( - "Optional dependency 'tsam' not found." "Install via 'pip install tsam'" + "Optional dependency 'tsam' not found." 
"Install via 'pip install tsam'", ) p_max_pu_norm = n.generators_t.p_max_pu.max() @@ -340,7 +344,10 @@ def apply_time_segmentation(n, segments, solver_name="cbc"): n.set_snapshots(pd.DatetimeIndex(snapshots, name="name")) n.snapshot_weightings = pd.Series( - weightings, index=snapshots, name="weightings", dtype="float64" + weightings, + index=snapshots, + name="weightings", + dtype="float64", ) segmented.index = snapshots @@ -394,7 +401,10 @@ def set_line_nom_max( snakemake = mock_snakemake( "prepare_network", interconnect="western", - simpl="", clusters="30", ll="v1.15", opts="CO2L0.75-4H" + simpl="", + clusters="30", + ll="v1.15", + opts="CO2L0.75-4H", ) configure_logging(snakemake) @@ -428,7 +438,7 @@ def set_line_nom_max( for o in opts: if "Co2L" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) + m = re.findall(r"[0-9]*\.?[0-9]+$", o) if len(m) > 0: co2limit = float(m[0]) * snakemake.params.co2base add_co2limit(n, co2limit, Nyears) @@ -440,7 +450,7 @@ def set_line_nom_max( for o in opts: if "CH4L" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) + m = re.findall(r"[0-9]*\.?[0-9]+$", o) if len(m) > 0: limit = float(m[0]) * 1e6 add_gaslimit(n, limit, Nyears) @@ -472,11 +482,11 @@ def set_line_nom_max( for o in opts: if "Ept" in o: logger.info( - "Setting time dependent emission prices according spot market price" + "Setting time dependent emission prices according spot market price", ) add_dynamic_emission_prices(n) elif "Ep" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) + m = re.findall(r"[0-9]*\.?[0-9]+$", o) if len(m) > 0: logger.info("Setting emission prices according to wildcard value.") add_emission_prices(n, dict(co2=float(m[0]))) diff --git a/workflow/scripts/retrieve_caiso_data.py b/workflow/scripts/retrieve_caiso_data.py index 4f24eba3..c7636b4d 100644 --- a/workflow/scripts/retrieve_caiso_data.py +++ b/workflow/scripts/retrieve_caiso_data.py @@ -4,7 +4,7 @@ [![URL](https://img.shields.io/badge/URL-CAISO_OASIS)]() **Relevant Settings** - + ```yaml fuel_year: ``` @@ -27,7 +27,9 @@ import seaborn as sns -def download_oasis_report(queryname, startdatetime, enddatetime, version, node='ALL', resultformat='6'): +def download_oasis_report( + queryname, startdatetime, enddatetime, version, node="ALL", resultformat="6" +): """ Download a report from CAISO's OASIS, tailored for fuel prices. @@ -49,49 +51,68 @@ def download_oasis_report(queryname, startdatetime, enddatetime, version, node=' "enddatetime": enddatetime, "version": version, "fuel_region_id": node, # Use 'fuel_region_id' instead of 'node' for clarity - "resultformat": resultformat + "resultformat": resultformat, } - + response = requests.get(base_url, params=params) - + if response.status_code == 200: filename = f"{queryname}_{startdatetime}_{enddatetime}.{resultformat}.zip" - with open(filename, 'wb') as file: + with open(filename, "wb") as file: file.write(response.content) print(f"Report downloaded successfully: {filename}") else: print(f"Failed to download report. Status Code: {response.status_code}") - + + def generate_monthly_intervals(year): - """Generate monthly start and end datetime strings for a given year.""" + """ + Generate monthly start and end datetime strings for a given year. 
+ """ intervals = [] for month in range(1, 13): start_date = datetime(year, month, 1) end_date = (start_date + timedelta(days=31)).replace(day=1) - timedelta(days=1) - intervals.append((start_date.strftime('%Y%m%dT%H:%M-0000'), end_date.strftime('%Y%m%dT%H:%M-0000'))) + intervals.append( + ( + start_date.strftime("%Y%m%dT%H:%M-0000"), + end_date.strftime("%Y%m%dT%H:%M-0000"), + ) + ) return intervals -def step_download_oasis_reports(queryname, version, node='ALL', resultformat='6', year=2019): - """Download and combine OASIS reports for each month of a given year into a single DataFrame.""" + +def step_download_oasis_reports( + queryname, version, node="ALL", resultformat="6", year=2019 +): + """ + Download and combine OASIS reports for each month of a given year into a + single DataFrame. + """ monthly_intervals = generate_monthly_intervals(year) - file_names = [] + file_names = [] for startdatetime, enddatetime in monthly_intervals: - download_oasis_report(queryname, startdatetime, enddatetime, version, node, resultformat) + download_oasis_report( + queryname, startdatetime, enddatetime, version, node, resultformat + ) filename = f"/{queryname}_{startdatetime}_{enddatetime}.{resultformat}.zip" file_names.append(filename) time.sleep(5) return file_names + def combine_reports(file_names, year): - """Combine all reports into a single DataFrame.""" + """ + Combine all reports into a single DataFrame. + """ all_data_frames = [] for file in file_names: - df = pd.read_csv(os.getcwd()+ '/' + file, compression='zip') + df = pd.read_csv(os.getcwd() + "/" + file, compression="zip") all_data_frames.append(df) combined_data = pd.concat(all_data_frames, ignore_index=True) - combined_data.sort_values(by='INTERVALSTARTTIME_GMT', inplace=True) + combined_data.sort_values(by="INTERVALSTARTTIME_GMT", inplace=True) return combined_data @@ -112,23 +133,43 @@ def get_files_starting_with(folder_path, prefix): file_names.append(file_name) return file_names + def merge_fuel_regions_data(combined_data, year): - """Merge the fuel regions with the combined data.""" - df = pd.read_excel(snakemake.input.fuel_regions, sheet_name='GPI_Fuel_Region') - df = df[['Fuel Region', 'Balancing Authority']] - df['Fuel Region'] = df['Fuel Region'].str.strip(' ') + """ + Merge the fuel regions with the combined data. + """ + df = pd.read_excel(snakemake.input.fuel_regions, sheet_name="GPI_Fuel_Region") + df = df[["Fuel Region", "Balancing Authority"]] + df["Fuel Region"] = df["Fuel Region"].str.strip(" ") - combined_data_merged = pd.merge(combined_data, df, left_on='FUEL_REGION_ID', right_on='Fuel Region', how='left') - combined_data_merged.drop(columns=['Fuel Region','FUEL_REGION_ID_XML'], inplace=True) + combined_data_merged = pd.merge( + combined_data, df, left_on="FUEL_REGION_ID", right_on="Fuel Region", how="left" + ) + combined_data_merged.drop( + columns=["Fuel Region", "FUEL_REGION_ID_XML"], inplace=True + ) return combined_data_merged + def reduce_select_pricing_nodes(combined_data_merged): - '''Reduces data to day of year and Balancing Authority. Averages across all pricing nodes for each day of year and Balancing Authority''' - combined_data_merged['day_of_year'] = pd.to_datetime(combined_data_merged['INTERVALSTARTTIME_GMT']).dt.dayofyear + """ + Reduces data to day of year and Balancing Authority. 
+ + Averages across all pricing nodes for each day of year and Balancing + Authority + """ + combined_data_merged["day_of_year"] = pd.to_datetime( + combined_data_merged["INTERVALSTARTTIME_GMT"] + ).dt.dayofyear # avg_doy = combined_data_merged[['day_of_year', 'Balancing Authority','FUEL_REGION_ID','PRC']].groupby(['day_of_year', 'Balancing Authority','FUEL_REGION_ID']).mean() #use this when we want to assign specific pricing nodes - avg_doy = combined_data_merged[['day_of_year', 'Balancing Authority','PRC']].groupby(['day_of_year', 'Balancing Authority']).mean() + avg_doy = ( + combined_data_merged[["day_of_year", "Balancing Authority", "PRC"]] + .groupby(["day_of_year", "Balancing Authority"]) + .mean() + ) return avg_doy + def main(snakemake): fuel_year = snakemake.params.fuel_year @@ -138,7 +179,7 @@ def main(snakemake): version="1", node="ALL", resultformat="6", - year=fuel_year + year=fuel_year, ) combined_data = combine_reports(reports, fuel_year) @@ -146,9 +187,10 @@ def main(snakemake): combined_data_merged = merge_fuel_regions_data(combined_data, year=fuel_year) reduced_fuel_price_data = reduce_select_pricing_nodes(combined_data_merged) - #check to make sure units are in $/MWh_thermal??? or better unit? + # check to make sure units are in $/MWh_thermal??? or better unit? reduced_fuel_price_data.to_csv(snakemake.output.fuel_prices) + if __name__ == "__main__": - main(snakemake) \ No newline at end of file + main(snakemake) diff --git a/workflow/scripts/retrieve_eia_data.py b/workflow/scripts/retrieve_eia_data.py index 81bc721a..b89b6f8a 100644 --- a/workflow/scripts/retrieve_eia_data.py +++ b/workflow/scripts/retrieve_eia_data.py @@ -7,7 +7,6 @@ - ``data/GridEmissions/EIA_DMD_2018_2024.csv`` - ``data/eia/EIA_DMD_*.csv`` - """ import glob diff --git a/workflow/scripts/solve_network_local.py b/workflow/scripts/solve_network_local.py index dbe842d0..67ae95af 100644 --- a/workflow/scripts/solve_network_local.py +++ b/workflow/scripts/solve_network_local.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -70,10 +69,11 @@ def _add_land_use_constraint(n): if len(existing_large): logger.warning( f"Existing capacities larger than technical potential for {existing_large},\ - adjust technical potential to existing capacities" + adjust technical potential to existing capacities", ) n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[ - existing_large, "p_nom_min" + existing_large, + "p_nom_min", ] n.generators.p_nom_max.clip(lower=0, inplace=True) @@ -89,12 +89,7 @@ def _add_land_use_constraint_m(n, planning_horizons, config): for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]: existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"] ind = list( - set( - [ - i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] - for i in existing.index - ] - ) + {i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] for i in existing.index}, ) previous_years = [ @@ -225,8 +220,12 @@ def add_CCL_constraints(n, config): electricity: agg_p_nom_limits: config/policy_constraints/agg_p_nom_minmax.csv """ - agg_p_nom_minmax = pd.read_csv(config["electricity"]["agg_p_nom_limits"], index_col=[1, 2]) - agg_p_nom_minmax = agg_p_nom_minmax[agg_p_nom_minmax.planning_horizon == int(snakemake.params.planning_horizons[0])].drop(columns= 'planning_horizon') + agg_p_nom_minmax = pd.read_csv( + config["electricity"]["agg_p_nom_limits"], index_col=[1, 2] + ) + agg_p_nom_minmax = agg_p_nom_minmax[ + 
agg_p_nom_minmax.planning_horizon == int(snakemake.params.planning_horizons[0]) + ].drop(columns="planning_horizon") logger.info("Adding generation capacity constraints per carrier and country") p_nom = n.model["Generator-p_nom"] @@ -241,14 +240,16 @@ def add_CCL_constraints(n, config): index = minimum.indexes["group"].intersection(lhs.indexes["group"]) if not index.empty: n.model.add_constraints( - lhs.sel(group=index) >= minimum.loc[index], name="agg_p_nom_min" + lhs.sel(group=index) >= minimum.loc[index], + name="agg_p_nom_min", ) maximum = xr.DataArray(agg_p_nom_minmax["max"].dropna()).rename(dim_0="group") index = maximum.indexes["group"].intersection(lhs.indexes["group"]) if not index.empty: n.model.add_constraints( - lhs.sel(group=index) <= maximum.loc[index], name="agg_p_nom_max" + lhs.sel(group=index) <= maximum.loc[index], + name="agg_p_nom_max", ) @@ -270,8 +271,13 @@ def add_RPS_constraints(n, config): electricity: portfolio_standards: config/policy_constraints/portfolio_standards.csv """ - portfolio_standards = pd.read_csv(config["electricity"]["portfolio_standards"], index_col=[1, 2]) - portfolio_standards = portfolio_standards[portfolio_standards.planning_horizon == int(snakemake.params.planning_horizons[0])].drop(columns= 'planning_horizon') + portfolio_standards = pd.read_csv( + config["electricity"]["portfolio_standards"], index_col=[1, 2] + ) + portfolio_standards = portfolio_standards[ + portfolio_standards.planning_horizon + == int(snakemake.params.planning_horizons[0]) + ].drop(columns="planning_horizon") logger.info("Adding generation capacity constraints per carrier and country") p = n.model["Generator-p"] @@ -282,11 +288,11 @@ def add_RPS_constraints(n, config): lhs = p.groupby(grouper).sum().rename(bus="country") pct = xr.DataArray(portfolio_standards["pct"].dropna()).rename(dim_0="group") - new_tuples=[] - for pct_tuple in pct.indexes["group"]: # loop through each RPS policy + new_tuples = [] + for pct_tuple in pct.indexes["group"]: # loop through each RPS policy region, carriers = pct_tuple - carriers_list = [carrier.strip() for carrier in carriers.split(',')] + carriers_list = [carrier.strip() for carrier in carriers.split(",")] if isinstance(carriers_list, list): # Create a new tuple for each energy type and append to new list for carrier in carriers_list: @@ -295,19 +301,20 @@ def add_RPS_constraints(n, config): # If it's not a list, just append the original tuple new_tuples.append(pct_tuple) - new_multi_index = pd.MultiIndex.from_tuples(new_tuples, names=['region','carrier']) + new_multi_index = pd.MultiIndex.from_tuples( + new_tuples, names=["region", "carrier"] + ) index = new_multi_index.intersection(lhs.indexes["group"]) if not index.empty: logger.info(f"Adding RPS constraint for {region}") n.model.add_constraints( lhs.sel(group=index).sum() - >= pct.loc[region].values[0] * - (lhs.sel(group=region).sum()), name=f"portfolio_standard_{region}" + >= pct.loc[region].values[0] * (lhs.sel(group=region).sum()), + name=f"portfolio_standard_{region}", ) - def add_EQ_constraints(n, o, scaling=1e-1): """ Add equity constraints to the network. @@ -332,7 +339,7 @@ def add_EQ_constraints(n, o, scaling=1e-1): each node to produce on average at least 70% of its consumption. """ # TODO: Generalize to cover myopic and other sectors? 
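    # e.g. "EQ0.7c" -> level=0.7 with per-country grouping; a bare "EQ0.7" groups per node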
- float_regex = "[0-9]*\.?[0-9]+" + float_regex = r"[0-9]*\.?[0-9]+" level = float(re.findall(float_regex, o)[0]) if o[-1] == "c": ggrouper = n.generators.bus.map(n.buses.country) @@ -417,14 +424,20 @@ def add_regional_co2limit(n, config): from pypsa.linopt import get_var from pypsa.descriptors import get_switchable_as_dense as get_as_dense - - regional_co2_lims = pd.read_csv(config["electricity"]["regional_Co2_limits"], index_col=[0]) + + regional_co2_lims = pd.read_csv( + config["electricity"]["regional_Co2_limits"], index_col=[0] + ) logger.info("Adding regional Co2 Limits.") - regional_co2_lims = regional_co2_lims[regional_co2_lims.planning_horizon == int(snakemake.params.planning_horizons[0])] + regional_co2_lims = regional_co2_lims[ + regional_co2_lims.planning_horizon == int(snakemake.params.planning_horizons[0]) + ] for region in regional_co2_lims.index: region_co2lim = regional_co2_lims.loc[region].limit - EF_unspecified = regional_co2_lims.loc[region].import_emissions_factor # if not none #MT CO₂e/MWh_elec + EF_unspecified = regional_co2_lims.loc[ + region + ].import_emissions_factor # if not none #MT CO₂e/MWh_elec emissions = n.carriers.co2_emissions # generators @@ -432,10 +445,14 @@ def add_regional_co2limit(n, config): region_gens = region_gens.query("carrier in @emissions.index") if not region_gens.empty: - efficiency = get_as_dense(n, "Generator", "efficiency",inds= region_gens.index) #mw_elect/mw_th - em_pu = region_gens.carrier.map(emissions) / efficiency #kg_co2/mw_electrical + efficiency = get_as_dense( + n, "Generator", "efficiency", inds=region_gens.index + ) # mw_elect/mw_th + em_pu = ( + region_gens.carrier.map(emissions) / efficiency + ) # kg_co2/mw_electrical em_pu = em_pu.multiply(weightings.generators, axis=0) - p = (n.model["Generator-p"].loc[:, region_gens.index]) + p = n.model["Generator-p"].loc[:, region_gens.index] lhs = (p * em_pu).sum() # # Imports @@ -450,7 +467,7 @@ def add_regional_co2limit(n, config): # inter_regional_flows = (n.model["Line-s"].loc[:, inter_regional_lines.index]) # regional_imports = np.max(inter_regional_flows, 0) # inter_regional_imports = inter_regional_flows.where(inter_regional_flows <= 0) - + # #this causes no line flow # # inter_regional_imports = inter_regional_flows.where(inter_regional_flows >= 0) #this causes no line flow # # lhs = (inter_regional_imports).sum() @@ -459,6 +476,7 @@ def add_regional_co2limit(n, config): rhs = region_co2lim n.model.add_constraints(lhs <= rhs, name=f"{region}_co2_limit") + def add_SAFE_constraints(n, config): """ Add a capacity reserve margin of a certain fraction above the peak demand. @@ -484,12 +502,12 @@ def add_SAFE_constraints(n, config): reserve_margin = peakdemand * margin conventional_carriers = config["electricity"]["conventional_carriers"] ext_gens_i = n.generators.query( - "carrier in @conventional_carriers & p_nom_extendable" + "carrier in @conventional_carriers & p_nom_extendable", ).index p_nom = n.model["Generator-p_nom"].loc[ext_gens_i] lhs = p_nom.sum() exist_conv_caps = n.generators.query( - "~p_nom_extendable & carrier in @conventional_carriers" + "~p_nom_extendable & carrier in @conventional_carriers", ).p_nom.sum() rhs = reserve_margin - exist_conv_caps n.model.add_constraints(lhs >= rhs, name="safe_mintotalcap") @@ -515,21 +533,25 @@ def add_regional_SAFE_constraints(n, config): SAFE_reservemargin: 0.1 Which sets a reserve margin of 10% above the peak demand. 
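    Peak demand is taken per region from ``n.loads_t.p_set``; only conventional
    capacity (existing plus newly built) counts toward meeting the margin.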
""" - regional_prm = pd.read_csv(config["electricity"]["SAFE_regional_reservemargins"], index_col=[0]) + regional_prm = pd.read_csv( + config["electricity"]["SAFE_regional_reservemargins"], index_col=[0] + ) for region in regional_prm.index: - peakdemand = n.loads_t.p_set.loc[:,n.loads.bus.str.contains(region)].sum(axis=1).max() + peakdemand = ( + n.loads_t.p_set.loc[:, n.loads.bus.str.contains(region)].sum(axis=1).max() + ) margin = 1.0 + regional_prm.loc[region].item() reserve_margin = peakdemand * margin conventional_carriers = config["electricity"]["conventional_carriers"] region_gens = n.generators[n.generators.bus.str.contains(region)] ext_gens_i = region_gens.query( - "carrier in @conventional_carriers & p_nom_extendable" + "carrier in @conventional_carriers & p_nom_extendable", ).index p_nom = n.model["Generator-p_nom"].loc[ext_gens_i] lhs = p_nom.sum() exist_conv_caps = region_gens.query( - "~p_nom_extendable & carrier in @conventional_carriers" + "~p_nom_extendable & carrier in @conventional_carriers", ).p_nom.sum() rhs = reserve_margin - exist_conv_caps n.model.add_constraints(lhs >= rhs, name=f"safe_mintotalcap_{region}") @@ -562,7 +584,10 @@ def add_operational_reserve_margin(n, sns, config): # Reserve Variables n.model.add_variables( - 0, np.inf, coords=[sns, n.generators.index], name="Generator-r" + 0, + np.inf, + coords=[sns, n.generators.index], + name="Generator-r", ) reserve = n.model["Generator-r"] summed_reserve = reserve.sum("Generator") @@ -578,7 +603,7 @@ def add_operational_reserve_margin(n, sns, config): .rename({"Generator-ext": "Generator"}) ) lhs = summed_reserve + (p_nom_vres * (-EPSILON_VRES * capacity_factor)).sum( - "Generator" + "Generator", ) # Total demand per t @@ -603,7 +628,7 @@ def add_operational_reserve_margin(n, sns, config): reserve = n.model["Generator-r"] capacity_variable = n.model["Generator-p_nom"].rename( - {"Generator-ext": "Generator"} + {"Generator-ext": "Generator"}, ) capacity_fixed = n.generators.p_nom[fix_i] @@ -699,7 +724,7 @@ def add_pipe_retrofit_constraint(n): """ gas_pipes_i = n.links.query("carrier == 'gas pipeline' and p_nom_extendable").index h2_retrofitted_i = n.links.query( - "carrier == 'H2 pipeline retrofitted' and p_nom_extendable" + "carrier == 'H2 pipeline retrofitted' and p_nom_extendable", ).index if h2_retrofitted_i.empty or gas_pipes_i.empty: @@ -758,7 +783,8 @@ def solve_network(n, config, solving, opts="", **kwargs): kwargs["extra_functionality"] = extra_functionality kwargs["transmission_losses"] = cf_solving.get("transmission_losses", False) kwargs["linearized_unit_commitment"] = cf_solving.get( - "linearized_unit_commitment", False + "linearized_unit_commitment", + False, ) kwargs["assign_all_duals"] = cf_solving.get("assign_all_duals", False) @@ -784,12 +810,12 @@ def solve_network(n, config, solving, opts="", **kwargs): kwargs["min_iterations"] = (cf_solving.get("min_iterations", 4),) kwargs["max_iterations"] = (cf_solving.get("max_iterations", 6),) status, condition = n.optimize.optimize_transmission_expansion_iteratively( - **kwargs + **kwargs, ) if status != "ok" and not rolling_horizon: logger.warning( - f"Solving status '{status}' with termination condition '{condition}'" + f"Solving status '{status}' with termination condition '{condition}'", ) if "infeasible" in condition: raise RuntimeError("Solving status 'infeasible'") @@ -810,12 +836,13 @@ def solve_network(n, config, solving, opts="", **kwargs): sector_opts="", sector="E", planning_horizons="2030", - interconnect="western" + 
         )
     configure_logging(snakemake)
     if "sector_opts" in snakemake.wildcards.keys():
         update_config_with_sector_opts(
-            snakemake.config, snakemake.wildcards.sector_opts
+            snakemake.config,
+            snakemake.wildcards.sector_opts,
         )

     opts = snakemake.wildcards.opts
diff --git a/workflow/tests/test_yaml_structure.py b/workflow/tests/test_yaml_structure.py
index 958da1d8..76b0b5e1 100644
--- a/workflow/tests/test_yaml_structure.py
+++ b/workflow/tests/test_yaml_structure.py
@@ -16,7 +16,9 @@ def compare_structures(data1, data2, path="", yaml_name=""):
     if isinstance(data1, dict):
         for key in data1:
             if key not in data2:
-                print(f"Missing key '{key}' in second structure at {path} in {yaml_name}")
+                print(
+                    f"Missing key '{key}' in second structure at {path} in {yaml_name}"
+                )
                 continue
             compare_structures(
                 data1[key],
@@ -26,12 +28,16 @@ def compare_structures(data1, data2, path="", yaml_name=""):
         for key in data2:
             if key not in data1:
-                print(f"Missing key '{key}' in first structure at {path} in {yaml_name}")
+                print(
+                    f"Missing key '{key}' in first structure at {path} in {yaml_name}"
+                )
         return True
     elif isinstance(data1, list):
         # For simplicity, just compare the first item if it exists, assuming homogeneous lists
         if data1 and data2:
-            compare_structures(data1[0], data2[0], path=f"{path}[0]", yaml_name=yaml_name)
+            compare_structures(
+                data1[0], data2[0], path=f"{path}[0]", yaml_name=yaml_name
+            )
         elif not data1 and data2 or data1 and not data2:
             print(f"List length mismatch or one is empty at {path} in {yaml_name}")
         return True
@@ -50,6 +56,7 @@ def test_yaml_structure(filepath1, filepath2):
     else:
         print(f"The structures in {filepath1} and {filepath2} do not match.")

+
 print("\n")
 test_yaml_structure(
     "../config/tests/config.test_simple.yaml",
@@ -67,4 +74,4 @@
     "../config/tests/config.validation.yaml",
     "../config/config.default.yaml",
 )
-print("\n")
\ No newline at end of file
+print("\n")
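
Since test_yaml_structure.py drives everything through module-level prints, a minimal sketch of an assertion-based variant for pytest (assuming the module is importable and the suite is run from workflow/tests so the relative config paths resolve; the test name is illustrative):

    import pytest

    from test_yaml_structure import compare_structures, load_yaml_file

    DEFAULT_CONFIG = "../config/config.default.yaml"

    @pytest.mark.parametrize(
        "test_config",
        [
            "../config/tests/config.test_simple.yaml",
            "../config/tests/config.test.yaml",
            "../config/tests/config.validation.yaml",
        ],
    )
    def test_structure_matches_default(test_config):
        # compare_structures only returns False on a top-level type mismatch;
        # nested mismatches are printed, so this is a coarse smoke check.
        assert compare_structures(
            load_yaml_file(test_config),
            load_yaml_file(DEFAULT_CONFIG),
            yaml_name=test_config,
        )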