Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Ext data export, Variable defaults, and Data File Scenario Filtering #38

Merged
merged 5 commits into from
Sep 18, 2024
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 7 additions & 2 deletions src/r2x/api.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""R2X API for data model."""

import csv
import json
from collections.abc import Callable
from os import PathLike
from pathlib import Path
Expand Down Expand Up @@ -51,7 +52,7 @@ def export_component_to_csv(
# Get desired components to offload to csv
components = map(
lambda component: component.model_dump(
exclude={"ext"}, exclude_none=True, mode="json", context={"magnitude_only": True}
exclude={}, exclude_none=True, mode="json", context={"magnitude_only": True}
),
self.get_components(component, filter_func=filter_func),
)
Expand Down Expand Up @@ -163,7 +164,11 @@ def _export_dict_to_csv(
writer.writeheader()
for row in data:
filter_row = {
key: value if not isinstance(value, dict) else value.get(unnest_key)
key: json.dumps(value)
if key == "ext" and isinstance(value, dict)
else value
if not isinstance(value, dict)
else value.get(unnest_key)
for key, value in row.items()
ktehranchi marked this conversation as resolved.
Show resolved Hide resolved
}
writer.writerow(filter_row)
Expand Down
20 changes: 4 additions & 16 deletions src/r2x/exporter/sienna.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,16 +147,11 @@ def process_branch_data(self, fname: str = "branch.csv") -> None:
"b",
"rate",
"branch_type",
"rating_up",
"rating_down",
"ext",
]

# NOTE: We need to decide what we do if the user provides a rate or bi-directional rate
# if "rate" in output_df.columns:
# output_df["rate"] = output_df["rate"].fillna(
# (output_df["rating_up"] + np.abs(output_df["rating_down"])) / 2
# )
# else:
# output_df["rate"] = (output_df["rating_up"] + np.abs(output_df["rating_down"])) / 2

self.system.export_component_to_csv(
ACBranch,
fpath=self.output_folder / fname,
Expand All @@ -168,8 +163,8 @@ def process_branch_data(self, fname: str = "branch.csv") -> None:
"class_type": "branch_type",
"rating": "rate",
"b": "primary_shunt",
"ext": "ext",
ktehranchi marked this conversation as resolved.
Show resolved Hide resolved
},
# restval=0.0,
)
logger.info(f"File {fname} created.")

Expand All @@ -189,13 +184,6 @@ def process_dc_branch_data(self, fname="dc_branch.csv") -> None:
"loss",
]

# NOTE: We need to decide what we do if the user provides a rate or bi-directional rate
# if "rate" in output_df.columns:
# output_df["rate"] = output_df["rate"].fillna(
# (output_df["rating_up"] + np.abs(output_df["rating_down"])) / 2
# )
# else:
# output_df["rate"] = (output_df["rating_up"] + np.abs(output_df["rating_down"])) / 2
self.system.export_component_to_csv(
DCBranch,
fpath=self.output_folder / fname,
Expand Down
4 changes: 4 additions & 0 deletions src/r2x/parser/parser_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@ def prepare_ext_field(valid_fields, extra_fields):
"""Cleanses the extra fields by removing any timeseries data"""
if extra_fields:
# Implement any filtering of ext_data here
# logger.debug("Extra fields: {}", extra_fields)
# remove any non eligible datatypes from extra fields
eligible_datatypes = [str, int, float, bool]
extra_fields = {k: v for k, v in extra_fields.items() if type(v) in eligible_datatypes}
valid_fields["ext"] = extra_fields
else:
valid_fields["ext"] = {}
Expand Down
76 changes: 69 additions & 7 deletions src/r2x/parser/plexos.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@
"timeslice_tag": pl.String,
"timeslice": pl.String,
"timeslice_value": pl.Float32,
"data_text": pl.String,
}
COLUMNS = [
"name",
Expand Down Expand Up @@ -285,6 +286,7 @@ def _get_fuel_prices(self):
"variable",
"action",
"variable_tag",
"variable_default",
"timeslice",
"timeslice_value",
]
Expand Down Expand Up @@ -420,7 +422,7 @@ def _construct_branches(self, default_model=MonitoredLine):
)
for line in lines_pivot.iter_rows(named=True):
line_properties_mapped = {self.property_map.get(key, key): value for key, value in line.items()}
line_properties_mapped["rating"] = line_properties_mapped.pop("max_power_flow", None)
line_properties_mapped["rating"] = line_properties_mapped.get("max_power_flow", None)
line_properties_mapped["rating_up"] = line_properties_mapped.pop("max_power_flow", None)
line_properties_mapped["rating_down"] = line_properties_mapped.pop("min_power_flow", None)

Expand Down Expand Up @@ -488,7 +490,6 @@ def _construct_generators(self):
system_generators = (pl.col("child_class_name") == ClassEnum.Generator.name) & (
pl.col("parent_class_name") == ClassEnum.System.name
)

system_generators = self._get_model_data(system_generators)
if getattr(self.config.feature_flags, "plexos-csv", None):
system_generators.write_csv("generators.csv")
Expand Down Expand Up @@ -536,6 +537,7 @@ def _construct_generators(self):
"variable",
"action",
"variable_tag",
"variable_default",
ktehranchi marked this conversation as resolved.
Show resolved Hide resolved
"timeslice",
"timeslice_value",
]
Expand Down Expand Up @@ -650,7 +652,6 @@ def _add_generator_reserves(self):
)
continue
reserve_map.mapping[reserve_object.name].append(generator.name)

return

def _construct_batteries(self):
Expand All @@ -677,6 +678,7 @@ def _construct_batteries(self):
"variable",
"action",
"variable_tag",
"variable_default",
"timeslice",
"timeslice_value",
]
Expand All @@ -694,7 +696,6 @@ def _construct_batteries(self):
mapped_records["prime_mover_type"] = PrimeMoversType.BA

valid_fields, ext_data = field_filter(mapped_records, GenericBattery.model_fields)

valid_fields = self._set_unit_availability(valid_fields)
if valid_fields is None:
continue
Expand Down Expand Up @@ -754,6 +755,7 @@ def _add_battery_reserves(self):
parent_class=ClassEnum.Reserve,
collection=CollectionEnum.Batteries,
)

for battery in self.system.get_components(GenericBattery):
reserves = [membership for membership in generator_memberships if membership[3] == battery.name]
if reserves:
Expand Down Expand Up @@ -1072,7 +1074,7 @@ def _get_model_data(self, data_filter) -> pl.DataFrame:
variable_filter = (
(pl.col("child_class_name") == ClassEnum.Variable.name)
& (pl.col("parent_class_name") == ClassEnum.System.name)
& (pl.col("data_file").is_not_null())
& (pl.col("property_name") != "Sampling Method")
)
variable_scenario_data = None
if scenario_specific_data is not None and scenario_filter is not None:
Expand All @@ -1084,8 +1086,65 @@ def _get_model_data(self, data_filter) -> pl.DataFrame:
variable_base_data = self.plexos_data.filter(variable_filter & base_case_filter)
if variable_base_data is not None and variable_scenario_data is not None:
variable_data = pl.concat([variable_scenario_data, variable_base_data])
system_data = self._join_variable_data(system_data, variable_data)

# Get System Data Files
# drop column named data_file and replace it with correct scenario-filtered datafile
# system_data.drop_in_place("data_file")
datafile_data = None
datafile_filter = (pl.col("child_class_name") == ClassEnum.DataFile.value) & (
pl.col("parent_class_name") == ClassEnum.System.name
)
datafile_scenario_data = None
if scenario_specific_data is not None and scenario_filter is not None:
datafile_scenario_data = self.plexos_data.filter(datafile_filter & scenario_filter)

return self._join_variable_data(system_data, variable_data)
if datafile_scenario_data is not None:
datafile_base_data = self.plexos_data.filter(datafile_filter & pl.col("scenario").is_null())
else:
datafile_base_data = self.plexos_data.filter(datafile_filter & base_case_filter)
if datafile_base_data is not None and datafile_scenario_data is not None:
datafile_data = pl.concat([datafile_scenario_data, datafile_base_data])
system_data = self._join_datafile_data(system_data, datafile_data)
return system_data

def _join_datafile_data(self, system_data, datafile_data):
    """Join system data with the scenario-filtered Data File objects.

    For each Data File name in ``datafile_data`` a single row is selected:
    a row carrying a scenario wins; otherwise the row with the lowest
    ``band`` value is used. The selected row's ``data_text`` then replaces
    the ``data_file`` column of ``system_data`` (joined on
    ``data_file_tag``).

    Parameters
    ----------
    system_data : pl.DataFrame
        Model rows that may reference a Data File via ``data_file_tag``.
    datafile_data : pl.DataFrame | None
        Candidate Data File rows, or ``None``/empty when none were found.

    Returns
    -------
    pl.DataFrame
        ``system_data`` with ``data_file`` replaced by the filtered values,
        or with a null ``data_file_sc`` column when there is nothing to
        join.
    """
    # Guard against None as well as an empty frame: the caller leaves
    # datafile_data as None when no scenario/base Data File rows exist,
    # which previously crashed on `.height`.
    if datafile_data is None or datafile_data.height == 0:
        # NOTE: We might want to add this column once at the system level
        # instead of in each function call.
        return system_data.with_columns(pl.lit(None).alias("data_file_sc"))

    results = []
    for group_name, group_df in datafile_data.group_by("name"):
        if group_df.height > 1:
            # Prefer a scenario-specific row when one exists.
            scenario_rows = group_df.filter(pl.col("scenario").is_not_null())
            if scenario_rows.height > 0:
                # Select the first row that carries a scenario.
                selected_row = scenario_rows[0]
            else:
                # No scenario: select the row with the lowest band value.
                selected_row = group_df.sort("band").head(1)[0]
        else:
            # Single row in the group: use it as-is.
            selected_row = group_df[0]
        results.append(
            {
                "name": group_name[0],
                "data_file_sc": selected_row["data_text"][0],
            }
        )

    datafiles_filtered = pl.DataFrame(results)
    system_data = system_data.join(
        datafiles_filtered, left_on="data_file_tag", right_on="name", how="left"
    )
    # Replace the original data_file column with the scenario-filtered one.
    system_data.drop_in_place("data_file")
    return system_data.rename({"data_file_sc": "data_file"})

def _join_variable_data(self, system_data, variable_data):
"""Join system data with variable data."""
Expand All @@ -1107,12 +1166,12 @@ def _join_variable_data(self, system_data, variable_data):
else:
# If the group has only one row, select that row
selected_row = group_df[0]

results.append(
{
"name": group_name[0],
"variable_name": selected_row["data_file_tag"][0],
"variable": selected_row["data_file"][0],
"variable_default": selected_row["property_value"][0],
ktehranchi marked this conversation as resolved.
Show resolved Hide resolved
}
)
variables_filtered = pl.DataFrame(results)
Expand Down Expand Up @@ -1387,9 +1446,12 @@ def _get_value(self, prop_value, unit, record, record_name):
if data_file is None and record.get("data_file"):
return None

var_default = record.get("variable_default")
variable = (
self._csv_file_handler(record.get("variable_tag"), record.get("variable"))
if record.get("variable")
else var_default
if var_default != 0
else None
)

Expand Down