diff --git a/.github/workflows/sonar.yml b/.github/workflows/sonar.yml index 25c3f46cd..37ed8ded6 100644 --- a/.github/workflows/sonar.yml +++ b/.github/workflows/sonar.yml @@ -20,7 +20,6 @@ concurrency: jobs: sonar-cloud: - if: (github.event_name == 'push') || (!startsWith(github.head_ref, 'release')) name: SonarCloud runs-on: ubuntu-24.04 env: diff --git a/VERSION b/VERSION index 2e0e38c63..c044b1a32 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.9 +1.10 diff --git a/code_generation/templates/src/power_grid_model/core/dataset_class_maps.py.jinja b/code_generation/templates/src/power_grid_model/_core/dataset_class_maps.py.jinja similarity index 100% rename from code_generation/templates/src/power_grid_model/core/dataset_class_maps.py.jinja rename to code_generation/templates/src/power_grid_model/_core/dataset_class_maps.py.jinja diff --git a/docs/examples/Power Flow Example.ipynb b/docs/examples/Power Flow Example.ipynb index 9662e4aaf..ab33b40e4 100644 --- a/docs/examples/Power Flow Example.ipynb +++ b/docs/examples/Power Flow Example.ipynb @@ -447,8 +447,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "List of component types in result dataset and associated data types\n", - "{'ComponentType.node': , 'ComponentType.line': , 'ComponentType.sym_load': }\n", + "List of component types in result dataset\n", + "[, , ]\n", "------node result------\n", "('id', 'energized', 'u_pu', 'u', 'u_angle', 'p', 'q')\n", "------line result attributes------\n", @@ -471,8 +471,8 @@ " },\n", ")\n", "\n", - "print(\"List of component types in result dataset and associated data types\")\n", - "print({str(component_type): type(component_data) for component_type, component_data in output_data.items()})\n", + "print(\"List of component types in result dataset\")\n", + "print(list(output_data.keys()))\n", "print(\"------node result------\")\n", "print(output_data[ComponentType.node].dtype.names)\n", "print(\"------line result attributes------\")\n", @@ -499,8 +499,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "List of component types in result dataset and associated data types\n", - "{'ComponentType.node': , 'ComponentType.line': , 'ComponentType.sym_load': , 'ComponentType.source': }\n", + "List of component types in result dataset\n", + "[, , , ]\n", "------node result------\n", "['id', 'energized', 'u_pu', 'u', 'u_angle', 'p', 'q']\n", "------line result attributes------\n", @@ -519,8 +519,8 @@ " output_component_types=ComponentAttributeFilterOptions.everything, # all attributes for all component types as columns\n", ")\n", "\n", - "print(\"List of component types in result dataset and associated data types\")\n", - "print({str(component_type): type(component_data) for component_type, component_data in output_data.items()})\n", + "print(\"List of component types in result dataset\")\n", + "print(list(output_data.keys()))\n", "print(\"------node result------\")\n", "print(list(output_data[ComponentType.node].keys()))\n", "print(\"------line result attributes------\")\n", @@ -1430,6 +1430,7 @@ "# we run the batch calculation with continue_on_batch_error=True,\n", "# it will return the results with partially valid data\n", "\n", + "\n", "output_data = model.calculate_power_flow(update_data=time_series_mutation, continue_on_batch_error=True)\n", "\n", "# print node data for u_pu, note that the data is rubbish for scenario 3 and 7\n", diff --git a/docs/examples/Validation Examples.ipynb b/docs/examples/Validation Examples.ipynb index 3036a962e..e47ba6fba 100644 --- 
a/docs/examples/Validation Examples.ipynb +++ b/docs/examples/Validation Examples.ipynb @@ -113,8 +113,8 @@ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mIDWrongType\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[2], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Without validation\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mPowerGridModel\u001b[49m\u001b[43m(\u001b[49m\u001b[43merror_data\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3\u001b[0m output_data \u001b[38;5;241m=\u001b[39m model\u001b[38;5;241m.\u001b[39mcalculate_state_estimation(symmetric\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n", - "File \u001b[0;32m~/pgm/power-grid-model/src/power_grid_model/core/power_grid_model.py:125\u001b[0m, in \u001b[0;36mPowerGridModel.__init__\u001b[0;34m(self, input_data, system_frequency)\u001b[0m\n\u001b[1;32m 123\u001b[0m prepared_input \u001b[38;5;241m=\u001b[39m prepare_input_view(_map_to_component_types(input_data))\n\u001b[1;32m 124\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_model_ptr \u001b[38;5;241m=\u001b[39m pgc\u001b[38;5;241m.\u001b[39mcreate_model(system_frequency, input_data\u001b[38;5;241m=\u001b[39mprepared_input\u001b[38;5;241m.\u001b[39mget_dataset_ptr())\n\u001b[0;32m--> 125\u001b[0m \u001b[43massert_no_error\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_all_component_count \u001b[38;5;241m=\u001b[39m {k: v \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m prepared_input\u001b[38;5;241m.\u001b[39mget_info()\u001b[38;5;241m.\u001b[39mtotal_elements()\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m v \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m}\n", - "File \u001b[0;32m~/pgm/power-grid-model/src/power_grid_model/core/error_handling.py:169\u001b[0m, in \u001b[0;36massert_no_error\u001b[0;34m(batch_size, decode_error)\u001b[0m\n\u001b[1;32m 167\u001b[0m error \u001b[38;5;241m=\u001b[39m find_error(batch_size\u001b[38;5;241m=\u001b[39mbatch_size, decode_error\u001b[38;5;241m=\u001b[39mdecode_error)\n\u001b[1;32m 168\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m error \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 169\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m error\n", + "File \u001b[0;32m~/pgm/power-grid-model/src/power_grid_model/_core/power_grid_model.py:125\u001b[0m, in \u001b[0;36mPowerGridModel.__init__\u001b[0;34m(self, input_data, system_frequency)\u001b[0m\n\u001b[1;32m 123\u001b[0m prepared_input \u001b[38;5;241m=\u001b[39m prepare_input_view(_map_to_component_types(input_data))\n\u001b[1;32m 124\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_model_ptr \u001b[38;5;241m=\u001b[39m pgc\u001b[38;5;241m.\u001b[39mcreate_model(system_frequency, input_data\u001b[38;5;241m=\u001b[39mprepared_input\u001b[38;5;241m.\u001b[39mget_dataset_ptr())\n\u001b[0;32m--> 125\u001b[0m \u001b[43massert_no_error\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_all_component_count \u001b[38;5;241m=\u001b[39m {k: v \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m 
prepared_input\u001b[38;5;241m.\u001b[39mget_info()\u001b[38;5;241m.\u001b[39mtotal_elements()\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m v \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m}\n", + "File \u001b[0;32m~/pgm/power-grid-model/src/power_grid_model/_core/error_handling.py:169\u001b[0m, in \u001b[0;36massert_no_error\u001b[0;34m(batch_size, decode_error)\u001b[0m\n\u001b[1;32m 167\u001b[0m error \u001b[38;5;241m=\u001b[39m find_error(batch_size\u001b[38;5;241m=\u001b[39mbatch_size, decode_error\u001b[38;5;241m=\u001b[39mdecode_error)\n\u001b[1;32m 168\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m error \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 169\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m error\n", "\u001b[0;31mIDWrongType\u001b[0m: Wrong type for object with id 4\n\nTry validate_input_data() or validate_batch_data() to validate your data.\n" ] } diff --git a/setup.py b/setup.py index 0a4fdbcd0..82a63d362 100644 --- a/setup.py +++ b/setup.py @@ -208,7 +208,7 @@ def generate_build_ext(pkg_dir: Path, pkg_name: str): # list of extensions exts = [ CTypesExtension( - name="power_grid_model.core._power_grid_core", + name="power_grid_model._core._power_grid_core", sources=sources, include_dirs=include_dirs, library_dirs=library_dirs, diff --git a/src/power_grid_model/__init__.py b/src/power_grid_model/__init__.py index a388541bb..c22dbdb35 100644 --- a/src/power_grid_model/__init__.py +++ b/src/power_grid_model/__init__.py @@ -4,9 +4,9 @@ """Power Grid Model""" -from power_grid_model.core.dataset_definitions import ComponentType, DatasetType -from power_grid_model.core.power_grid_meta import initialize_array, power_grid_meta_data -from power_grid_model.core.power_grid_model import PowerGridModel +from power_grid_model._core.dataset_definitions import ComponentType, DatasetType +from power_grid_model._core.power_grid_meta import initialize_array, power_grid_meta_data +from power_grid_model._core.power_grid_model import PowerGridModel from power_grid_model.enum import ( Branch3Side, BranchSide, diff --git a/src/power_grid_model/core/__init__.py b/src/power_grid_model/_core/__init__.py similarity index 100% rename from src/power_grid_model/core/__init__.py rename to src/power_grid_model/_core/__init__.py diff --git a/src/power_grid_model/core/buffer_handling.py b/src/power_grid_model/_core/buffer_handling.py similarity index 97% rename from src/power_grid_model/core/buffer_handling.py rename to src/power_grid_model/_core/buffer_handling.py index e6e78ba93..133b763c6 100644 --- a/src/power_grid_model/core/buffer_handling.py +++ b/src/power_grid_model/_core/buffer_handling.py @@ -6,12 +6,15 @@ Power grid model buffer handler """ -import warnings from dataclasses import dataclass from typing import cast import numpy as np +from power_grid_model._core.error_handling import VALIDATOR_MSG +from power_grid_model._core.index_integer import IdxC, IdxNp +from power_grid_model._core.power_grid_core import IdxPtr, VoidPtr +from power_grid_model._core.power_grid_meta import ComponentMetaData from power_grid_model._utils import ( _extract_data_from_component_data, _extract_indptr, @@ -19,10 +22,6 @@ is_columnar, is_sparse, ) -from power_grid_model.core.error_handling import VALIDATOR_MSG -from power_grid_model.core.index_integer import IdxC, IdxNp -from power_grid_model.core.power_grid_core import IdxPtr, VoidPtr -from power_grid_model.core.power_grid_meta import ComponentMetaData from 
power_grid_model.data_types import ( AttributeType, ComponentData, @@ -85,7 +84,7 @@ def _get_raw_data_view(data: np.ndarray, dtype: np.dtype) -> VoidPtr: a raw view on the data set. """ if data.dtype != dtype: - warnings.warn(f"Data type does not match schema. {VALIDATOR_MSG}", DeprecationWarning) + raise ValueError(f"Data type does not match schema. {VALIDATOR_MSG}") return np.ascontiguousarray(data, dtype=dtype).ctypes.data_as(VoidPtr) diff --git a/src/power_grid_model/core/data_handling.py b/src/power_grid_model/_core/data_handling.py similarity index 94% rename from src/power_grid_model/core/data_handling.py rename to src/power_grid_model/_core/data_handling.py index 4010c31f7..1ac336946 100644 --- a/src/power_grid_model/core/data_handling.py +++ b/src/power_grid_model/_core/data_handling.py @@ -8,10 +8,10 @@ import numpy as np +from power_grid_model._core.dataset_definitions import ComponentType, DatasetType +from power_grid_model._core.power_grid_dataset import CConstDataset, CMutableDataset +from power_grid_model._core.power_grid_meta import initialize_array, power_grid_meta_data from power_grid_model._utils import process_data_filter -from power_grid_model.core.dataset_definitions import ComponentType, DatasetType -from power_grid_model.core.power_grid_dataset import CConstDataset, CMutableDataset -from power_grid_model.core.power_grid_meta import initialize_array, power_grid_meta_data from power_grid_model.data_types import Dataset, SingleDataset from power_grid_model.enum import CalculationType, ComponentAttributeFilterOptions from power_grid_model.errors import PowerGridUnreachableHitError diff --git a/src/power_grid_model/core/dataset_definitions.py b/src/power_grid_model/_core/dataset_definitions.py similarity index 100% rename from src/power_grid_model/core/dataset_definitions.py rename to src/power_grid_model/_core/dataset_definitions.py diff --git a/src/power_grid_model/core/error_handling.py b/src/power_grid_model/_core/error_handling.py similarity index 98% rename from src/power_grid_model/core/error_handling.py rename to src/power_grid_model/_core/error_handling.py index 29962582a..98e231361 100644 --- a/src/power_grid_model/core/error_handling.py +++ b/src/power_grid_model/_core/error_handling.py @@ -11,8 +11,8 @@ import numpy as np -from power_grid_model.core.index_integer import IdxNp -from power_grid_model.core.power_grid_core import power_grid_core as pgc +from power_grid_model._core.index_integer import IdxNp +from power_grid_model._core.power_grid_core import power_grid_core as pgc from power_grid_model.errors import ( AutomaticTapCalculationError, ConflictID, diff --git a/src/power_grid_model/core/index_integer.py b/src/power_grid_model/_core/index_integer.py similarity index 100% rename from src/power_grid_model/core/index_integer.py rename to src/power_grid_model/_core/index_integer.py diff --git a/src/power_grid_model/core/options.py b/src/power_grid_model/_core/options.py similarity index 95% rename from src/power_grid_model/core/options.py rename to src/power_grid_model/_core/options.py index 878c986a9..b3f71fe60 100644 --- a/src/power_grid_model/core/options.py +++ b/src/power_grid_model/_core/options.py @@ -7,7 +7,7 @@ """ from typing import Any, Callable -from power_grid_model.core.power_grid_core import OptionsPtr, power_grid_core as pgc +from power_grid_model._core.power_grid_core import OptionsPtr, power_grid_core as pgc class OptionSetter: diff --git a/src/power_grid_model/core/power_grid_core.py b/src/power_grid_model/_core/power_grid_core.py 
similarity index 99% rename from src/power_grid_model/core/power_grid_core.py rename to src/power_grid_model/_core/power_grid_core.py index 633f99a9b..1f1d5f06b 100644 --- a/src/power_grid_model/core/power_grid_core.py +++ b/src/power_grid_model/_core/power_grid_core.py @@ -13,7 +13,7 @@ from pathlib import Path from typing import Callable, Optional -from power_grid_model.core.index_integer import IdC, IdxC +from power_grid_model._core.index_integer import IdC, IdxC # integer index IdxPtr = POINTER(IdxC) diff --git a/src/power_grid_model/core/power_grid_dataset.py b/src/power_grid_model/_core/power_grid_dataset.py similarity index 97% rename from src/power_grid_model/core/power_grid_dataset.py rename to src/power_grid_model/_core/power_grid_dataset.py index fc58f1aaa..aa23052e8 100644 --- a/src/power_grid_model/core/power_grid_dataset.py +++ b/src/power_grid_model/_core/power_grid_dataset.py @@ -8,8 +8,7 @@ from typing import Any, Mapping, Optional -from power_grid_model._utils import get_dataset_type, is_columnar, is_nan_or_equivalent, process_data_filter -from power_grid_model.core.buffer_handling import ( +from power_grid_model._core.buffer_handling import ( BufferProperties, CAttributeBuffer, CBuffer, @@ -17,16 +16,17 @@ get_buffer_properties, get_buffer_view, ) -from power_grid_model.core.dataset_definitions import ComponentType, DatasetType, _str_to_component_type -from power_grid_model.core.error_handling import VALIDATOR_MSG, assert_no_error -from power_grid_model.core.power_grid_core import ( +from power_grid_model._core.dataset_definitions import ComponentType, DatasetType, _str_to_component_type +from power_grid_model._core.error_handling import VALIDATOR_MSG, assert_no_error +from power_grid_model._core.power_grid_core import ( ConstDatasetPtr, DatasetInfoPtr, MutableDatasetPtr, WritableDatasetPtr, power_grid_core as pgc, ) -from power_grid_model.core.power_grid_meta import ComponentMetaData, DatasetMetaData, power_grid_meta_data +from power_grid_model._core.power_grid_meta import ComponentMetaData, DatasetMetaData, power_grid_meta_data +from power_grid_model._utils import get_dataset_type, is_columnar, is_nan_or_equivalent, process_data_filter from power_grid_model.data_types import AttributeType, ComponentData, Dataset from power_grid_model.enum import ComponentAttributeFilterOptions from power_grid_model.typing import ComponentAttributeMapping, _ComponentAttributeMappingDict diff --git a/src/power_grid_model/core/power_grid_meta.py b/src/power_grid_model/_core/power_grid_meta.py similarity index 97% rename from src/power_grid_model/core/power_grid_meta.py rename to src/power_grid_model/_core/power_grid_meta.py index 34d66db15..0d375601b 100644 --- a/src/power_grid_model/core/power_grid_meta.py +++ b/src/power_grid_model/_core/power_grid_meta.py @@ -12,7 +12,7 @@ import numpy as np -from power_grid_model.core.dataset_definitions import ( +from power_grid_model._core.dataset_definitions import ( ComponentTypeLike, ComponentTypeVar, DatasetType, @@ -20,7 +20,7 @@ _str_to_component_type, _str_to_datatype, ) -from power_grid_model.core.power_grid_core import AttributePtr, ComponentPtr, DatasetPtr, power_grid_core as pgc +from power_grid_model._core.power_grid_core import AttributePtr, ComponentPtr, DatasetPtr, power_grid_core as pgc from power_grid_model.data_types import DenseBatchArray, SingleArray diff --git a/src/power_grid_model/core/power_grid_model.py b/src/power_grid_model/_core/power_grid_model.py similarity index 98% rename from 
src/power_grid_model/core/power_grid_model.py rename to src/power_grid_model/_core/power_grid_model.py index 2ea1ceb9d..cc60f6864 100644 --- a/src/power_grid_model/core/power_grid_model.py +++ b/src/power_grid_model/_core/power_grid_model.py @@ -11,23 +11,23 @@ import numpy as np -from power_grid_model.core.data_handling import ( +from power_grid_model._core.data_handling import ( create_output_data, get_output_type, prepare_input_view, prepare_output_view, prepare_update_view, ) -from power_grid_model.core.dataset_definitions import ( +from power_grid_model._core.dataset_definitions import ( ComponentType, ComponentTypeLike, _map_to_component_types, _str_to_component_type, ) -from power_grid_model.core.error_handling import PowerGridBatchError, assert_no_error, handle_errors -from power_grid_model.core.index_integer import IdNp, IdxNp -from power_grid_model.core.options import Options -from power_grid_model.core.power_grid_core import ConstDatasetPtr, IDPtr, IdxPtr, ModelPtr, power_grid_core as pgc +from power_grid_model._core.error_handling import PowerGridBatchError, assert_no_error, handle_errors +from power_grid_model._core.index_integer import IdNp, IdxNp +from power_grid_model._core.options import Options +from power_grid_model._core.power_grid_core import ConstDatasetPtr, IDPtr, IdxPtr, ModelPtr, power_grid_core as pgc from power_grid_model.data_types import Dataset, SingleDataset from power_grid_model.enum import ( CalculationMethod, diff --git a/src/power_grid_model/core/serialization.py b/src/power_grid_model/_core/serialization.py similarity index 96% rename from src/power_grid_model/core/serialization.py rename to src/power_grid_model/_core/serialization.py index 914c238df..78190083f 100644 --- a/src/power_grid_model/core/serialization.py +++ b/src/power_grid_model/_core/serialization.py @@ -10,17 +10,17 @@ from ctypes import byref from enum import IntEnum -from power_grid_model.core.dataset_definitions import DatasetType, _map_to_component_types, _str_to_datatype -from power_grid_model.core.error_handling import assert_no_error -from power_grid_model.core.index_integer import IdxC -from power_grid_model.core.power_grid_core import ( +from power_grid_model._core.dataset_definitions import DatasetType, _map_to_component_types, _str_to_datatype +from power_grid_model._core.error_handling import assert_no_error +from power_grid_model._core.index_integer import IdxC +from power_grid_model._core.power_grid_core import ( CharPtr, DeserializerPtr, SerializerPtr, WritableDatasetPtr, power_grid_core as pgc, ) -from power_grid_model.core.power_grid_dataset import CConstDataset, CWritableDataset +from power_grid_model._core.power_grid_dataset import CConstDataset, CWritableDataset from power_grid_model.data_types import Dataset from power_grid_model.errors import PowerGridSerializationError from power_grid_model.typing import ComponentAttributeMapping diff --git a/src/power_grid_model/_utils.py b/src/power_grid_model/_utils.py index 7f79eebe2..6f241fb2a 100644 --- a/src/power_grid_model/_utils.py +++ b/src/power_grid_model/_utils.py @@ -15,9 +15,9 @@ import numpy as np -from power_grid_model.core.dataset_definitions import ComponentType, DatasetType -from power_grid_model.core.error_handling import VALIDATOR_MSG -from power_grid_model.core.power_grid_meta import initialize_array, power_grid_meta_data +from power_grid_model._core.dataset_definitions import ComponentType, DatasetType +from power_grid_model._core.error_handling import VALIDATOR_MSG +from 
power_grid_model._core.power_grid_meta import initialize_array, power_grid_meta_data from power_grid_model.data_types import ( BatchColumn, BatchComponentData, diff --git a/src/power_grid_model/data_types.py b/src/power_grid_model/data_types.py index 3e51cbb83..426bc1032 100644 --- a/src/power_grid_model/data_types.py +++ b/src/power_grid_model/data_types.py @@ -13,7 +13,7 @@ import numpy as np -from power_grid_model.core.dataset_definitions import ComponentTypeVar +from power_grid_model._core.dataset_definitions import ComponentTypeVar AttributeType: TypeAlias = str """ diff --git a/src/power_grid_model/typing.py b/src/power_grid_model/typing.py index 06e58443a..530e8e775 100644 --- a/src/power_grid_model/typing.py +++ b/src/power_grid_model/typing.py @@ -8,13 +8,13 @@ This includes all miscellaneous type hints not under dataset or categories. """ -from power_grid_model.core.dataset_definitions import ( # pylint: disable=unused-import +from power_grid_model._core.dataset_definitions import ( # pylint: disable=unused-import ComponentType, ComponentTypeVar, DatasetType, DatasetTypeVar, ) -from power_grid_model.core.power_grid_meta import ( # pylint: disable=unused-import +from power_grid_model._core.power_grid_meta import ( # pylint: disable=unused-import ComponentMetaData, DatasetMetaData, PowerGridMetaData, diff --git a/src/power_grid_model/utils.py b/src/power_grid_model/utils.py index 53a6a7df5..c724e0ca9 100644 --- a/src/power_grid_model/utils.py +++ b/src/power_grid_model/utils.py @@ -16,6 +16,13 @@ import numpy as np from power_grid_model import CalculationMethod, PowerGridModel +from power_grid_model._core.dataset_definitions import DatasetType, _map_to_component_types +from power_grid_model._core.serialization import ( # pylint: disable=unused-import + json_deserialize, + json_serialize, + msgpack_deserialize, + msgpack_serialize, +) from power_grid_model._utils import ( _extract_data_from_component_data, _extract_indptr, @@ -25,13 +32,6 @@ is_columnar, is_sparse, ) -from power_grid_model.core.dataset_definitions import DatasetType, _map_to_component_types -from power_grid_model.core.serialization import ( # pylint: disable=unused-import - json_deserialize, - json_serialize, - msgpack_deserialize, - msgpack_serialize, -) from power_grid_model.data_types import ( BatchComponentData, BatchDataset, diff --git a/src/power_grid_model/validation/rules.py b/src/power_grid_model/validation/rules.py index 7046fb9f6..b506fe1ad 100644 --- a/src/power_grid_model/validation/rules.py +++ b/src/power_grid_model/validation/rules.py @@ -70,12 +70,12 @@ ValidationError, ) from power_grid_model.validation.utils import ( - eval_expression, - get_indexer, - get_mask, - get_valid_ids, - nan_type, - set_default_value, + _eval_expression, + _get_indexer, + _get_mask, + _get_valid_ids, + _nan_type, + _set_default_value, ) Error = TypeVar("Error", bound=ValidationError) @@ -370,17 +370,17 @@ def none_match_comparison( # pylint: disable=too-many-arguments where the value in the field of interest matched the comparison. 
""" if default_value_1 is not None: - set_default_value(data=data, component=component, field=field, default_value=default_value_1) + _set_default_value(data=data, component=component, field=field, default_value=default_value_1) if default_value_2 is not None: - set_default_value(data=data, component=component, field=field, default_value=default_value_2) + _set_default_value(data=data, component=component, field=field, default_value=default_value_2) component_data = data[component] if not isinstance(component_data, np.ndarray): raise NotImplementedError() # TODO(mgovers): add support for columnar data if isinstance(ref_value, tuple): - ref = tuple(eval_expression(component_data, v) for v in ref_value) + ref = tuple(_eval_expression(component_data, v) for v in ref_value) else: - ref = (eval_expression(component_data, ref_value),) + ref = (_eval_expression(component_data, ref_value),) matches = compare_fn(component_data[field], *ref) if matches.any(): if matches.ndim > 1: @@ -520,7 +520,7 @@ def all_valid_enum_values( """ enums: list[Type[Enum]] = enum if isinstance(enum, list) else [enum] - valid = {nan_type(component, field)} + valid = {_nan_type(component, field)} for enum_type in enums: valid.update(list(enum_type)) @@ -557,13 +557,13 @@ def all_valid_associated_enum_values( # pylint: disable=too-many-positional-arg """ enums: list[Type[Enum]] = enum if isinstance(enum, list) else [enum] - valid_ids = get_valid_ids(data=data, ref_components=ref_components) + valid_ids = _get_valid_ids(data=data, ref_components=ref_components) mask = np.logical_and( - get_mask(data=data, component=component, field=field, **filters), + _get_mask(data=data, component=component, field=field, **filters), np.isin(data[component][ref_object_id_field], valid_ids), ) - valid = {nan_type(component, field)} + valid = {_nan_type(component, field)} for enum_type in enums: valid.update(list(enum_type)) @@ -596,8 +596,8 @@ def all_valid_ids( A list containing zero or one InvalidIdError, listing all ids where the value in the field of interest was not a valid object identifier. """ - valid_ids = get_valid_ids(data=data, ref_components=ref_components) - mask = get_mask(data=data, component=component, field=field, **filters) + valid_ids = _get_valid_ids(data=data, ref_components=ref_components) + mask = _get_mask(data=data, component=component, field=field, **filters) # Find any values that can't be found in the set of ids invalid = np.logical_and(mask, np.isin(data[component][field], valid_ids, invert=True)) @@ -779,7 +779,7 @@ def none_missing( for field in fields: if isinstance(field, list): field = field[0] - nan = nan_type(component, field) + nan = _nan_type(component, field) if np.isnan(nan): invalid = np.isnan(data[component][field][index]) else: @@ -939,14 +939,14 @@ def all_supported_tap_control_side( # pylint: disable=too-many-arguments A list containing zero or more InvalidAssociatedEnumValueErrors; listing all the ids of components where the field of interest was invalid, given the referenced object's field. 
""" - mask = get_mask(data=data, component=component, field=control_side_field, **filters) + mask = _get_mask(data=data, component=component, field=control_side_field, **filters) values = data[component][control_side_field][mask] invalid = np.zeros_like(mask) for ref_component, ref_field in tap_side_fields: if ref_component in data: - indices = get_indexer(data[ref_component]["id"], data[component][regulated_object_field], default_value=-1) + indices = _get_indexer(data[ref_component]["id"], data[component][regulated_object_field], default_value=-1) found = indices != -1 ref_comp_values = data[ref_component][ref_field][indices[found]] invalid[found] = np.logical_or(invalid[found], values[found] == ref_comp_values) diff --git a/src/power_grid_model/validation/utils.py b/src/power_grid_model/validation/utils.py index 16fdee684..2d2df85b8 100644 --- a/src/power_grid_model/validation/utils.py +++ b/src/power_grid_model/validation/utils.py @@ -11,7 +11,7 @@ import numpy as np from power_grid_model import power_grid_meta_data -from power_grid_model.core.dataset_definitions import ( +from power_grid_model._core.dataset_definitions import ( ComponentType, ComponentTypeLike, ComponentTypeVar, @@ -22,7 +22,7 @@ from power_grid_model.validation.errors import ValidationError -def eval_expression(data: np.ndarray, expression: int | float | str) -> np.ndarray: +def _eval_expression(data: np.ndarray, expression: int | float | str) -> np.ndarray: """ Wrapper function that checks the type of the 'expression'. If the expression is a string, it is assumed to be a field expression and the expression is validated. Otherwise it is assumed to be a numerical value and the value @@ -42,11 +42,11 @@ def eval_expression(data: np.ndarray, expression: int | float | str) -> np.ndarr """ if isinstance(expression, str): - return eval_field_expression(data, expression) + return _eval_field_expression(data, expression) return np.array(expression) -def eval_field_expression(data: np.ndarray, expression: str) -> np.ndarray: +def _eval_field_expression(data: np.ndarray, expression: str) -> np.ndarray: """ A field expression can either be the name of a field (e.g. 'field_x') in the data, or a ratio between two fields (e.g. 'field_x / field_y'). The expression is checked on validity and then the fields are checked to be present in @@ -92,18 +92,18 @@ def eval_field_expression(data: np.ndarray, expression: str) -> np.ndarray: return np.true_divide(data[fields[0]], data[fields[1]]) -def update_input_data(input_data: SingleDataset, update_data: SingleDataset): +def _update_input_data(input_data: SingleDataset, update_data: SingleDataset): """ Update the input data using the available non-nan values in the update data. 
""" merged_data = {component: array.copy() for component, array in input_data.items()} for component in update_data.keys(): - update_component_data(component, merged_data[component], update_data[component]) + _update_component_data(component, merged_data[component], update_data[component]) return merged_data -def update_component_data( +def _update_component_data( component: ComponentTypeLike, input_data: SingleComponentData, update_data: SingleComponentData ) -> None: """ @@ -133,7 +133,7 @@ def _update_component_array_data( for field in update_data.dtype.names: if field == "id": continue - nan = nan_type(component, field, DatasetType.update) + nan = _nan_type(component, field, DatasetType.update) if np.isnan(nan): mask = ~np.isnan(update_data[field]) else: @@ -143,12 +143,12 @@ def _update_component_array_data( for phase in range(mask.shape[1]): # find indexers of to-be-updated object sub_mask = mask[:, phase] - idx = get_indexer(input_data["id"], update_data_ids[sub_mask]) + idx = _get_indexer(input_data["id"], update_data_ids[sub_mask]) # update input_data[field][idx, phase] = update_data[field][sub_mask, phase] else: # find indexers of to-be-updated object - idx = get_indexer(input_data["id"], update_data_ids[mask]) + idx = _get_indexer(input_data["id"], update_data_ids[mask]) # update input_data[field][idx] = update_data[field][mask] @@ -190,7 +190,7 @@ def errors_to_string( return msg -def nan_type(component: ComponentTypeLike, field: str, data_type: DatasetType = DatasetType.input): +def _nan_type(component: ComponentTypeLike, field: str, data_type: DatasetType = DatasetType.input): """ Helper function to retrieve the nan value for a certain field as defined in the power_grid_meta_data. """ @@ -198,7 +198,7 @@ def nan_type(component: ComponentTypeLike, field: str, data_type: DatasetType = return power_grid_meta_data[data_type][component].nans[field] -def get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[int] = None) -> np.ndarray: +def _get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[int] = None) -> np.ndarray: """ Given array of values from a source and a target dataset. Find the position of each value in the target dataset in the context of the source dataset. 
@@ -209,7 +209,7 @@ def get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[ >>> input_ids = [1, 2, 3, 4, 5] >>> update_ids = [3] - >>> assert get_indexer(input_ids, update_ids) == np.array([2]) + >>> assert _get_indexer(input_ids, update_ids) == np.array([2]) Args: source: array of values in the source dataset @@ -238,7 +238,7 @@ def get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[ return np.where(source[clipped_indices] == target, permutation_sort[clipped_indices], default_value) -def set_default_value( +def _set_default_value( data: SingleDataset, component: ComponentTypeLike, field: str, default_value: int | float | np.ndarray ): """ @@ -256,17 +256,17 @@ def set_default_value( Returns: """ - if np.isnan(nan_type(component, field)): + if np.isnan(_nan_type(component, field)): mask = np.isnan(data[component][field]) else: - mask = data[component][field] == nan_type(component, field) + mask = data[component][field] == _nan_type(component, field) if isinstance(default_value, np.ndarray): data[component][field][mask] = default_value[mask] else: data[component][field][mask] = default_value -def get_valid_ids(data: SingleDataset, ref_components: ComponentTypeLike | list[ComponentTypeVar]) -> list[int]: +def _get_valid_ids(data: SingleDataset, ref_components: ComponentTypeLike | list[ComponentTypeVar]) -> list[int]: """ This function returns the valid IDs specified by all ref_components @@ -286,7 +286,7 @@ def get_valid_ids(data: SingleDataset, ref_components: ComponentTypeLike | list[ valid_ids = set() for ref_component in ref_components: if ref_component in data: - nan = nan_type(ref_component, "id") + nan = _nan_type(ref_component, "id") if np.isnan(nan): mask = ~np.isnan(data[ref_component]["id"]) else: @@ -296,7 +296,7 @@ def get_valid_ids(data: SingleDataset, ref_components: ComponentTypeLike | list[ return list(valid_ids) -def get_mask(data: SingleDataset, component: ComponentTypeLike, field: str, **filters: Any) -> np.ndarray: +def _get_mask(data: SingleDataset, component: ComponentTypeLike, field: str, **filters: Any) -> np.ndarray: """ Get a mask based on the specified filters. E.g. measured_terminal_type=MeasuredTerminalType.source. 
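The validation/utils.py hunks above rename the module's helpers (eval_expression, get_indexer, get_valid_ids, nan_type, set_default_value, get_mask, update_input_data) with a leading underscore, turning them into private implementation details of the validation package. As a reading aid, here is a minimal sketch of what the renamed _get_indexer computes, reconstructed from the docstring, doctest and argsort/searchsorted logic visible in this diff; the name get_indexer_sketch and its -1 default are illustrative only, not the library's API:

import numpy as np

def get_indexer_sketch(source, target, default_value=-1):
    # For each value in `target`, return its position in `source`,
    # or `default_value` where it does not occur in `source`.
    source = np.asarray(source)
    target = np.asarray(target)
    permutation_sort = np.argsort(source)  # indices that sort `source`
    sorted_pos = np.searchsorted(source, target, sorter=permutation_sort)
    clipped = np.clip(sorted_pos, 0, len(source) - 1)
    candidates = permutation_sort[clipped]  # candidate positions in `source`
    return np.where(source[candidates] == target, candidates, default_value)

# Mirrors the doctest in the hunk above: input ids [1, 2, 3, 4, 5], update id [3].
assert get_indexer_sketch([1, 2, 3, 4, 5], [3]) == np.array([2])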
diff --git a/src/power_grid_model/validation/validation.py b/src/power_grid_model/validation/validation.py index 5e76af9a8..10dba4a2e 100644 --- a/src/power_grid_model/validation/validation.py +++ b/src/power_grid_model/validation/validation.py @@ -19,7 +19,10 @@ import numpy as np from power_grid_model import ComponentType, DatasetType, power_grid_meta_data -from power_grid_model._utils import compatibility_convert_row_columnar_dataset, convert_batch_dataset_to_batch_list +from power_grid_model._utils import ( + compatibility_convert_row_columnar_dataset as _compatibility_convert_row_columnar_dataset, + convert_batch_dataset_to_batch_list as _convert_batch_dataset_to_batch_list, +) from power_grid_model.data_types import BatchDataset, Dataset, SingleDataset from power_grid_model.enum import ( Branch3Side, @@ -39,30 +42,30 @@ ValidationError, ) from power_grid_model.validation.rules import ( - all_between, - all_between_or_at, - all_boolean, - all_cross_unique, - all_enabled_identical, - all_finite, - all_greater_or_equal, - all_greater_than_or_equal_to_zero, - all_greater_than_zero, - all_less_than, - all_not_two_values_equal, - all_not_two_values_zero, - all_supported_tap_control_side, - all_unique, - all_valid_associated_enum_values, - all_valid_clocks, - all_valid_enum_values, - all_valid_fault_phases, - all_valid_ids, - ids_valid_in_update_data_set, - none_missing, - valid_p_q_sigma, + all_between as _all_between, + all_between_or_at as _all_between_or_at, + all_boolean as _all_boolean, + all_cross_unique as _all_cross_unique, + all_enabled_identical as _all_enabled_identical, + all_finite as _all_finite, + all_greater_or_equal as _all_greater_or_equal, + all_greater_than_or_equal_to_zero as _all_greater_than_or_equal_to_zero, + all_greater_than_zero as _all_greater_than_zero, + all_less_than as _all_less_than, + all_not_two_values_equal as _all_not_two_values_equal, + all_not_two_values_zero as _all_not_two_values_zero, + all_supported_tap_control_side as _all_supported_tap_control_side, + all_unique as _all_unique, + all_valid_associated_enum_values as _all_valid_associated_enum_values, + all_valid_clocks as _all_valid_clocks, + all_valid_enum_values as _all_valid_enum_values, + all_valid_fault_phases as _all_valid_fault_phases, + all_valid_ids as _all_valid_ids, + ids_valid_in_update_data_set as _ids_valid_in_update_data_set, + none_missing as _none_missing, + valid_p_q_sigma as _valid_p_q_sigma, ) -from power_grid_model.validation.utils import update_input_data +from power_grid_model.validation.utils import _update_input_data def validate_input_data( @@ -88,7 +91,7 @@ def validate_input_data( Error: KeyError | TypeError | ValueError: if the data structure is invalid. """ # Convert to row based if in columnar or mixed format format - row_input_data = compatibility_convert_row_columnar_dataset(input_data, None, DatasetType.input) + row_input_data = _compatibility_convert_row_columnar_dataset(input_data, None, DatasetType.input) # A deep copy is made of the input data, since default values will be added in the validation process input_data_copy = copy.deepcopy(row_input_data) @@ -136,7 +139,7 @@ def validate_batch_data( Error: KeyError | TypeError | ValueError: if the data structure is invalid. 
""" # Convert to row based if in columnar or mixed format - row_input_data = compatibility_convert_row_columnar_dataset(input_data, None, DatasetType.input) + row_input_data = _compatibility_convert_row_columnar_dataset(input_data, None, DatasetType.input) # A deep copy is made of the input data, since default values will be added in the validation process input_data_copy = copy.deepcopy(row_input_data) @@ -144,11 +147,11 @@ def validate_batch_data( input_errors: list[ValidationError] = list(validate_unique_ids_across_components(input_data_copy)) - batch_data = convert_batch_dataset_to_batch_list(update_data, DatasetType.update) + batch_data = _convert_batch_dataset_to_batch_list(update_data, DatasetType.update) errors = {} for batch, batch_update_data in enumerate(batch_data): - row_update_data = compatibility_convert_row_columnar_dataset(batch_update_data, None, DatasetType.update) + row_update_data = _compatibility_convert_row_columnar_dataset(batch_update_data, None, DatasetType.update) assert_valid_data_structure(row_update_data, DatasetType.update) id_errors: list[IdNotInDatasetError | InvalidIdError] = validate_ids(row_update_data, input_data_copy) @@ -156,7 +159,7 @@ def validate_batch_data( if not id_errors: batch_errors = input_errors - merged_data = update_input_data(input_data_copy, row_update_data) + merged_data = _update_input_data(input_data_copy, row_update_data) batch_errors += validate_required_values(merged_data, calculation_type, symmetric) batch_errors += validate_values(merged_data, calculation_type) @@ -216,7 +219,7 @@ def validate_unique_ids_across_components(data: SingleDataset) -> list[MultiComp An empty list if all ids are unique, or a list of MultiComponentNotUniqueErrors for all components that have non-unique ids """ - return all_cross_unique(data, [(component, "id") for component in data]) + return _all_cross_unique(data, [(component, "id") for component in data]) def validate_ids(update_data: SingleDataset, input_data: SingleDataset) -> list[IdNotInDatasetError | InvalidIdError]: @@ -237,7 +240,7 @@ def validate_ids(update_data: SingleDataset, input_data: SingleDataset) -> list[ """ errors = ( - ids_valid_in_update_data_set(update_data, input_data, component, "update_data") for component in update_data + _ids_valid_in_update_data_set(update_data, input_data, component, "update_data") for component in update_data ) return list(chain(*errors)) @@ -433,7 +436,7 @@ def is_nested_list(items): def process_nested_items(component, items, data, results): for index, item in enumerate(sublist for sublist in items): if index < len(data[component]): - results.append(none_missing(data, component, item, index)) + results.append(_none_missing(data, component, item, index)) results = [] @@ -443,7 +446,7 @@ def process_nested_items(component, items, data, results): if is_nested_list(items): process_nested_items(component, items, data, results) else: - results.append(none_missing(data, component, items, 0)) + results.append(_none_missing(data, component, items, 0)) return list(chain(*results)) @@ -461,7 +464,7 @@ def validate_values(data: SingleDataset, calculation_type: Optional[CalculationT """ errors: list[ValidationError] = list( - all_finite( + _all_finite( data, { ComponentType.sym_power_sensor: ["power_sigma"], @@ -514,74 +517,74 @@ def validate_values(data: SingleDataset, calculation_type: Optional[CalculationT def validate_base(data: SingleDataset, component: ComponentType) -> list[ValidationError]: - errors: list[ValidationError] = list(all_unique(data, component, 
"id")) + errors: list[ValidationError] = list(_all_unique(data, component, "id")) return errors def validate_node(data: SingleDataset) -> list[ValidationError]: errors = validate_base(data, ComponentType.node) - errors += all_greater_than_zero(data, ComponentType.node, "u_rated") + errors += _all_greater_than_zero(data, ComponentType.node, "u_rated") return errors def validate_branch(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) - errors += all_valid_ids(data, component, "from_node", ComponentType.node) - errors += all_valid_ids(data, component, "to_node", ComponentType.node) - errors += all_not_two_values_equal(data, component, "to_node", "from_node") - errors += all_boolean(data, component, "from_status") - errors += all_boolean(data, component, "to_status") + errors += _all_valid_ids(data, component, "from_node", ComponentType.node) + errors += _all_valid_ids(data, component, "to_node", ComponentType.node) + errors += _all_not_two_values_equal(data, component, "to_node", "from_node") + errors += _all_boolean(data, component, "from_status") + errors += _all_boolean(data, component, "to_status") return errors def validate_line(data: SingleDataset) -> list[ValidationError]: errors = validate_branch(data, ComponentType.line) - errors += all_not_two_values_zero(data, ComponentType.line, "r1", "x1") - errors += all_not_two_values_zero(data, ComponentType.line, "r0", "x0") - errors += all_greater_than_zero(data, ComponentType.line, "i_n") + errors += _all_not_two_values_zero(data, ComponentType.line, "r1", "x1") + errors += _all_not_two_values_zero(data, ComponentType.line, "r0", "x0") + errors += _all_greater_than_zero(data, ComponentType.line, "i_n") return errors def validate_generic_branch(data: SingleDataset) -> list[ValidationError]: errors = validate_branch(data, ComponentType.generic_branch) - errors += all_greater_than_zero(data, ComponentType.generic_branch, "k") - errors += all_greater_than_or_equal_to_zero(data, ComponentType.generic_branch, "sn") + errors += _all_greater_than_zero(data, ComponentType.generic_branch, "k") + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.generic_branch, "sn") return errors def validate_transformer(data: SingleDataset) -> list[ValidationError]: errors = validate_branch(data, ComponentType.transformer) - errors += all_greater_than_zero(data, ComponentType.transformer, "u1") - errors += all_greater_than_zero(data, ComponentType.transformer, "u2") - errors += all_greater_than_zero(data, ComponentType.transformer, "sn") - errors += all_greater_or_equal(data, ComponentType.transformer, "uk", "pk/sn") - errors += all_between(data, ComponentType.transformer, "uk", 0, 1) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "pk") - errors += all_greater_or_equal(data, ComponentType.transformer, "i0", "p0/sn") - errors += all_less_than(data, ComponentType.transformer, "i0", 1) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "p0") - errors += all_valid_enum_values(data, ComponentType.transformer, "winding_from", WindingType) - errors += all_valid_enum_values(data, ComponentType.transformer, "winding_to", WindingType) - errors += all_between_or_at(data, ComponentType.transformer, "clock", 0, 12) - errors += all_valid_clocks(data, ComponentType.transformer, "clock", "winding_from", "winding_to") - errors += all_valid_enum_values(data, ComponentType.transformer, "tap_side", BranchSide) - errors += all_between_or_at( + errors 
+= _all_greater_than_zero(data, ComponentType.transformer, "u1") + errors += _all_greater_than_zero(data, ComponentType.transformer, "u2") + errors += _all_greater_than_zero(data, ComponentType.transformer, "sn") + errors += _all_greater_or_equal(data, ComponentType.transformer, "uk", "pk/sn") + errors += _all_between(data, ComponentType.transformer, "uk", 0, 1) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "pk") + errors += _all_greater_or_equal(data, ComponentType.transformer, "i0", "p0/sn") + errors += _all_less_than(data, ComponentType.transformer, "i0", 1) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "p0") + errors += _all_valid_enum_values(data, ComponentType.transformer, "winding_from", WindingType) + errors += _all_valid_enum_values(data, ComponentType.transformer, "winding_to", WindingType) + errors += _all_between_or_at(data, ComponentType.transformer, "clock", 0, 12) + errors += _all_valid_clocks(data, ComponentType.transformer, "clock", "winding_from", "winding_to") + errors += _all_valid_enum_values(data, ComponentType.transformer, "tap_side", BranchSide) + errors += _all_between_or_at( data, ComponentType.transformer, "tap_pos", "tap_min", "tap_max", data[ComponentType.transformer]["tap_nom"], 0 ) - errors += all_between_or_at(data, ComponentType.transformer, "tap_nom", "tap_min", "tap_max", 0) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "tap_size") - errors += all_greater_or_equal( + errors += _all_between_or_at(data, ComponentType.transformer, "tap_nom", "tap_min", "tap_max", 0) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "tap_size") + errors += _all_greater_or_equal( data, ComponentType.transformer, "uk_min", "pk_min/sn", data[ComponentType.transformer]["uk"] ) - errors += all_between(data, ComponentType.transformer, "uk_min", 0, 1, data[ComponentType.transformer]["uk"]) - errors += all_greater_or_equal( + errors += _all_between(data, ComponentType.transformer, "uk_min", 0, 1, data[ComponentType.transformer]["uk"]) + errors += _all_greater_or_equal( data, ComponentType.transformer, "uk_max", "pk_max/sn", data[ComponentType.transformer]["uk"] ) - errors += all_between(data, ComponentType.transformer, "uk_max", 0, 1, data[ComponentType.transformer]["uk"]) - errors += all_greater_than_or_equal_to_zero( + errors += _all_between(data, ComponentType.transformer, "uk_max", 0, 1, data[ComponentType.transformer]["uk"]) + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.transformer, "pk_min", data[ComponentType.transformer]["pk"] ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.transformer, "pk_max", data[ComponentType.transformer]["pk"] ) return errors @@ -589,51 +592,51 @@ def validate_transformer(data: SingleDataset) -> list[ValidationError]: def validate_branch3(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) - errors += all_valid_ids(data, component, "node_1", ComponentType.node) - errors += all_valid_ids(data, component, "node_2", ComponentType.node) - errors += all_valid_ids(data, component, "node_3", ComponentType.node) - errors += all_not_two_values_equal(data, component, "node_1", "node_2") - errors += all_not_two_values_equal(data, component, "node_1", "node_3") - errors += all_not_two_values_equal(data, component, "node_2", "node_3") - errors += all_boolean(data, component, "status_1") 
- errors += all_boolean(data, component, "status_2") - errors += all_boolean(data, component, "status_3") + errors += _all_valid_ids(data, component, "node_1", ComponentType.node) + errors += _all_valid_ids(data, component, "node_2", ComponentType.node) + errors += _all_valid_ids(data, component, "node_3", ComponentType.node) + errors += _all_not_two_values_equal(data, component, "node_1", "node_2") + errors += _all_not_two_values_equal(data, component, "node_1", "node_3") + errors += _all_not_two_values_equal(data, component, "node_2", "node_3") + errors += _all_boolean(data, component, "status_1") + errors += _all_boolean(data, component, "status_2") + errors += _all_boolean(data, component, "status_3") return errors # pylint: disable=R0915 def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationError]: errors = validate_branch3(data, ComponentType.three_winding_transformer) - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "u1") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "u2") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "u3") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_1") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_2") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_3") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_12", "pk_12/sn_1") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_12", "pk_12/sn_2") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_13", "pk_13/sn_1") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_13", "pk_13/sn_3") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_23", "pk_23/sn_2") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_23", "pk_23/sn_3") - errors += all_between(data, ComponentType.three_winding_transformer, "uk_12", 0, 1) - errors += all_between(data, ComponentType.three_winding_transformer, "uk_13", 0, 1) - errors += all_between(data, ComponentType.three_winding_transformer, "uk_23", 0, 1) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_12") - errors += all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_13") - errors += all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_23") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "i0", "p0/sn_1") - errors += all_less_than(data, ComponentType.three_winding_transformer, "i0", 1) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "p0") - errors += all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_1", WindingType) - errors += all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_2", WindingType) - errors += all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_3", WindingType) - errors += all_between_or_at(data, ComponentType.three_winding_transformer, "clock_12", 0, 12) - errors += all_between_or_at(data, ComponentType.three_winding_transformer, "clock_13", 0, 12) - errors += all_valid_clocks(data, ComponentType.three_winding_transformer, "clock_12", "winding_1", "winding_2") - errors += 
all_valid_clocks(data, ComponentType.three_winding_transformer, "clock_13", "winding_1", "winding_3") - errors += all_valid_enum_values(data, ComponentType.three_winding_transformer, "tap_side", Branch3Side) - errors += all_between_or_at( + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "u1") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "u2") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "u3") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_1") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_2") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_3") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_12", "pk_12/sn_1") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_12", "pk_12/sn_2") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_13", "pk_13/sn_1") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_13", "pk_13/sn_3") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_23", "pk_23/sn_2") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_23", "pk_23/sn_3") + errors += _all_between(data, ComponentType.three_winding_transformer, "uk_12", 0, 1) + errors += _all_between(data, ComponentType.three_winding_transformer, "uk_13", 0, 1) + errors += _all_between(data, ComponentType.three_winding_transformer, "uk_23", 0, 1) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_12") + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_13") + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_23") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "i0", "p0/sn_1") + errors += _all_less_than(data, ComponentType.three_winding_transformer, "i0", 1) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "p0") + errors += _all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_1", WindingType) + errors += _all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_2", WindingType) + errors += _all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_3", WindingType) + errors += _all_between_or_at(data, ComponentType.three_winding_transformer, "clock_12", 0, 12) + errors += _all_between_or_at(data, ComponentType.three_winding_transformer, "clock_13", 0, 12) + errors += _all_valid_clocks(data, ComponentType.three_winding_transformer, "clock_12", "winding_1", "winding_2") + errors += _all_valid_clocks(data, ComponentType.three_winding_transformer, "clock_13", "winding_1", "winding_3") + errors += _all_valid_enum_values(data, ComponentType.three_winding_transformer, "tap_side", Branch3Side) + errors += _all_between_or_at( data, ComponentType.three_winding_transformer, "tap_pos", @@ -642,51 +645,51 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr data[ComponentType.three_winding_transformer]["tap_nom"], 0, ) - errors += all_between_or_at(data, ComponentType.three_winding_transformer, "tap_nom", "tap_min", "tap_max", 0) - errors += all_greater_than_or_equal_to_zero(data, 
ComponentType.three_winding_transformer, "tap_size") - errors += all_greater_or_equal( + errors += _all_between_or_at(data, ComponentType.three_winding_transformer, "tap_nom", "tap_min", "tap_max", 0) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "tap_size") + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_12_min", "pk_12_min/sn_1", data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_12_min", "pk_12_min/sn_2", data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_13_min", "pk_13_min/sn_1", data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_13_min", "pk_13_min/sn_3", data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_23_min", "pk_23_min/sn_2", data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_23_min", "pk_23_min/sn_3", data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_12_min", @@ -694,7 +697,7 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_13_min", @@ -702,7 +705,7 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_23_min", @@ -710,49 +713,49 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_12_max", "pk_12_max/sn_1", data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_12_max", "pk_12_max/sn_2", data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_13_max", "pk_13_max/sn_1", data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_13_max", "pk_13_max/sn_3", data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_23_max", "pk_23_max/sn_2", data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_23_max", "pk_23_max/sn_3", data[ComponentType.three_winding_transformer]["uk_23"], ) 
-    errors += all_between(
+    errors += _all_between(
         data,
         ComponentType.three_winding_transformer,
         "uk_12_max",
@@ -760,7 +763,7 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr
         1,
         data[ComponentType.three_winding_transformer]["uk_12"],
     )
-    errors += all_between(
+    errors += _all_between(
         data,
         ComponentType.three_winding_transformer,
         "uk_13_max",
@@ -768,7 +771,7 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr
         1,
         data[ComponentType.three_winding_transformer]["uk_13"],
     )
-    errors += all_between(
+    errors += _all_between(
         data,
         ComponentType.three_winding_transformer,
         "uk_23_max",
@@ -776,37 +779,37 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr
         1,
         data[ComponentType.three_winding_transformer]["uk_23"],
     )
-    errors += all_greater_than_or_equal_to_zero(
+    errors += _all_greater_than_or_equal_to_zero(
         data,
         ComponentType.three_winding_transformer,
         "pk_12_min",
         data[ComponentType.three_winding_transformer]["pk_12"],
     )
-    errors += all_greater_than_or_equal_to_zero(
+    errors += _all_greater_than_or_equal_to_zero(
         data,
         ComponentType.three_winding_transformer,
         "pk_13_min",
         data[ComponentType.three_winding_transformer]["pk_13"],
     )
-    errors += all_greater_than_or_equal_to_zero(
+    errors += _all_greater_than_or_equal_to_zero(
         data,
         ComponentType.three_winding_transformer,
         "pk_23_min",
         data[ComponentType.three_winding_transformer]["pk_23"],
     )
-    errors += all_greater_than_or_equal_to_zero(
+    errors += _all_greater_than_or_equal_to_zero(
         data,
         ComponentType.three_winding_transformer,
         "pk_12_max",
         data[ComponentType.three_winding_transformer]["pk_12"],
     )
-    errors += all_greater_than_or_equal_to_zero(
+    errors += _all_greater_than_or_equal_to_zero(
         data,
         ComponentType.three_winding_transformer,
         "pk_13_max",
         data[ComponentType.three_winding_transformer]["pk_13"],
     )
-    errors += all_greater_than_or_equal_to_zero(
+    errors += _all_greater_than_or_equal_to_zero(
         data,
         ComponentType.three_winding_transformer,
         "pk_23_max",
@@ -817,23 +820,23 @@
 def validate_appliance(data: SingleDataset, component: ComponentType) -> list[ValidationError]:
     errors = validate_base(data, component)
-    errors += all_boolean(data, component, "status")
-    errors += all_valid_ids(data, component, "node", ComponentType.node)
+    errors += _all_boolean(data, component, "status")
+    errors += _all_valid_ids(data, component, "node", ComponentType.node)
     return errors


 def validate_source(data: SingleDataset) -> list[ValidationError]:
     errors = validate_appliance(data, ComponentType.source)
-    errors += all_greater_than_zero(data, ComponentType.source, "u_ref")
-    errors += all_greater_than_zero(data, ComponentType.source, "sk")
-    errors += all_greater_than_or_equal_to_zero(data, ComponentType.source, "rx_ratio")
-    errors += all_greater_than_zero(data, ComponentType.source, "z01_ratio")
+    errors += _all_greater_than_zero(data, ComponentType.source, "u_ref")
+    errors += _all_greater_than_zero(data, ComponentType.source, "sk")
+    errors += _all_greater_than_or_equal_to_zero(data, ComponentType.source, "rx_ratio")
+    errors += _all_greater_than_zero(data, ComponentType.source, "z01_ratio")
     return errors


 def validate_generic_load_gen(data: SingleDataset, component: ComponentType) -> list[ValidationError]:
     errors = validate_appliance(data, component)
-    errors += all_valid_enum_values(data, component, "type", LoadGenType)
+    errors += _all_valid_enum_values(data, component, "type", LoadGenType)
     return errors
@@ -844,17 +847,17 @@ def validate_shunt(data: SingleDataset) -> list[ValidationError]:


 def validate_generic_voltage_sensor(data: SingleDataset, component: ComponentType) -> list[ValidationError]:
     errors = validate_base(data, component)
-    errors += all_greater_than_zero(data, component, "u_sigma")
-    errors += all_greater_than_zero(data, component, "u_measured")
-    errors += all_valid_ids(data, component, "measured_object", ComponentType.node)
+    errors += _all_greater_than_zero(data, component, "u_sigma")
+    errors += _all_greater_than_zero(data, component, "u_measured")
+    errors += _all_valid_ids(data, component, "measured_object", ComponentType.node)
     return errors


 def validate_generic_power_sensor(data: SingleDataset, component: ComponentType) -> list[ValidationError]:
     errors = validate_base(data, component)
-    errors += all_greater_than_zero(data, component, "power_sigma")
-    errors += all_valid_enum_values(data, component, "measured_terminal_type", MeasuredTerminalType)
-    errors += all_valid_ids(
+    errors += _all_greater_than_zero(data, component, "power_sigma")
+    errors += _all_valid_enum_values(data, component, "measured_terminal_type", MeasuredTerminalType)
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
@@ -872,70 +875,70 @@ def validate_generic_power_sensor(data: SingleDataset, component: ComponentType)
             ComponentType.asym_gen,
         ],
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=[ComponentType.line, ComponentType.generic_branch, ComponentType.transformer],
         measured_terminal_type=MeasuredTerminalType.branch_from,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=[ComponentType.line, ComponentType.generic_branch, ComponentType.transformer],
         measured_terminal_type=MeasuredTerminalType.branch_to,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=ComponentType.source,
         measured_terminal_type=MeasuredTerminalType.source,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=ComponentType.shunt,
         measured_terminal_type=MeasuredTerminalType.shunt,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=[ComponentType.sym_load, ComponentType.asym_load],
         measured_terminal_type=MeasuredTerminalType.load,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=[ComponentType.sym_gen, ComponentType.asym_gen],
         measured_terminal_type=MeasuredTerminalType.generator,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=ComponentType.three_winding_transformer,
         measured_terminal_type=MeasuredTerminalType.branch3_1,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=ComponentType.three_winding_transformer,
         measured_terminal_type=MeasuredTerminalType.branch3_2,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
         ref_components=ComponentType.three_winding_transformer,
         measured_terminal_type=MeasuredTerminalType.branch3_3,
     )
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="measured_object",
@@ -943,27 +946,27 @@ def validate_generic_power_sensor(data: SingleDataset, component: ComponentType)
         measured_terminal_type=MeasuredTerminalType.node,
     )

     if component in ("sym_power_sensor", "asym_power_sensor"):
-        errors += valid_p_q_sigma(data, component)
+        errors += _valid_p_q_sigma(data, component)

     return errors


 def validate_fault(data: SingleDataset) -> list[ValidationError]:
     errors = validate_base(data, ComponentType.fault)
-    errors += all_boolean(data, ComponentType.fault, "status")
-    errors += all_valid_enum_values(data, ComponentType.fault, "fault_type", FaultType)
-    errors += all_valid_enum_values(data, ComponentType.fault, "fault_phase", FaultPhase)
-    errors += all_valid_fault_phases(data, ComponentType.fault, "fault_type", "fault_phase")
-    errors += all_valid_ids(data, ComponentType.fault, field="fault_object", ref_components=ComponentType.node)
-    errors += all_greater_than_or_equal_to_zero(data, ComponentType.fault, "r_f")
-    errors += all_enabled_identical(data, ComponentType.fault, "fault_type", "status")
-    errors += all_enabled_identical(data, ComponentType.fault, "fault_phase", "status")
+    errors += _all_boolean(data, ComponentType.fault, "status")
+    errors += _all_valid_enum_values(data, ComponentType.fault, "fault_type", FaultType)
+    errors += _all_valid_enum_values(data, ComponentType.fault, "fault_phase", FaultPhase)
+    errors += _all_valid_fault_phases(data, ComponentType.fault, "fault_type", "fault_phase")
+    errors += _all_valid_ids(data, ComponentType.fault, field="fault_object", ref_components=ComponentType.node)
+    errors += _all_greater_than_or_equal_to_zero(data, ComponentType.fault, "r_f")
+    errors += _all_enabled_identical(data, ComponentType.fault, "fault_type", "status")
+    errors += _all_enabled_identical(data, ComponentType.fault, "fault_phase", "status")
     return errors


 def validate_regulator(data: SingleDataset, component: ComponentType) -> list[ValidationError]:
     errors = validate_base(data, component)
-    errors += all_valid_ids(
+    errors += _all_valid_ids(
         data,
         component,
         field="regulated_object",
@@ -974,11 +977,11 @@ def validate_transformer_tap_regulator(data: SingleDataset) -> list[ValidationEr

 def validate_transformer_tap_regulator(data: SingleDataset) -> list[ValidationError]:
     errors = validate_regulator(data, ComponentType.transformer_tap_regulator)
-    errors += all_boolean(data, ComponentType.transformer_tap_regulator, "status")
-    errors += all_valid_enum_values(
+    errors += _all_boolean(data, ComponentType.transformer_tap_regulator, "status")
+    errors += _all_valid_enum_values(
         data, ComponentType.transformer_tap_regulator, "control_side", [BranchSide, Branch3Side]
     )
-    errors += all_valid_associated_enum_values(
+    errors += _all_valid_associated_enum_values(
         data,
         ComponentType.transformer_tap_regulator,
         "control_side",
@@ -986,7 +989,7 @@ def validate_transformer_tap_regulator(data: SingleDataset) -> list[ValidationEr
         [ComponentType.transformer],
         [BranchSide],
     )
-    errors += all_valid_associated_enum_values(
+    errors += _all_valid_associated_enum_values(
         data,
         ComponentType.transformer_tap_regulator,
         "control_side",
@@ -994,15 +997,15 @@ def validate_transformer_tap_regulator(data: SingleDataset) -> list[ValidationEr
         [ComponentType.three_winding_transformer],
         [Branch3Side],
     )
-    errors += all_greater_than_or_equal_to_zero(data, ComponentType.transformer_tap_regulator, "u_set")
-    errors += all_greater_than_zero(data, ComponentType.transformer_tap_regulator, "u_band")
-    errors += all_greater_than_or_equal_to_zero(
+    errors += _all_greater_than_or_equal_to_zero(data, ComponentType.transformer_tap_regulator, "u_set")
+    errors += _all_greater_than_zero(data, ComponentType.transformer_tap_regulator, "u_band")
+    errors += _all_greater_than_or_equal_to_zero(
         data, ComponentType.transformer_tap_regulator, "line_drop_compensation_r", 0.0
     )
-    errors += all_greater_than_or_equal_to_zero(
+    errors += _all_greater_than_or_equal_to_zero(
         data, ComponentType.transformer_tap_regulator, "line_drop_compensation_x", 0.0
     )
-    errors += all_supported_tap_control_side(
+    errors += _all_supported_tap_control_side(
         data,
         ComponentType.transformer_tap_regulator,
         "control_side",
diff --git a/tests/unit/deprecated/test_end_to_end.py b/tests/unit/deprecated/test_end_to_end.py
index f3d3013ac..d8516b818 100644
--- a/tests/unit/deprecated/test_end_to_end.py
+++ b/tests/unit/deprecated/test_end_to_end.py
@@ -6,8 +6,8 @@
 import pytest

+from power_grid_model._core.power_grid_model import PowerGridModel
 from power_grid_model._utils import convert_batch_dataset_to_batch_list
-from power_grid_model.core.power_grid_model import PowerGridModel
 from power_grid_model.utils import import_json_data

 from ..utils import compare_result
diff --git a/tests/unit/test_buffer_handling.py b/tests/unit/test_buffer_handling.py
index 010333be5..95bfa65e3 100644
--- a/tests/unit/test_buffer_handling.py
+++ b/tests/unit/test_buffer_handling.py
@@ -9,9 +9,9 @@
 import numpy as np
 import pytest

-from power_grid_model.core.buffer_handling import _get_dense_buffer_properties, _get_sparse_buffer_properties
-from power_grid_model.core.dataset_definitions import ComponentType, DatasetType
-from power_grid_model.core.power_grid_meta import initialize_array, power_grid_meta_data
+from power_grid_model._core.buffer_handling import _get_dense_buffer_properties, _get_sparse_buffer_properties
+from power_grid_model._core.dataset_definitions import ComponentType, DatasetType
+from power_grid_model._core.power_grid_meta import initialize_array, power_grid_meta_data


 def load_data(component_type, is_batch, is_sparse, is_columnar):
diff --git a/tests/unit/test_data_handling.py b/tests/unit/test_data_handling.py
index 32959b961..1dd8fdf68 100644
--- a/tests/unit/test_data_handling.py
+++ b/tests/unit/test_data_handling.py
@@ -8,12 +8,12 @@
 import numpy as np
 import pytest

+from power_grid_model._core.data_handling import create_output_data
+from power_grid_model._core.dataset_definitions import ComponentType as CT, DatasetType as DT
+from power_grid_model._core.power_grid_core import VoidPtr
+from power_grid_model._core.power_grid_dataset import CMutableDataset
+from power_grid_model._core.power_grid_meta import initialize_array, power_grid_meta_data
 from power_grid_model._utils import is_columnar
-from power_grid_model.core.data_handling import create_output_data
-from power_grid_model.core.dataset_definitions import ComponentType as CT, DatasetType as DT
-from power_grid_model.core.power_grid_core import VoidPtr
-from power_grid_model.core.power_grid_dataset import CMutableDataset
-from power_grid_model.core.power_grid_meta import initialize_array, power_grid_meta_data
 from power_grid_model.enum import ComponentAttributeFilterOptions
@@ -168,5 +168,5 @@ def test_dtype_compatibility_check_compatible():
 def test_dtype_compatibility_check__error():
     nodes = initialize_array(DT.sym_output, CT.node, (1, 2))
     data = {CT.node: nodes.astype(nodes.dtype.newbyteorder("S"))}
-    with pytest.warns(DeprecationWarning):
+    with pytest.raises(ValueError):
         CMutableDataset(data, DT.sym_output)
diff --git a/tests/unit/test_dataset.py b/tests/unit/test_dataset.py
index 0fda8b9fa..88bef7c7e 100644
--- a/tests/unit/test_dataset.py
+++ b/tests/unit/test_dataset.py
@@ -5,9 +5,9 @@
 import numpy as np
 import pytest

-from power_grid_model.core.dataset_definitions import ComponentType, DatasetType
-from power_grid_model.core.power_grid_dataset import CConstDataset
-from power_grid_model.core.power_grid_meta import power_grid_meta_data
+from power_grid_model._core.dataset_definitions import ComponentType, DatasetType
+from power_grid_model._core.power_grid_dataset import CConstDataset
+from power_grid_model._core.power_grid_meta import power_grid_meta_data
 from power_grid_model.errors import PowerGridError
@@ -54,7 +54,7 @@ def test_const_dataset__conflicting_data():
     with pytest.raises(PowerGridError):
         CConstDataset(
             data={
-                "node": np.zeros(1, dtype=power_grid_meta_data["input"]["node"]),
+                ComponentType.node: np.zeros(1, dtype=power_grid_meta_data["input"][ComponentType.node]),
                 "sym_load": np.zeros(1, dtype=power_grid_meta_data["update"]["sym_load"]),
             }
         )
@@ -108,7 +108,7 @@ def test_const_dataset__sparse_batch_data(dataset_type):
     components = {ComponentType.node: 3, ComponentType.sym_load: 2, ComponentType.asym_load: 4, ComponentType.link: 4}
     data = {
         ComponentType.node: {
-            "data": np.zeros(shape=3, dtype=power_grid_meta_data[dataset_type]["node"]),
+            "data": np.zeros(shape=3, dtype=power_grid_meta_data[dataset_type][ComponentType.node]),
             "indptr": np.array([0, 2, 3, 3]),
         },
         ComponentType.sym_load: {
@@ -148,8 +148,8 @@ def test_const_dataset__sparse_batch_data(dataset_type):
 def test_const_dataset__mixed_batch_size(dataset_type):
     data = {
-        ComponentType.node: np.zeros(shape=(2, 3), dtype=power_grid_meta_data[dataset_type]["node"]),
-        ComponentType.line: np.zeros(shape=(3, 3), dtype=power_grid_meta_data[dataset_type]["line"]),
+        ComponentType.node: np.zeros(shape=(2, 3), dtype=power_grid_meta_data[dataset_type][ComponentType.node]),
+        ComponentType.line: np.zeros(shape=(3, 3), dtype=power_grid_meta_data[dataset_type][ComponentType.line]),
     }
     with pytest.raises(ValueError):
         CConstDataset(data, dataset_type)
@@ -158,10 +158,34 @@ def test_const_dataset__mixed_batch_size(dataset_type):
 @pytest.mark.parametrize("bad_indptr", (np.ndarray([0, 1]), np.ndarray([0, 3, 2]), np.ndarray([0, 1, 2, 3, 4])))
 def test_const_dataset__bad_sparse_data(dataset_type, bad_indptr):
     data = {
-        "node": {
-            "data": np.zeros(shape=2, dtype=power_grid_meta_data[dataset_type]["node"]),
+        ComponentType.node: {
+            "data": np.zeros(shape=2, dtype=power_grid_meta_data[dataset_type][ComponentType.node]),
             "indptr": bad_indptr,
         },
     }
     with pytest.raises(TypeError):
         CConstDataset(data, dataset_type)
+
+
+@pytest.mark.parametrize(
+    ("dtype", "supported"),
+    [
+        (power_grid_meta_data[DatasetType.input][ComponentType.node].dtype["id"], True),
+        ("
 dict[ComponentType, np.ndarray]:
     transformer["tap_pos"] = [-1, 6, -4]
     transformer["tap_min"] = [-2, 4, 3]
     transformer["tap_max"] = [2, -4, -3]
-    transformer["tap_nom"] = [-3, nan_type("transformer", "tap_nom"), 4]
+    transformer["tap_nom"] = [-3, _nan_type("transformer", "tap_nom"), 4]
     transformer["tap_size"] = [262.5, 0.0, -10.0]
-    transformer["uk_min"] = [0.0000000005, nan_type("transformer", "uk_min"), 0.9]
-    transformer["uk_max"] = [0.0000000005, nan_type("transformer", "uk_max"), 0.8]
-    transformer["pk_min"] = [300.0, 0.0, nan_type("transformer", "pk_min")]
-    transformer["pk_max"] = [400.0, -0.1, nan_type("transformer", "pk_max")]
+    transformer["uk_min"] = [0.0000000005, _nan_type("transformer", "uk_min"), 0.9]
+    transformer["uk_max"] = [0.0000000005, _nan_type("transformer", "uk_max"), 0.8]
+    transformer["pk_min"] = [300.0, 0.0, _nan_type("transformer", "pk_min")]
+    transformer["pk_max"] = [400.0, -0.1, _nan_type("transformer", "pk_max")]

     three_winding_transformer = initialize_array(DatasetType.input, ComponentType.three_winding_transformer, 4)
     three_winding_transformer["id"] = [1, 28, 29, 30]
@@ -137,49 +137,49 @@ def original_data() -> dict[ComponentType, np.ndarray]:
     three_winding_transformer["tap_min"] = [-10, -10, -10, -10]
     three_winding_transformer["tap_max"] = [10, 10, 10, 10]
     three_winding_transformer["tap_size"] = [-12, 0, 3, 130]
-    three_winding_transformer["tap_nom"] = [-12, 41, nan_type("three_winding_transformer", "tap_nom"), 0]
+    three_winding_transformer["tap_nom"] = [-12, 41, _nan_type("three_winding_transformer", "tap_nom"), 0]
     three_winding_transformer["uk_12_min"] = [
-        nan_type("three_winding_transformer", "uk_12_min"),
+        _nan_type("three_winding_transformer", "uk_12_min"),
         1.1,
         0.05,
-        nan_type("three_winding_transformer", "uk_12_min"),
+        _nan_type("three_winding_transformer", "uk_12_min"),
     ]
     three_winding_transformer["uk_13_min"] = [
-        nan_type("three_winding_transformer", "uk_13_min"),
+        _nan_type("three_winding_transformer", "uk_13_min"),
         1.2,
         0.3,
-        nan_type("three_winding_transformer", "uk_13_min"),
+        _nan_type("three_winding_transformer", "uk_13_min"),
     ]
     three_winding_transformer["uk_23_min"] = [
-        nan_type("three_winding_transformer", "uk_23_min"),
+        _nan_type("three_winding_transformer", "uk_23_min"),
         1,
         0.15,
-        nan_type("three_winding_transformer", "uk_23_min"),
+        _nan_type("three_winding_transformer", "uk_23_min"),
     ]
-    three_winding_transformer["pk_12_min"] = [-450, nan_type("three_winding_transformer", "pk_12_min"), 10, 40]
-    three_winding_transformer["pk_13_min"] = [-40, nan_type("three_winding_transformer", "pk_13_min"), 40, 50]
-    three_winding_transformer["pk_23_min"] = [-120, nan_type("three_winding_transformer", "pk_23_min"), 40, 30]
+    three_winding_transformer["pk_12_min"] = [-450, _nan_type("three_winding_transformer", "pk_12_min"), 10, 40]
+    three_winding_transformer["pk_13_min"] = [-40, _nan_type("three_winding_transformer", "pk_13_min"), 40, 50]
+    three_winding_transformer["pk_23_min"] = [-120, _nan_type("three_winding_transformer", "pk_23_min"), 40, 30]
     three_winding_transformer["uk_12_max"] = [
-        nan_type("three_winding_transformer", "uk_12_max"),
+        _nan_type("three_winding_transformer", "uk_12_max"),
         1.1,
         0.05,
-        nan_type("three_winding_transformer", "uk_12_max"),
+        _nan_type("three_winding_transformer", "uk_12_max"),
     ]
     three_winding_transformer["uk_13_max"] = [
-        nan_type("three_winding_transformer", "uk_13_max"),
+        _nan_type("three_winding_transformer", "uk_13_max"),
         1.2,
         0.3,
-        nan_type("three_winding_transformer", "uk_13_max"),
+        _nan_type("three_winding_transformer", "uk_13_max"),
     ]
     three_winding_transformer["uk_23_max"] = [
-        nan_type("three_winding_transformer", "uk_23_max"),
+        _nan_type("three_winding_transformer", "uk_23_max"),
         1,
         0.15,
-        nan_type("three_winding_transformer", "uk_23_max"),
+        _nan_type("three_winding_transformer", "uk_23_max"),
     ]
-    three_winding_transformer["pk_12_max"] = [-450, nan_type("three_winding_transformer", "pk_12_max"), 10, 40]
-    three_winding_transformer["pk_13_max"] = [-40, nan_type("three_winding_transformer", "pk_12_max"), 40, 50]
-    three_winding_transformer["pk_23_max"] = [-120, nan_type("three_winding_transformer", "pk_12_max"), 40, 30]
+    three_winding_transformer["pk_12_max"] = [-450, _nan_type("three_winding_transformer", "pk_12_max"), 10, 40]
+    three_winding_transformer["pk_13_max"] = [-40, _nan_type("three_winding_transformer", "pk_12_max"), 40, 50]
+    three_winding_transformer["pk_23_max"] = [-120, _nan_type("three_winding_transformer", "pk_12_max"), 40, 30]

     transformer_tap_regulator = initialize_array(DatasetType.input, ComponentType.transformer_tap_regulator, 5)
     transformer_tap_regulator["id"] = [51, 52, 53, 54, 1]
@@ -261,11 +261,13 @@ def original_data() -> dict[ComponentType, np.ndarray]:
     fault = initialize_array(DatasetType.input, ComponentType.fault, 20)
     fault["id"] = [1] + list(range(32, 51))
     fault["status"] = [0, -1, 2] + 17 * [1]
-    fault["fault_type"] = 6 * [0] + 4 * [1] + 4 * [2] + 4 * [3] + [nan_type("fault", "fault_type"), 4]
-    fault["fault_phase"] = list(range(1, 7)) + [0, 4, 5, 6] + 2 * list(range(4)) + [nan_type("fault", "fault_phase"), 7]
+    fault["fault_type"] = 6 * [0] + 4 * [1] + 4 * [2] + 4 * [3] + [_nan_type("fault", "fault_type"), 4]
+    fault["fault_phase"] = (
+        list(range(1, 7)) + [0, 4, 5, 6] + 2 * list(range(4)) + [_nan_type("fault", "fault_phase"), 7]
+    )
     fault["fault_object"] = [200, 3] + list(range(10, 28, 2)) + 9 * [0]
-    fault["r_f"] = [-1.0, 0.0, 1.0] + 17 * [nan_type("fault", "r_f")]
-    fault["x_f"] = [-1.0, 0.0, 1.0] + 17 * [nan_type("fault", "x_f")]
+    fault["r_f"] = [-1.0, 0.0, 1.0] + 17 * [_nan_type("fault", "r_f")]
+    fault["x_f"] = [-1.0, 0.0, 1.0] + 17 * [_nan_type("fault", "x_f")]

     data = {
         ComponentType.node: node,
@@ -654,7 +656,7 @@ def test_fault(input_data):
             ComponentType.fault,
             "fault_type",
             list(range(32, 51)),
-            5 * [0] + 4 * [1] + 4 * [2] + 4 * [3] + [nan_type("fault", "fault_type"), 4],
+            5 * [0] + 4 * [1] + 4 * [2] + 4 * [3] + [_nan_type("fault", "fault_type"), 4],
         )
         in validation_errors
     )
@@ -663,7 +665,7 @@ def test_fault(input_data):
             ComponentType.fault,
             "fault_phase",
             list(range(32, 51)),
-            list(range(2, 7)) + [0, 4, 5, 6] + 2 * list(range(4)) + [nan_type("fault", "fault_phase"), 7],
+            list(range(2, 7)) + [0, 4, 5, 6] + 2 * list(range(4)) + [_nan_type("fault", "fault_phase"), 7],
         )
         in validation_errors
     )
diff --git a/tests/unit/validation/test_utils.py b/tests/unit/validation/test_utils.py
index d2879dbaf..95634e960 100644
--- a/tests/unit/validation/test_utils.py
+++ b/tests/unit/validation/test_utils.py
@@ -7,39 +7,39 @@
 from power_grid_model import initialize_array
 from power_grid_model.validation.errors import NotGreaterThanError
-from power_grid_model.validation.utils import errors_to_string, eval_field_expression, update_input_data
+from power_grid_model.validation.utils import _eval_field_expression, _update_input_data, errors_to_string


 def test_eval_field_expression():
     data = np.array([(1.0, 2.0, 4.0), (8.0, 16.0, 0.0)], dtype=[("a", "f8"), ("b2", "f8"), ("c_3", "f8")])
-    np.testing.assert_array_equal(eval_field_expression(data, "a"), np.array([1, 8]))
-    np.testing.assert_array_equal(eval_field_expression(data, "a/b2"), np.array([0.5, 0.5]))
-    np.testing.assert_array_equal(eval_field_expression(data, "a / b2"), np.array([0.5, 0.5]))
-    np.testing.assert_array_equal(eval_field_expression(data, "a / c_3"), np.array([0.25, np.nan]))
+    np.testing.assert_array_equal(_eval_field_expression(data, "a"), np.array([1, 8]))
+    np.testing.assert_array_equal(_eval_field_expression(data, "a/b2"), np.array([0.5, 0.5]))
+    np.testing.assert_array_equal(_eval_field_expression(data, "a / b2"), np.array([0.5, 0.5]))
+    np.testing.assert_array_equal(_eval_field_expression(data, "a / c_3"), np.array([0.25, np.nan]))

     with pytest.raises(ValueError):
-        eval_field_expression(data, "a / 1")
+        _eval_field_expression(data, "a / 1")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "a + 100")
+        _eval_field_expression(data, "a + 100")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "a + 100.123")
+        _eval_field_expression(data, "a + 100.123")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "a + b2")
+        _eval_field_expression(data, "a + b2")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "a - b2")
+        _eval_field_expression(data, "a - b2")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "a * b2")
+        _eval_field_expression(data, "a * b2")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "a * -b2")
+        _eval_field_expression(data, "a * -b2")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "a + b2 + c_3")
+        _eval_field_expression(data, "a + b2 + c_3")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "max(a, b2)")
+        _eval_field_expression(data, "max(a, b2)")
     with pytest.raises(ValueError):
-        eval_field_expression(data, "(a + b2) / c_3")
+        _eval_field_expression(data, "(a + b2) / c_3")
     with pytest.raises(KeyError):
-        eval_field_expression(data, "a / b")
+        _eval_field_expression(data, "a / b")


 def assert_list_of_numpy_arrays_equal(expected, actual):
@@ -70,7 +70,7 @@ def test_update_input_data():
     update_test["id"] = [6, 5, 2, 3]
     update_test["q_specified"] = [np.nan, 5.2, np.nan, 3.2]

-    merged = update_input_data(input_data={"sym_load": input_test}, update_data={"sym_load": update_test})
+    merged = _update_input_data(input_data={"sym_load": input_test}, update_data={"sym_load": update_test})
     np.testing.assert_array_equal(merged["sym_load"]["id"], [4, 5, 6, 1, 2, 3])
     np.testing.assert_array_equal(merged["sym_load"]["p_specified"], [4.0, 5.0, 6.0, 1.0, 2.0, 3.0])
     np.testing.assert_array_equal(merged["sym_load"]["q_specified"], [4.1, 5.2, 6.1, np.nan, np.nan, 3.2])
@@ -85,7 +85,7 @@ def test_update_input_data__without_ids():
     update_test = initialize_array("update", "sym_load", 6)
     input_test["q_specified"] = [4.1, 5.2, np.nan, np.nan, np.nan, 3.2]

-    merged = update_input_data(input_data={"sym_load": input_test}, update_data={"sym_load": update_test})
+    merged = _update_input_data(input_data={"sym_load": input_test}, update_data={"sym_load": update_test})
     np.testing.assert_array_equal(merged["sym_load"]["id"], [4, 5, 6, 1, 2, 3])
     np.testing.assert_array_equal(merged["sym_load"]["p_specified"], [4.0, 5.0, 6.0, 1.0, 2.0, 3.0])
     np.testing.assert_array_equal(merged["sym_load"]["q_specified"], [4.1, 5.2, np.nan, np.nan, np.nan, 3.2])
@@ -100,7 +100,7 @@ def test_update_input_data_int_nan():
     update_line["id"] = [1, 3]
     update_line["from_status"] = [-128, 1]

-    merged = update_input_data(input_data={"line": input_line}, update_data={"line": update_line})
+    merged = _update_input_data(input_data={"line": input_line}, update_data={"line": update_line})

     np.testing.assert_array_equal(merged["line"]["from_status"], [0, -128, 1])
@@ -114,7 +114,7 @@ def test_update_input_data_asym_nans():
     update_load["id"] = [1, 2, 3]
     update_load["p_specified"] = [[np.nan, np.nan, np.nan], [np.nan, np.nan, 5.3], [6.1, 6.2, 6.3]]

-    merged = update_input_data(input_data={"asym_load": input_load}, update_data={"asym_load": update_load})
+    merged = _update_input_data(input_data={"asym_load": input_load}, update_data={"asym_load": update_load})

     np.testing.assert_array_equal(
         merged["asym_load"]["p_specified"], [[1.1, 1.2, 1.3], [2.1, np.nan, 5.3], [6.1, 6.2, 6.3]]
diff --git a/tests/unit/validation/test_validation_functions.py b/tests/unit/validation/test_validation_functions.py
index 73c1102d3..6349106a9 100644
--- a/tests/unit/validation/test_validation_functions.py
+++ b/tests/unit/validation/test_validation_functions.py
@@ -9,16 +9,9 @@
 import pytest

 from power_grid_model import CalculationType, LoadGenType, MeasuredTerminalType, initialize_array, power_grid_meta_data
+from power_grid_model._core.dataset_definitions import ComponentType, DatasetType
 from power_grid_model._utils import compatibility_convert_row_columnar_dataset
-from power_grid_model.core.dataset_definitions import ComponentType, DatasetType
-from power_grid_model.enum import (
-    Branch3Side,
-    BranchSide,
-    CalculationType,
-    ComponentAttributeFilterOptions,
-    FaultType,
-    TapChangingStrategy,
-)
+from power_grid_model.enum import Branch3Side, BranchSide, CalculationType, ComponentAttributeFilterOptions, FaultType
 from power_grid_model.validation import assert_valid_input_data
 from power_grid_model.validation.errors import (
     IdNotInDatasetError,
@@ -649,17 +642,17 @@ def single_component_twice_data():

 @pytest.mark.parametrize("measured_terminal_type", MeasuredTerminalType)
 @patch("power_grid_model.validation.validation.validate_base", new=MagicMock())
-@patch("power_grid_model.validation.validation.all_greater_than_zero", new=MagicMock())
-@patch("power_grid_model.validation.validation.all_valid_enum_values", new=MagicMock())
-@patch("power_grid_model.validation.validation.all_valid_ids")
+@patch("power_grid_model.validation.validation._all_greater_than_zero", new=MagicMock())
+@patch("power_grid_model.validation.validation._all_valid_enum_values", new=MagicMock())
+@patch("power_grid_model.validation.validation._all_valid_ids")
 def test_validate_generic_power_sensor__all_terminal_types(
-    all_valid_ids: MagicMock, measured_terminal_type: MeasuredTerminalType
+    _all_valid_ids: MagicMock, measured_terminal_type: MeasuredTerminalType
 ):
     # Act
     validate_generic_power_sensor(data={}, component="")  # type: ignore

     # Assert
-    all_valid_ids.assert_any_call(
+    _all_valid_ids.assert_any_call(
         ANY, ANY, field=ANY, ref_components=ANY, measured_terminal_type=measured_terminal_type
     )
@@ -680,17 +673,17 @@ def test_validate_generic_power_sensor__all_terminal_types(
     ],
 )
 @patch("power_grid_model.validation.validation.validate_base", new=MagicMock())
-@patch("power_grid_model.validation.validation.all_greater_than_zero", new=MagicMock())
-@patch("power_grid_model.validation.validation.all_valid_enum_values", new=MagicMock())
-@patch("power_grid_model.validation.validation.all_valid_ids")
+@patch("power_grid_model.validation.validation._all_greater_than_zero", new=MagicMock())
+@patch("power_grid_model.validation.validation._all_valid_enum_values", new=MagicMock())
+@patch("power_grid_model.validation.validation._all_valid_ids")
 def test_validate_generic_power_sensor__terminal_types(
-    all_valid_ids: MagicMock, ref_component: str | list[str], measured_terminal_type: MeasuredTerminalType
+    _all_valid_ids: MagicMock, ref_component: str | list[str], measured_terminal_type: MeasuredTerminalType
 ):
     # Act
     validate_generic_power_sensor(data={}, component="")  # type: ignore

     # Assert
-    all_valid_ids.assert_any_call(
+    _all_valid_ids.assert_any_call(
         ANY, ANY, field=ANY, ref_components=ref_component, measured_terminal_type=measured_terminal_type
     )
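
Note on the renames above: everything that lived under `power_grid_model.core` now lives under the private `power_grid_model._core`, and the validation helpers gain a leading underscore. Downstream code should avoid both spellings and stick to the public package surface. A minimal migration sketch, assuming the package-level re-exports (`initialize_array`, `ComponentType`, `DatasetType`) remain public, as the updated tests themselves suggest:

    # Hypothetical downstream snippet; illustrative only.
    # Before: from power_grid_model.core.power_grid_meta import initialize_array
    # After: use the public package surface instead of the now-private _core.
    from power_grid_model import ComponentType, DatasetType, initialize_array

    node = initialize_array(DatasetType.input, ComponentType.node, 2)  # input array for 2 nodes
    node["id"] = [1, 2]
    node["u_rated"] = [10.5e3, 10.5e3]  # rated voltage in V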
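Because the validation helpers are private after this change, any `unittest.mock.patch` target must use the underscore-prefixed name, exactly as the updated decorators above do. A sketch of the pattern (the test name is hypothetical), assuming the helpers keep living in `power_grid_model.validation.validation`:

    from unittest.mock import ANY, MagicMock, patch

    from power_grid_model.validation.validation import validate_generic_power_sensor

    # Patch each helper in the module where it is looked up, under its new private name.
    @patch("power_grid_model.validation.validation.validate_base", new=MagicMock())
    @patch("power_grid_model.validation.validation._all_greater_than_zero", new=MagicMock())
    @patch("power_grid_model.validation.validation._all_valid_enum_values", new=MagicMock())
    @patch("power_grid_model.validation.validation._all_valid_ids")
    def test_patching_private_helpers(_all_valid_ids: MagicMock):
        validate_generic_power_sensor(data={}, component="")  # type: ignore
        _all_valid_ids.assert_any_call(ANY, ANY, field=ANY, ref_components=ANY, measured_terminal_type=ANY)

Patching the old public names would raise `AttributeError` at patch time, which is how stale test doubles surface after such a rename.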
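The `test_data_handling.py` hunk also records a behavioural change: passing `CMutableDataset` an array whose dtype is not in native byte order now raises `ValueError`, where it previously only triggered a `DeprecationWarning`. Callers can normalise the byte order up front; a sketch using public entry points only (the last line is the essential conversion):

    from power_grid_model import ComponentType, DatasetType, initialize_array

    nodes = initialize_array(DatasetType.sym_output, ComponentType.node, (1, 2))
    swapped = nodes.astype(nodes.dtype.newbyteorder("S"))  # byte-swapped dtype: now rejected
    native = swapped.astype(swapped.dtype.newbyteorder("="))  # convert back to native byte order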
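Finally, the `test_dataset.py` hunks standardise dictionary keys and `power_grid_meta_data` lookups on the `ComponentType`/`DatasetType` enums instead of bare strings such as `"node"`. A small sketch; the equality assertion holds only under the assumption that these remain `str`-based enums, so string keys keep working as aliases:

    from power_grid_model import ComponentType, DatasetType, power_grid_meta_data

    # Enum members and plain strings compare equal for str-based enums,
    # but the enum spelling is the canonical one in the test suite now.
    assert ComponentType.node == "node"
    node_dtype = power_grid_meta_data[DatasetType.input][ComponentType.node].dtype  # numpy structured dtype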