From 5fba919a3890de6773e154496373fa43b8c451ee Mon Sep 17 00:00:00 2001 From: Bryn Pickering <17178478+brynpickering@users.noreply.github.com> Date: Tue, 24 Oct 2023 10:11:25 +0100 Subject: [PATCH] Merge branch "main" into "update-param-names" --- .github/workflows/pr-ci.yml | 1 + .gitignore | 1 + doc/_static/notebooks/logging.ipynb | 286 ++++++++++++++++++++ doc/conf.py | 10 +- doc/user/advanced_features.rst | 2 +- doc/user/troubleshooting.rst | 27 +- src/calliope/backend/backend_model.py | 32 ++- src/calliope/backend/latex_backend_model.py | 9 +- src/calliope/backend/parsing.py | 42 ++- src/calliope/backend/pyomo_backend_model.py | 93 +++++-- src/calliope/core/model.py | 29 +- src/calliope/core/util/logging.py | 7 +- src/calliope/preprocess/model_data.py | 8 +- tests/common/lp_files/balance_conversion.lp | 8 +- tests/common/lp_files/flow_out_max.lp | 24 +- tests/common/lp_files/source_max.lp | 8 +- tests/common/lp_files/storage_max.lp | 12 +- tests/test_backend_parsing.py | 46 ++-- tests/test_backend_pyomo.py | 27 +- tests/test_constraint_results.py | 3 +- tests/test_core_model.py | 6 +- tests/test_core_util.py | 40 ++- tests/test_io.py | 2 +- tests/test_model_data.py | 12 +- 24 files changed, 583 insertions(+), 152 deletions(-) create mode 100644 doc/_static/notebooks/logging.ipynb diff --git a/.github/workflows/pr-ci.yml b/.github/workflows/pr-ci.yml index e6ad47517..12e89ff2b 100644 --- a/.github/workflows/pr-ci.yml +++ b/.github/workflows/pr-ci.yml @@ -25,6 +25,7 @@ concurrency: jobs: test: + if: github.event.pull_request.draft == false strategy: matrix: os: [ubuntu-latest] diff --git a/.gitignore b/.gitignore index 30738495e..f9474f95b 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,4 @@ dist/ doc/_build/ doc/_static/notebooks/*.html .ipynb_checkpoints +outputs/ diff --git a/doc/_static/notebooks/logging.ipynb b/doc/_static/notebooks/logging.ipynb new file mode 100644 index 000000000..a5472e9aa --- /dev/null +++ b/doc/_static/notebooks/logging.ipynb @@ -0,0 +1,286 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Calliope logging examples\n", + "\n", + "In this notebook, we will look at ways of capturing calliope logging outputs and printing them to the console or to file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "from pathlib import Path\n", + "\n", + "import calliope" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using internal Calliope functionality\n", + "The `calliope.set_log_verbosity` method allows you to quickly set the logging level across all calliope loggers.\n", + "It doesn't require you to know anything about the `logging` package, just the available [logging levels](https://docs.python.org/3/library/logging.html#logging-levels)." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This is the default log verbosity that is set on importing calliope.\n",
+    "# It will print the WARNING and INFO log levels to the console\n",
+    "# and it will print the solver output (which is otherwise at the DEBUG log level)\n",
+    "calliope.set_log_verbosity(\"info\")\n",
+    "m = calliope.examples.national_scale()\n",
+    "m.build()\n",
+    "m.solve()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This will print WARNING and INFO log levels to the console but *NOT* the solver output\n",
+    "calliope.set_log_verbosity(\"info\", include_solver_output=False)\n",
+    "m = calliope.examples.national_scale()\n",
+    "m.build()\n",
+    "m.solve()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# You can set the log verbosity to print all DEBUG level logs to the console\n",
+    "calliope.set_log_verbosity(\"debug\")\n",
+    "m = calliope.examples.national_scale()\n",
+    "m.build()\n",
+    "m.solve()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Adding your own console logging handler\n",
+    "If the `calliope.set_log_verbosity` method isn't providing you with enough flexibility, then you can add your own logging `handlers`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Grab the calliope logger, which will also automatically capture all the child loggers (e.g., `calliope.core.model`).\n",
+    "logger = logging.getLogger(\"calliope\")\n",
+    "\n",
+    "# Remove existing handlers (i.e., those introduced by `calliope.set_log_verbosity` above)\n",
+    "logger.handlers.clear()\n",
+    "\n",
+    "# You can define your own custom formatter.\n",
+    "# See https://docs.python.org/3/library/logging.html#logrecord-attributes for available attributes.\n",
+    "formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n",
+    "\n",
+    "# Add a console handler (this is what `calliope.set_log_verbosity` is doing under the hood)\n",
+    "console_handler = logging.StreamHandler()\n",
+    "console_handler.setLevel(\n",
+    "    logging.INFO\n",
+    ")  # In this example, we only want to see INFO-level messages and above in the console\n",
+    "console_handler.setFormatter(formatter)\n",
+    "logger.addHandler(console_handler)\n",
+    "\n",
+    "# You can also use logging in your scripts to add more information\n",
+    "logger.info(\"Loading the national-scale example model\")\n",
+    "m = calliope.examples.national_scale()\n",
+    "\n",
+    "logger.info(\"Building the national-scale example model optimisation problem\")\n",
+    "m.build()\n",
+    "\n",
+    "logger.info(\"Solving the national-scale example model optimisation problem\")\n",
+    "m.solve()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Adding your own file logging handler\n",
+    "You may find it more practical to store logging information in files, particularly if you are running your model on a remote device or if you have a *very* large model.\n",
+    "\n",
+    "Then, you can search through your logs using your favourite IDE."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Grab the calliope logger, which will also automatically capture all the child loggers (e.g., `calliope.core.model`).\n",
+    "logger = logging.getLogger(\"calliope\")\n",
+    "\n",
+    "# Remove existing handlers (i.e., those introduced earlier in this notebook)\n",
+    "logger.handlers.clear()\n",
+    "\n",
+    "# You can define your own custom formatter.\n",
+    "# See https://docs.python.org/3/library/logging.html#logrecord-attributes for available attributes.\n",
+    "formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n",
+    "\n",
+    "log_filepath = Path(\".\") / \"outputs\"\n",
+    "log_filepath.mkdir(parents=True, exist_ok=True)\n",
+    "\n",
+    "# Set up a file handler, which will store log outputs in a file\n",
+    "file_handler = logging.FileHandler(log_filepath / \"calliope.log\")\n",
+    "file_handler.setLevel(logging.DEBUG)\n",
+    "file_handler.setFormatter(formatter)\n",
+    "logger.addHandler(file_handler)\n",
+    "\n",
+    "# You can also use logging in your scripts to add more information\n",
+    "logger.info(\"Loading the national-scale example model\")\n",
+    "m = calliope.examples.national_scale()\n",
+    "\n",
+    "logger.info(\"Building the national-scale example model optimisation problem\")\n",
+    "m.build()\n",
+    "\n",
+    "logger.info(\"Solving the national-scale example model optimisation problem\")\n",
+    "m.solve()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Notice that there is no logging to the console here, but that there is now a file `outputs/calliope.log` that contains the logging information.\n",
+    "\n",
+    "We can also log both to the console at one level and to file at another:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Grab the calliope logger, which will also automatically capture all the child loggers (e.g., `calliope.core.model`).\n",
+    "logger = logging.getLogger(\"calliope\")\n",
+    "\n",
+    "# Remove existing handlers (i.e., those introduced earlier in this notebook)\n",
+    "logger.handlers.clear()\n",
+    "\n",
+    "# You can define your own custom formatter.\n",
+    "# See https://docs.python.org/3/library/logging.html#logrecord-attributes for available attributes.\n",
+    "formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n",
+    "\n",
+    "# Add a console handler (this is what `calliope.set_log_verbosity` is doing under the hood)\n",
+    "console_handler = logging.StreamHandler()\n",
+    "# Log to console at the INFO level\n",
+    "console_handler.setLevel(logging.INFO)\n",
+    "console_handler.setFormatter(formatter)\n",
+    "logger.addHandler(console_handler)\n",
+    "\n",
+    "log_filepath = Path(\".\") / \"outputs\"\n",
+    "log_filepath.mkdir(parents=True, exist_ok=True)\n",
+    "\n",
+    "# Set up a file handler, which will store log outputs in a file\n",
+    "file_handler = logging.FileHandler(log_filepath / \"calliope.log\")\n",
+    "# Log to file at the DEBUG level\n",
+    "file_handler.setLevel(logging.DEBUG)\n",
+    "file_handler.setFormatter(formatter)\n",
+    "logger.addHandler(file_handler)\n",
+    "\n",
+    "# You can also use logging in your scripts to add more information\n",
+    "logger.info(\"Loading the national-scale example model\")\n",
+    "m = calliope.examples.national_scale()\n",
+    "\n",
+    "logger.info(\"Building the national-scale example model optimisation problem\")\n",
+    "m.build()\n",
+    "\n",
+    "logger.info(\"Solving the national-scale example model optimisation problem\")\n",
+    "m.solve()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The log file will contain all calliope child logger outputs in one place.\n",
+    "You will notice the name of the logger, which corresponds to the file where the log was recorded, in the second field of each log message.\n",
+    "\n",
+    "We can store each of these child loggers to a different file if we like:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Grab the calliope logger, which will also automatically capture all the child loggers (e.g., `calliope.core.model`).\n",
+    "logger = logging.getLogger(\"calliope\")\n",
+    "\n",
+    "# Remove existing handlers (i.e., those introduced earlier in this notebook)\n",
+    "logger.handlers.clear()\n",
+    "\n",
+    "# You can define your own custom formatter.\n",
+    "# Here we don't include the logger name, as the filename will contain that information.\n",
+    "# See https://docs.python.org/3/library/logging.html#logrecord-attributes for available attributes.\n",
+    "formatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n",
+    "\n",
+    "log_filepath = Path(\".\") / \"outputs\"\n",
+    "log_filepath.mkdir(parents=True, exist_ok=True)\n",
+    "\n",
+    "for logger_name in logging.root.manager.loggerDict.keys():\n",
+    "    if not logger_name.startswith(\"calliope\"):\n",
+    "        # There are lots of other loggers that calliope imports from its dependencies, which we will ignore.\n",
+    "        # You can also dump these logs to files if you like, by removing this conditional statement.\n",
+    "        continue\n",
+    "    # Set up a file handler, which will store log outputs in a file\n",
+    "    file_handler = logging.FileHandler(log_filepath / f\"{logger_name}.log\")\n",
+    "    # Log to file at the DEBUG level\n",
+    "    file_handler.setLevel(logging.DEBUG)\n",
+    "    file_handler.setFormatter(formatter)\n",
+    "    logging.getLogger(logger_name).addHandler(file_handler)\n",
+    "\n",
+    "m = calliope.examples.national_scale()\n",
+    "m.build()\n",
+    "m.solve()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.5"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/doc/conf.py b/doc/conf.py
index 56ce8d7a6..c2db0fd0e 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -103,7 +103,15 @@
 
 # Mock modules for Read The Docs autodoc generation
-MOCK_MODULES = ["xarray", "pandas", "numpy", "pyomo", "sklearn", "pyparsing", "netCDF4"]
+MOCK_MODULES = [
+    "xarray",
+    "pandas",
+    "numpy",
+    "pyomo",
+    "sklearn",
+    "pyparsing",
+    "netCDF4",
+]
 autodoc_mock_imports = MOCK_MODULES
 autodoc_typehints = "both"
 autodoc_member_order = "bysource"
 
diff --git a/doc/user/advanced_features.rst b/doc/user/advanced_features.rst
index f2bbecc86..c895f63b0 100644
--- a/doc/user/advanced_features.rst
+++ b/doc/user/advanced_features.rst
@@ -98,7 +98,7 @@ For example:
             parent: supply
             carrier: power
         constraints:
-            resource: file=pv_resource.csv
+            source_max: file=pv_resource.csv
             lifetime: 30
         costs:
             monetary:
diff --git a/doc/user/troubleshooting.rst b/doc/user/troubleshooting.rst
index 5f81a80fc..4fac3322c 100644
--- a/doc/user/troubleshooting.rst
+++ b/doc/user/troubleshooting.rst
@@ -151,12 +151,31 @@ Particularly if your problem is large, you may not want to rebuild the backend t
 Debugging model errors
 ----------------------
 
-Calliope provides a method to save its fully built and commented internal representation of a model to a single YAML file with :python:`Model.save_commented_model_yaml(path)`. Comments in the resulting YAML file indicate where original values were overridden.
+Inspecting debug logs
+^^^^^^^^^^^^^^^^^^^^^
+At the `debug` logging level, we output a lot of information about your model build, which may be useful to inspect.
+You can do so by setting :python:`calliope.set_log_verbosity("debug")` and then running your model.
 
-Because this is Calliope's internal representation of a model directly before the ``model_data`` ``xarray.Dataset`` is built, it can be useful for debugging possible issues in the model formulation, for example, undesired constraints that exist at specific locations because they were specified model-wide without having been superseded by location-specific settings.
+If you have a bit more Python experience, you can also consider accessing and working with loggers at different points in our internals using the `logging` package.
 
-Further processing of the data does occur before solving the model. The final values of parameters used by the backend solver to generate constraints can be analysed when running an interactive Python session by running :python:`model.backend.get_input_params()`. This provides a user with an xarray Dataset which will look very similar to :python:`model.inputs`, except that assumed :ref:`default values ` will be included. An attempt at running the model has to be made in order to be able to run this command.
+- For input YAML and CSV file processing: :python:`logging.getLogger("calliope.preprocess")`.
+- For processing of math syntax: :python:`logging.getLogger("calliope.backend")`.
+
+For more examples of using loggers, see :nbviewer_docs:`this notebook <_static/notebooks/logging.ipynb>`.
+
+Validating your math syntax
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+You can do a (relatively) quick check of the validity of any custom math syntax in your model by running :python:`model.validate_math_strings(my_math_dict)`.
+This will tell you if any of the syntax is malformed, although it cannot tell you if the model will build when sent to the backend to generate an optimisation model.
+
+Inspecting private data structures
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+There are private attributes of the Calliope `Model` object that you can access to gain additional insights into your model runs.
+
+- For the result of processing your input YAML files, the dictionary :python:`model._model_run`.
+- For all data variables in one place (i.e., the combination of `inputs` and `results`), the dataset :python:`model._model_data`.
+- For the built backend objects (e.g., Pyomo objects) in an array format, the dataset :python:`model.backend._dataset`.
 
 .. seealso::
 
-    If using Calliope interactively in a Python session, we recommend reading up on the `Python debugger <https://docs.python.org/3/library/pdb.html>`_ and (if using Jupyter notebooks) making use of the `%debug magic <https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-debug>`_.
+    If using Calliope interactively in a Python session, we recommend reading up on the `Python debugger <https://docs.python.org/3/library/pdb.html>`_ and making use of the `%debug magic <https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-debug>`_.
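For quick reference, the debugging workflow that the troubleshooting additions above describe might look as follows in an interactive session. This is an illustrative sketch rather than part of the patch: `my_math_dict` is a hypothetical user-defined custom math dictionary, and the bundled national-scale example model stands in for your own model.

    import logging

    import calliope

    # Print debug-level logs to the console while the model is loaded and built.
    calliope.set_log_verbosity("debug")

    # Optionally narrow in on one part of the pipeline, e.g. math processing only.
    logging.getLogger("calliope.backend").setLevel(logging.DEBUG)

    model = calliope.examples.national_scale()

    # Check that custom math syntax parses before sending it to the backend.
    # `my_math_dict` is a hypothetical example of user-defined custom math.
    my_math_dict = {
        "constraints": {
            "my_constraint": {
                "foreach": ["nodes", "techs"],
                "equations": [{"expression": "flow_out <= flow_cap"}],
            }
        }
    }
    model.validate_math_strings(my_math_dict)

    # Inspect private data structures for further clues.
    print(model._model_run.keys())  # processed input YAML
    print(model._model_data)  # all data variables (inputs + results) in one dataset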
diff --git a/src/calliope/backend/backend_model.py b/src/calliope/backend/backend_model.py
index d9007c807..34d3052f8 100644
--- a/src/calliope/backend/backend_model.py
+++ b/src/calliope/backend/backend_model.py
@@ -35,7 +35,7 @@
     "variables", "constraints", "objectives", "parameters", "global_expressions"
 ]
 
-logger = logging.getLogger(__name__)
+LOGGER = logging.getLogger(__name__)
 
 
 class BackendModelGenerator(ABC):
@@ -52,6 +52,7 @@ def __init__(self, inputs: xr.Dataset):
         self._dataset = xr.Dataset()
         self.inputs = inputs
         self.valid_math_element_names: set = set()
+        self._solve_logger = logging.getLogger(__name__ + ".")
 
     @abstractmethod
     def add_parameter(
@@ -160,6 +161,25 @@ def add_objective(
                 Objective configuration dictionary, ready to be parsed and then evaluated.
         """
 
+    def log(
+        self,
+        component_type: _COMPONENTS_T,
+        component_name: str,
+        message: str,
+        level: Literal["info", "warning", "debug", "error", "critical"] = "debug",
+    ):
+        """Log to module-level logger with some prettification of the message.
+
+        Args:
+            component_type (_COMPONENTS_T): Type of the optimisation problem component being logged (e.g., "constraints").
+            component_name (str): Name of the optimisation problem component being logged.
+            message (str): Message to log.
+            level (Literal["info", "warning", "debug", "error", "critical"], optional): Log level. Defaults to "debug".
+        """
+        getattr(LOGGER, level)(
+            f"Optimisation model | {component_type}:{component_name} | {message}"
+        )
+
     def _add_component(
         self,
         name: str,
@@ -194,9 +214,8 @@
         component_dict = self.inputs.math[component_type][name]
 
         if break_early and component_dict.get("active", False):
-            logger.debug(
-                f"({component_type.removesuffix('s')}, {name}): "
-                "Component deactivated and therefore not built."
+            self.log(
+                component_type, name, "Component deactivated and therefore not built."
             )
             return None
 
@@ -246,9 +265,7 @@
                         substring = "trying to set two equations for the same component."
 
                     self.delete_component(name, component_type)
-                    raise BackendError(
-                        f"({component_type.removesuffix('s')}, {element.name}): {substring}"
-                    )
+                    raise BackendError(f"{element.name} | {substring}")
 
             to_fill = component_setter(element, where, references)
             component_da = component_da.fillna(to_fill)
@@ -310,6 +327,9 @@ def _add_all_inputs_as_parameters(self) -> None:
         for param_name, default_val in self.inputs.attrs["defaults"].items():
             if param_name in self.parameters.keys():
                 continue
+            self.log(
+                "parameters", param_name, "Component not defined; using default value."
+            )
             self.add_parameter(
                 param_name, xr.DataArray(default_val), use_inf_as_na=False
            )
diff --git a/src/calliope/backend/latex_backend_model.py b/src/calliope/backend/latex_backend_model.py
index f7f0311f3..19dbeafa9 100644
--- a/src/calliope/backend/latex_backend_model.py
+++ b/src/calliope/backend/latex_backend_model.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import logging
 import textwrap
 import typing
 from pathlib import Path
@@ -15,6 +16,9 @@
 
 _ALLOWED_MATH_FILE_FORMATS = Literal["tex", "rst", "md"]
 
+LOGGER = logging.getLogger(__name__)
+
+
 class MathDocumentation:
     def __init__(self, backend_builder: Callable) -> None:
         """Math documentation builder/writer
@@ -95,11 +99,14 @@ def write(  # noqa: F811
 
         if format is None and filename is not None:
             format = Path(filename).suffix.removeprefix(".")  # type: ignore
+            LOGGER.info(
+                f"Inferring math documentation format from filename as `{format}`."
+ ) allowed_formats = typing.get_args(_ALLOWED_MATH_FILE_FORMATS) if format is None or format not in allowed_formats: raise ValueError( - f"Math documentation style must be one of {allowed_formats}, received `{format}`" + f"Math documentation format must be one of {allowed_formats}, received `{format}`" ) populated_doc = self._instance.generate_math_doc(format) diff --git a/src/calliope/backend/parsing.py b/src/calliope/backend/parsing.py index 40896291f..4fdb451e6 100644 --- a/src/calliope/backend/parsing.py +++ b/src/calliope/backend/parsing.py @@ -5,6 +5,7 @@ import functools import itertools +import logging import operator from typing import ( TYPE_CHECKING, @@ -81,6 +82,8 @@ class UnparsedObjectiveDict(TypedDict): ] T = TypeVar("T", bound=UNPARSED_DICTS) +LOGGER = logging.getLogger(__name__) + class ParsedBackendEquation: def __init__( @@ -292,9 +295,12 @@ def evaluate_where( # noqa: F811 if as_latex: return r"\land{}".join(f"({i})" for i in evaluated_wheres if i != "true") else: - return xr.DataArray( + where = xr.DataArray( functools.reduce(operator.and_, [initial_where, *evaluated_wheres]) ) + if not where.any(): + self.log_not_added("'where' does not apply anywhere.") + return where def drop_dims_not_in_foreach(self, where: xr.DataArray) -> xr.DataArray: """the dimensions not included in "foreach" are removed from the input array @@ -358,6 +364,21 @@ def evaluate_expression( # noqa: F811 apply_where=apply_where, ) + def log_not_added( + self, + message: str, + level: Literal["info", "warning", "debug", "error", "critical"] = "debug", + ): + """Log to module-level logger with some prettification of the message + + Args: + message (str): Message to log. + level (Literal["info", "warning", "debug", "error", "critical"], optional): Log level. Defaults to "debug". + """ + getattr(LOGGER, level)( + f"Math parsing | {self.name} | Component not added; {message}" + ) + class ParsedBackendComponent(ParsedBackendEquation): _ERR_BULLET: str = " * " @@ -385,8 +406,7 @@ def __init__( name (str): Name of the optimisation problem component unparsed_data (T): Unparsed math formulation. Expected structure depends on the group to which the optimisation problem component belongs. """ - self.name = name - self.group_name = group + self.name = f"{group}:{name}" self._unparsed: dict = dict(unparsed_data) self.where: list[pp.ParseResults] = [] @@ -647,7 +667,7 @@ def extend_equation_list_with_expression_group( invalid_items = equation_items.difference(parsed_items.keys()) if invalid_items: raise KeyError( - f"({self.group_name}, {self.name}): Undefined {expression_group} found in equation: {invalid_items}" + f"{self.name}: Undefined {expression_group} found in equation: {invalid_items}" ) parsed_item_product = itertools.product( @@ -678,10 +698,8 @@ def combine_exists_and_foreach(self, model_data: xr.Dataset) -> xr.DataArray: # Add other dimensions (costs, timesteps, etc.) add_dims = set(self.sets).difference(exists.dims) if add_dims.difference(model_data.dims): - exceptions.warn( - f"Not generating optimisation problem object `{self.name}` because it is " - f"indexed over unidentified set name(s): `{add_dims.difference(model_data.dims)}`.", - _class=exceptions.BackendWarning, + self.log_not_added( + f"indexed over unidentified set names: `{add_dims.difference(model_data.dims)}`." 
) return xr.DataArray(False) exists_and_foreach = [exists, *[model_data[i].notnull() for i in add_dims]] @@ -712,6 +730,10 @@ def generate_top_level_where_array( xr.DataArray: Boolean array defining on which index items a parsed component should be built. """ foreach_where = self.combine_exists_and_foreach(model_data) + + if not foreach_where.any(): + self.log_not_added("'foreach' does not apply anywhere.") + if break_early and not foreach_where.any(): return foreach_where @@ -730,7 +752,7 @@ def raise_caught_errors(self): """If there are any parsing errors, pipe them to the ModelError bullet point list generator""" if not self._is_valid: exceptions.print_warnings_and_raise_errors( - errors={f"({self.group_name}, {self.name})": self._errors}, - during="math string parsing (marker indicates where parsing stopped, not strictly the equation term that caused the failure)", + errors={f"{self.name}": self._errors}, + during="math string parsing (marker indicates where parsing stopped, which might not be the root cause of the issue; sorry...)", bullet=self._ERR_BULLET, ) diff --git a/src/calliope/backend/pyomo_backend_model.py b/src/calliope/backend/pyomo_backend_model.py index 7be47211d..9a9ef113d 100644 --- a/src/calliope/backend/pyomo_backend_model.py +++ b/src/calliope/backend/pyomo_backend_model.py @@ -38,7 +38,7 @@ "variables", "constraints", "objectives", "parameters", "global_expressions" ] -logger = logging.getLogger("calliope.backend.backend_model") +LOGGER = logging.getLogger(__name__) COMPONENT_TRANSLATOR = { "parameter": "parameter", @@ -80,6 +80,11 @@ def add_parameter( use_inf_as_na=use_inf_as_na, ) if parameter_da.isnull().all(): + self.log( + "parameters", + parameter_name, + "Component not added; no data found in array.", + ) self.delete_component(parameter_name, "parameters") parameter_da = parameter_da.astype(float) @@ -191,9 +196,12 @@ def _objective_setter( expr = element.evaluate_expression(self.inputs, self, references=references) objective = pmo.objective(expr.item(), sense=sense) if name == self.inputs.run_config["objective"]: + text = "activated" objective.activate() else: + text = "deactivated" objective.deactivate() + self.log("objectives", name, f"Objective {text}.") self._instance.objectives[name].append(objective) return xr.DataArray(objective) @@ -214,8 +222,14 @@ def get_parameter( param_as_vals = self._apply_func(self._from_pyomo_param, parameter) if parameter.original_dtype.kind == "M": # i.e., np.datetime64 + self.log("parameters", name, "Converting Pyomo object to datetime dtype.") return xr.apply_ufunc(pd.to_datetime, param_as_vals) else: + self.log( + "parameters", + name, + f"Converting Pyomo object to {parameter.original_dtype} dtype.", + ) return param_as_vals.astype(parameter.original_dtype) def get_constraint( @@ -303,8 +317,8 @@ def solve( ) warmstart = False - with redirect_stdout(LogWriter(logger, "debug", strip=True)): # type: ignore - with redirect_stderr(LogWriter(logger, "error", strip=True)): # type: ignore + with redirect_stdout(LogWriter(self._solve_logger, "debug", strip=True)): # type: ignore + with redirect_stderr(LogWriter(self._solve_logger, "error", strip=True)): # type: ignore # Ignore most of gurobipy's logging, as it's output is # already captured through STDOUT logging.getLogger("gurobipy").setLevel(logging.ERROR) @@ -316,12 +330,12 @@ def solve( self._instance.load_solution(results.solution[0]) else: - logger.critical("Problem status:") + self._solve_logger.critical("Problem status:") for line in 
str(results.problem[0]).split("\n"):
-                logger.critical(line)
-            logger.critical("Solver status:")
+                self._solve_logger.critical(line)
+            self._solve_logger.critical("Solver status:")
             for line in str(results.solver[0]).split("\n"):
-                logger.critical(line)
+                self._solve_logger.critical(line)
 
             model_warn("Model solution was non-optimal.", _class=BackendWarning)
 
@@ -333,7 +347,7 @@ def __renamer(val, *idx):
             val.calliope_coords = idx
 
         with self._datetime_as_string(self._dataset):
-            for component_type in ["parameters", "variables"]:
+            for component_type in ["parameters", "variables", "constraints"]:
                 for da in self._dataset.filter_by_attrs(
                     coords_in_name=False, **{"obj_type": component_type}
                 ).values():
@@ -394,8 +408,12 @@ def update_parameter(
         ):
             refs_to_update = self._find_all_references(parameter_da.attrs["references"])
             if refs_to_update:
-                logger.warning(
-                    f"Defining values for a previously fully/partially undefined parameter. The optimisation problem components {refs_to_update} will be re-built."
+                self.log(
+                    "parameters",
+                    name,
+                    "Defining values for a previously fully/partially undefined parameter. "
+                    f"The optimisation problem components {refs_to_update} will be re-built.",
+                    "info",
                 )
             self.delete_component(name, "parameters")
             self.add_parameter(
@@ -406,8 +424,11 @@
             return None
 
         if missing_dims_in_new_vals:
-            logger.warning(
-                f"New values will be broadcast along the {missing_dims_in_new_vals} dimension(s)."
+            self.log(
+                "parameters",
+                name,
+                f"New values will be broadcast along the {missing_dims_in_new_vals} dimension(s).",
+                "info",
             )
 
         self._apply_func(self._update_pyomo_param, parameter_da, new_values)
@@ -424,6 +445,11 @@ def update_variable_bounds(
 
         for bound_name, new_bounds in {"min": min, "max": max}.items():
             if new_bounds is None:
+                self.log(
+                    "variables",
+                    name,
+                    f"{bound_name} bound not being updated as it has not been defined.",
+                )
                 continue
 
             existing_bound_param = self.inputs.math.get_key(
@@ -431,14 +457,18 @@
             )
             if existing_bound_param in self.parameters:
                 raise BackendError(
-                    f"Cannot update variable bounds that have been set by parameters. use `update_parameter('{existing_bound_param}')` to update the {bound_name} bound of {name}."
+                    "Cannot update variable bounds that have been set by parameters. "
+                    f"Use `update_parameter('{existing_bound_param}')` to update the {bound_name} bound of {name}."
                 )
 
             bound_da = xr.DataArray(new_bounds)
             missing_dims_in_new_vals = set(variable_da.dims).difference(bound_da.dims)
             if missing_dims_in_new_vals:
-                logger.warning(
-                    f"New `{bound_name}` bounds for variable `{name}` will be broadcast along the {missing_dims_in_new_vals} dimension(s)."
+                self.log(
+                    "variables",
+                    name,
+                    f"New `{bound_name}` bounds will be broadcast along the {missing_dims_in_new_vals} dimension(s).",
+                    "info",
                 )
             self._apply_func(
                 self._update_pyomo_variable,
@@ -512,6 +542,13 @@ def _get_capacity_bounds(
         def __get_bound(bound):
             this_bound = bounds.get(bound, None)
             if isinstance(this_bound, str):
+                text1 = bound
+                text2 = this_bound
+                self.log(
+                    "variables",
+                    name,
+                    f"{text1} bound applied according to the {text2} parameter values.",
+                )
                 return self.get_parameter(this_bound)
             else:
                 # TODO: decide if this parameter should be added to the backend dataset too
@@ -630,7 +667,7 @@ def _to_pyomo_constraint(
         *,
         op: Literal["==", ">=", "<="],
         name: str,
-    ) -> Union[type[pmo.constraint], float]:
+    ) -> Union[type[ObjConstraint], float]:
         """
         Utility function to generate a pyomo constraint for every element of an xarray DataArray.
@@ -647,7 +684,7 @@ def _to_pyomo_constraint( name (str): Name of constraint Returns: - Union[type[pmo.constraint], float]: + Union[type[ObjConstraint], float]: If mask is True, return np.nan. Otherwise return pmo_constraint(expr=lhs op rhs). """ @@ -655,11 +692,11 @@ def _to_pyomo_constraint( if not mask: return np.nan elif op == "==": - constraint = pmo.constraint(expr=lhs == rhs) + constraint = ObjConstraint(expr=lhs == rhs) elif op == "<=": - constraint = pmo.constraint(expr=lhs <= rhs) + constraint = ObjConstraint(expr=lhs <= rhs) elif op == ">=": - constraint = pmo.constraint(expr=lhs >= rhs) + constraint = ObjConstraint(expr=lhs >= rhs) self._instance.constraints[name].append(constraint) return constraint @@ -750,12 +787,12 @@ def _from_pyomo_param(val: Union[ObjParameter, ObjVariable, float]) -> Any: @staticmethod def _from_pyomo_constraint( - val: pmo.constraint, *, eval_body: bool = False + val: ObjConstraint, *, eval_body: bool = False ) -> pd.Series: """Evaluate Pyomo constraint object. Args: - val (pmo.constraint): constraint object to be evaluated + val (ObjConstraint): constraint object to be evaluated Kwargs: eval_body (bool, optional): If True, attempt to evaluate the constraint object `body`, which will evaluate the @@ -912,3 +949,17 @@ def __init__(self, **kwds): def getname(self, *args, **kwargs): return self._update_name(pmo.variable.getname(self, *args, **kwargs)) + + +class ObjConstraint(pmo.constraint, CoordObj): + """ + A pyomo constraint with a `name` property setter (via the `pmo.constraint.getname` method) which replaces a list position as a name with a list of strings. + + """ + + def __init__(self, **kwds): + pmo.constraint.__init__(self, **kwds) + CoordObj.__init__(self) + + def getname(self, *args, **kwargs): + return self._update_name(pmo.constraint.getname(self, *args, **kwargs)) diff --git a/src/calliope/core/model.py b/src/calliope/core/model.py index 91a488214..8a079649c 100644 --- a/src/calliope/core/model.py +++ b/src/calliope/core/model.py @@ -30,7 +30,7 @@ from calliope.preprocess import model_run_from_dict, model_run_from_yaml from calliope.preprocess.model_data import ModelDataFactory -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) T = TypeVar( "T", @@ -103,7 +103,7 @@ def __init__( # try to set logging output format assuming python interactive. 
Will # use CLI logging format if model called from CLI - log_time(logger, self._timings, "model_creation", comment="Model: initialising") + log_time(LOGGER, self._timings, "model_creation", comment="Model: initialising") if isinstance(config, str): self._config_path = config model_run, debug_data = model_run_from_yaml(config, *args, **kwargs) @@ -134,7 +134,7 @@ def _init_from_model_run( """ self._model_run = model_run log_time( - logger, + LOGGER, self._timings, "model_run_creation", comment="Model: preprocessing stage 1 (model_run)", @@ -155,7 +155,7 @@ def _init_from_model_run( self._model_data_pre_time = data_pre_time self._model_data_stripped_keys = stripped_keys log_time( - logger, + LOGGER, self._timings, "model_data_original_creation", comment="Model: preprocessing stage 2 (model_data)", @@ -177,7 +177,7 @@ def _init_from_model_run( self.math_documentation.inputs = self._model_data log_time( - logger, + LOGGER, self._timings, "model_data_creation", comment="Model: preprocessing complete", @@ -206,7 +206,7 @@ def _init_from_model_data(self, model_data: xarray.Dataset) -> None: self._add_model_data_methods() log_time( - logger, + LOGGER, self._timings, "model_data_loaded", comment="Model: loaded model_data", @@ -231,7 +231,7 @@ def _add_model_data_methods(self): if len(results.data_vars) > 0: self.results = results log_time( - logger, + LOGGER, self._timings, "model_data_loaded", comment="Model: loaded model_data", @@ -375,7 +375,7 @@ def build( def _build(self, backend: T) -> T: log_time( - logger, + LOGGER, self._timings, "backend_parameters_generated", comment="Model: Generated optimisation problem parameters", @@ -397,7 +397,7 @@ def _build(self, backend: T) -> T: getattr(backend, f"add_{component}")(name) log_time( - logger, + LOGGER, self._timings, f"backend_{components}_generated", comment=f"Model: Generated optimisation problem {components}", @@ -454,7 +454,7 @@ def solve(self, force: bool = False, warmstart: bool = False) -> None: ) log_time( - logger, + LOGGER, self._timings, "solve_start", comment=f"Backend: starting model solve in {run_mode} mode", @@ -469,7 +469,7 @@ def solve(self, force: bool = False, warmstart: bool = False) -> None: ) log_time( - logger, + LOGGER, self._timings, "solver_exit", time_since_solve_start=True, @@ -593,6 +593,7 @@ def validate_math_strings(self, math_dict: dict) -> None: If all components of the dictionary are parsed successfully, this function will log a success message to the INFO logging level and return None. Otherwise, a calliope.ModelError will be raised with parsing issues listed. 
""" + validate_dict(math_dict, self._MATH_SCHEMA, "math") valid_math_element_names = [ *self.math["variables"].keys(), @@ -614,12 +615,12 @@ def validate_math_strings(self, math_dict: dict) -> None: parsed.parse_top_level_where(errors="ignore") parsed.parse_equations(set(valid_math_element_names), errors="ignore") if not parsed._is_valid: - collected_errors[f"({component_group}, {name})"] = parsed._errors + collected_errors[f"{component_group}:{name}"] = parsed._errors if collected_errors: exceptions.print_warnings_and_raise_errors( - during="math string parsing (marker indicates where parsing stopped, not strictly the equation term that caused the failure)", + during="math string parsing (marker indicates where parsing stopped, which might not be the root cause of the issue; sorry...)", errors=collected_errors, ) - logger.info("Model: validated math strings") + LOGGER.info("Model: validated math strings") diff --git a/src/calliope/core/util/logging.py b/src/calliope/core/util/logging.py index 7d724ac6e..e1bce7475 100644 --- a/src/calliope/core/util/logging.py +++ b/src/calliope/core/util/logging.py @@ -17,12 +17,11 @@ def setup_root_logger(verbosity, capture_warnings): - root_logger = logging.getLogger() # Get the root logger + root_logger = logging.getLogger("calliope") # Get the root logger # Remove any existing output handlers from root logger if root_logger.hasHandlers(): - for handler in root_logger.handlers: - root_logger.removeHandler(handler) + root_logger.handlers.clear() # Create a console log handler with decent formatting and attach it formatter = logging.Formatter( @@ -62,7 +61,7 @@ def set_log_verbosity( interactively. """ - backend_logger = logging.getLogger("calliope.backend.backend_model") + backend_logger = logging.getLogger("calliope.backend.backend_model.") if include_solver_output is True: backend_logger.setLevel("DEBUG") else: diff --git a/src/calliope/preprocess/model_data.py b/src/calliope/preprocess/model_data.py index a1be5fd5a..d8badd8a2 100644 --- a/src/calliope/preprocess/model_data.py +++ b/src/calliope/preprocess/model_data.py @@ -23,6 +23,8 @@ from calliope.core.attrdict import AttrDict from calliope.preprocess import checks, time +LOGGER = logging.getLogger(__name__) + class ModelDataFactory: UNWANTED_TECH_KEYS = [ @@ -300,9 +302,9 @@ def _model_run_dict_to_dataset( data_dict = self._reformat_model_run_dict( model_run_subdict, expected_nesting, **kwargs ) - if not data_dict: - logging.info( - f"No relevant data found for `{group_name}` group of parameters" + if data_dict is None: + LOGGER.debug( + f"Model build | No relevant data found for `{group_name}` group of parameters" ) return None df = self._dict_to_df(data_dict, data_dimensions, **kwargs) diff --git a/tests/common/lp_files/balance_conversion.lp b/tests/common/lp_files/balance_conversion.lp index 0e05d3ef7..dd15a53bf 100644 --- a/tests/common/lp_files/balance_conversion.lp +++ b/tests/common/lp_files/balance_conversion.lp @@ -6,22 +6,22 @@ objectives(dummy_obj)(0): s.t. 
-c_e_constraints(balance_conversion)(0)_: +c_e_constraints(balance_conversion)(a__test_conversion__2005_01_01_00_00)_: +1 variables(flow_out)(a__test_conversion__heat__2005_01_01_00_00) -0.9 variables(flow_in)(a__test_conversion__gas__2005_01_01_00_00) = 0.0 -c_e_constraints(balance_conversion)(1)_: +c_e_constraints(balance_conversion)(a__test_conversion__2005_01_01_01_00)_: +1 variables(flow_out)(a__test_conversion__heat__2005_01_01_01_00) -0.9 variables(flow_in)(a__test_conversion__gas__2005_01_01_01_00) = 0.0 -c_e_constraints(balance_conversion)(2)_: +c_e_constraints(balance_conversion)(b__test_conversion__2005_01_01_00_00)_: +1 variables(flow_out)(b__test_conversion__heat__2005_01_01_00_00) -0.9 variables(flow_in)(b__test_conversion__gas__2005_01_01_00_00) = 0.0 -c_e_constraints(balance_conversion)(3)_: +c_e_constraints(balance_conversion)(b__test_conversion__2005_01_01_01_00)_: +1 variables(flow_out)(b__test_conversion__heat__2005_01_01_01_00) -0.9 variables(flow_in)(b__test_conversion__gas__2005_01_01_01_00) = 0.0 diff --git a/tests/common/lp_files/flow_out_max.lp b/tests/common/lp_files/flow_out_max.lp index cbf44e4d8..05d19047f 100644 --- a/tests/common/lp_files/flow_out_max.lp +++ b/tests/common/lp_files/flow_out_max.lp @@ -6,62 +6,62 @@ objectives(dummy_obj)(0): s.t. -c_u_constraints(flow_out_max)(0)_: +c_u_constraints(flow_out_max)(a__test_supply_elec__electricity__2005_01_01_00_00)_: +1 variables(flow_out)(a__test_supply_elec__electricity__2005_01_01_00_00) -1.0 variables(flow_cap)(a__test_supply_elec) <= 0.0 -c_u_constraints(flow_out_max)(1)_: +c_u_constraints(flow_out_max)(a__test_supply_elec__electricity__2005_01_01_01_00)_: -1.0 variables(flow_cap)(a__test_supply_elec) +1 variables(flow_out)(a__test_supply_elec__electricity__2005_01_01_01_00) <= 0.0 -c_u_constraints(flow_out_max)(2)_: +c_u_constraints(flow_out_max)(a__test_transmission_elec_b__electricity__2005_01_01_00_00)_: +1 variables(flow_out)(a__test_transmission_elec_b__electricity__2005_01_01_00_00) -1.0 variables(flow_cap)(a__test_transmission_elec_b) <= 0.0 -c_u_constraints(flow_out_max)(3)_: +c_u_constraints(flow_out_max)(a__test_transmission_elec_b__electricity__2005_01_01_01_00)_: -1.0 variables(flow_cap)(a__test_transmission_elec_b) +1 variables(flow_out)(a__test_transmission_elec_b__electricity__2005_01_01_01_00) <= 0.0 -c_u_constraints(flow_out_max)(4)_: +c_u_constraints(flow_out_max)(a__test_transmission_heat_b__heat__2005_01_01_00_00)_: +1 variables(flow_out)(a__test_transmission_heat_b__heat__2005_01_01_00_00) -1.0 variables(flow_cap)(a__test_transmission_heat_b) <= 0.0 -c_u_constraints(flow_out_max)(5)_: +c_u_constraints(flow_out_max)(a__test_transmission_heat_b__heat__2005_01_01_01_00)_: -1.0 variables(flow_cap)(a__test_transmission_heat_b) +1 variables(flow_out)(a__test_transmission_heat_b__heat__2005_01_01_01_00) <= 0.0 -c_u_constraints(flow_out_max)(6)_: +c_u_constraints(flow_out_max)(b__test_supply_elec__electricity__2005_01_01_00_00)_: +1 variables(flow_out)(b__test_supply_elec__electricity__2005_01_01_00_00) -1.0 variables(flow_cap)(b__test_supply_elec) <= 0.0 -c_u_constraints(flow_out_max)(7)_: +c_u_constraints(flow_out_max)(b__test_supply_elec__electricity__2005_01_01_01_00)_: -1.0 variables(flow_cap)(b__test_supply_elec) +1 variables(flow_out)(b__test_supply_elec__electricity__2005_01_01_01_00) <= 0.0 -c_u_constraints(flow_out_max)(8)_: +c_u_constraints(flow_out_max)(b__test_transmission_elec_a__electricity__2005_01_01_00_00)_: +1 
variables(flow_out)(b__test_transmission_elec_a__electricity__2005_01_01_00_00) -1.0 variables(flow_cap)(b__test_transmission_elec_a) <= 0.0 -c_u_constraints(flow_out_max)(9)_: +c_u_constraints(flow_out_max)(b__test_transmission_elec_a__electricity__2005_01_01_01_00)_: -1.0 variables(flow_cap)(b__test_transmission_elec_a) +1 variables(flow_out)(b__test_transmission_elec_a__electricity__2005_01_01_01_00) <= 0.0 -c_u_constraints(flow_out_max)(10)_: +c_u_constraints(flow_out_max)(b__test_transmission_heat_a__heat__2005_01_01_00_00)_: +1 variables(flow_out)(b__test_transmission_heat_a__heat__2005_01_01_00_00) -1.0 variables(flow_cap)(b__test_transmission_heat_a) <= 0.0 -c_u_constraints(flow_out_max)(11)_: +c_u_constraints(flow_out_max)(b__test_transmission_heat_a__heat__2005_01_01_01_00)_: -1.0 variables(flow_cap)(b__test_transmission_heat_a) +1 variables(flow_out)(b__test_transmission_heat_a__heat__2005_01_01_01_00) <= 0.0 diff --git a/tests/common/lp_files/source_max.lp b/tests/common/lp_files/source_max.lp index 0b65e9fc3..2aac6dba2 100644 --- a/tests/common/lp_files/source_max.lp +++ b/tests/common/lp_files/source_max.lp @@ -6,22 +6,22 @@ objectives(dummy_obj)(0): s.t. -c_u_constraints(my_constraint)(0)_: +c_u_constraints(my_constraint)(a__test_supply_plus__2005_01_01_00_00)_: +1 variables(source_use)(a__test_supply_plus__2005_01_01_00_00) -24.0 variables(source_cap)(a__test_supply_plus) <= 0.0 -c_u_constraints(my_constraint)(1)_: +c_u_constraints(my_constraint)(a__test_supply_plus__2005_01_02_00_00)_: -24.0 variables(source_cap)(a__test_supply_plus) +1 variables(source_use)(a__test_supply_plus__2005_01_02_00_00) <= 0.0 -c_u_constraints(my_constraint)(2)_: +c_u_constraints(my_constraint)(b__test_supply_plus__2005_01_01_00_00)_: +1 variables(source_use)(b__test_supply_plus__2005_01_01_00_00) -24.0 variables(source_cap)(b__test_supply_plus) <= 0.0 -c_u_constraints(my_constraint)(3)_: +c_u_constraints(my_constraint)(b__test_supply_plus__2005_01_02_00_00)_: -24.0 variables(source_cap)(b__test_supply_plus) +1 variables(source_use)(b__test_supply_plus__2005_01_02_00_00) <= 0.0 diff --git a/tests/common/lp_files/storage_max.lp b/tests/common/lp_files/storage_max.lp index 8a078cc54..1c9bd0fcc 100644 --- a/tests/common/lp_files/storage_max.lp +++ b/tests/common/lp_files/storage_max.lp @@ -2,26 +2,26 @@ min objectives(dummy_obj)(0): -+2 ONE_VAR_CONSTANT ++2.0 ONE_VAR_CONSTANT s.t. 
-c_u_constraints(storage_max)(0)_: +c_u_constraints(storage_max)(a__test_storage__2005_01_01_00_00)_: +1 variables(storage)(a__test_storage__2005_01_01_00_00) -1 variables(storage_cap)(a__test_storage) <= 0.0 -c_u_constraints(storage_max)(1)_: +c_u_constraints(storage_max)(a__test_storage__2005_01_01_01_00)_: -1 variables(storage_cap)(a__test_storage) +1 variables(storage)(a__test_storage__2005_01_01_01_00) <= 0.0 -c_u_constraints(storage_max)(2)_: +c_u_constraints(storage_max)(b__test_storage__2005_01_01_00_00)_: +1 variables(storage)(b__test_storage__2005_01_01_00_00) -1 variables(storage_cap)(b__test_storage) <= 0.0 -c_u_constraints(storage_max)(3)_: +c_u_constraints(storage_max)(b__test_storage__2005_01_01_01_00)_: -1 variables(storage_cap)(b__test_storage) +1 variables(storage)(b__test_storage__2005_01_01_01_00) <= 0.0 @@ -34,4 +34,4 @@ bounds 0 <= variables(storage)(b__test_storage__2005_01_01_00_00) <= +inf 0 <= variables(storage_cap)(b__test_storage) <= 15.0 0 <= variables(storage)(b__test_storage__2005_01_01_01_00) <= +inf -end \ No newline at end of file +end diff --git a/tests/test_backend_parsing.py b/tests/test_backend_parsing.py index acd351183..cf03a01ef 100644 --- a/tests/test_backend_parsing.py +++ b/tests/test_backend_parsing.py @@ -1,3 +1,4 @@ +import logging from io import StringIO from unittest.mock import patch @@ -475,7 +476,7 @@ def test_extend_equation_list_with_expression_group_missing_sub_expression( ) assert check_error_or_warning( excinfo, - "(constraints, foo): Undefined sub_expressions found in equation: {'ba'}", + "constraints:foo: Undefined sub_expressions found in equation: {'ba'}", ) @pytest.mark.parametrize( @@ -548,7 +549,7 @@ def test_extend_equation_list_with_expression_group_missing_slices( ) assert check_error_or_warning( excinfo, - "(constraints, foo): Undefined slices found in equation: {'node1'}", + "constraints:foo: Undefined slices found in equation: {'node1'}", ) @pytest.mark.parametrize( @@ -593,7 +594,7 @@ def test_raise_caught_errors(self, component_obj, is_valid): else: with pytest.raises(calliope.exceptions.ModelError) as excinfo: component_obj.raise_caught_errors() - assert check_error_or_warning(excinfo, ["\n * (constraints, foo):"]) + assert check_error_or_warning(excinfo, ["\n * constraints:foo:"]) def test_parse_equations_fail( self, obj_with_sub_expressions_and_slices, valid_math_element_names @@ -602,7 +603,7 @@ def test_parse_equations_fail( with pytest.raises(calliope.exceptions.ModelError) as excinfo: component_obj.parse_equations(valid_math_element_names, errors="raise") expected_err_string = """ - * (constraints, my_constraint): + * constraints:my_constraint: * equations[0].expression (line 1, char 5): bar = 1 ^""" assert check_error_or_warning(excinfo, expected_err_string) @@ -628,13 +629,11 @@ def test_combine_exists_and_foreach_all_permutations( assert not BASE_DIMS.difference(where.dims) assert not set(foreach).difference(where.dims) - def test_foreach_unidentified_name(self, dummy_model_data, component_obj): + def test_foreach_unidentified_name(self, caplog, dummy_model_data, component_obj): component_obj.sets = ["nodes", "techs", "foos"] - with pytest.warns(calliope.exceptions.BackendWarning) as excinfo: - component_obj.combine_exists_and_foreach(dummy_model_data) - assert check_error_or_warning( - excinfo, "Not generating optimisation problem object `foo`" - ) + caplog.set_level(logging.DEBUG) + component_obj.combine_exists_and_foreach(dummy_model_data) + assert "indexed over unidentified set names" in caplog.text def 
test_evaluate_where_to_false(self, dummy_model_data, component_obj): component_obj.parse_top_level_where() @@ -649,11 +648,15 @@ def test_parse_top_level_where_fail(self, component_obj): assert check_error_or_warning(excinfo, "Errors during math string parsing") def test_generate_top_level_where_array_break_at_foreach( - self, dummy_model_data, component_obj + self, caplog, dummy_model_data, component_obj ): component_obj.sets = ["nodes", "techs", "foos"] - with pytest.warns(calliope.exceptions.BackendWarning): - where_array = component_obj.generate_top_level_where_array(dummy_model_data) + caplog.set_level(logging.DEBUG) + where_array = component_obj.generate_top_level_where_array(dummy_model_data) + + assert "indexed over unidentified set names: `{'foos'}`" in caplog.text + assert "'foreach' does not apply anywhere." in caplog.text + assert "'where' does not apply anywhere." not in caplog.text assert not where_array.any() assert not where_array.shape @@ -667,14 +670,19 @@ def test_generate_top_level_where_array_break_at_top_level_where( assert not set(component_obj.sets).difference(where_array.dims) def test_generate_top_level_where_array_no_break_no_align( - self, dummy_model_data, component_obj + self, caplog, dummy_model_data, component_obj ): component_obj.sets = ["nodes", "techs", "foos"] component_obj._unparsed["where"] = "all_nan" - with pytest.warns(calliope.exceptions.BackendWarning): - where_array = component_obj.generate_top_level_where_array( - dummy_model_data, break_early=False, align_to_foreach_sets=False - ) + caplog.set_level(logging.DEBUG) + + where_array = component_obj.generate_top_level_where_array( + dummy_model_data, break_early=False, align_to_foreach_sets=False + ) + assert "indexed over unidentified set names: `{'foos'}`" in caplog.text + assert "'foreach' does not apply anywhere." in caplog.text + assert "'where' does not apply anywhere." 
in caplog.text + assert not where_array.any() assert set(component_obj.sets).difference(where_array.dims) == {"foos"} @@ -694,7 +702,7 @@ def test_evaluate_where_fail(self, component_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: component_obj.parse_top_level_where() expected_err_string = """ - * (constraints, foo): + * constraints:foo: * where (line 1, char 1): 1[] ^""" assert check_error_or_warning(excinfo, expected_err_string) diff --git a/tests/test_backend_pyomo.py b/tests/test_backend_pyomo.py index da72772ab..179014038 100755 --- a/tests/test_backend_pyomo.py +++ b/tests/test_backend_pyomo.py @@ -2181,7 +2181,7 @@ def test_add_two_same_obj(self, simple_supply): ) assert check_error_or_warning( excinfo, - "(objective, foo:1): trying to set two equations for the same component.", + "objectives:foo:1 | trying to set two equations for the same component.", ) def test_add_valid_obj(self, simple_supply): @@ -2220,6 +2220,15 @@ def test_object_string_representation(self, simple_supply): "variables", ), ("flow_eff", {"nodes": "a", "techs": "test_supply_elec"}, "parameters"), + ( + "system_balance", + { + "nodes": "a", + "carriers": "electricity", + "timesteps": "2005-01-01 00:00", + }, + "constraints", + ), ], ) def test_verbose_strings(self, simple_supply_longnames, objname, dims, objtype): @@ -2248,7 +2257,7 @@ def test_verbose_strings_constraint(self, simple_supply_longnames): obj.sel(dims).body.item() == f"parameters[flow_eff][{flow_eff_dims}]*variables[flow_in][{', '.join(dims[i] for i in obj.dims)}]" ) - assert not obj.coords_in_name + assert obj.coords_in_name def test_verbose_strings_expression(self, simple_supply_longnames): dims = { @@ -2284,7 +2293,7 @@ def test_update_parameter(self, simple_supply): def test_update_parameter_one_val(self, caplog, simple_supply): updated_param = 1000 new_dims = {"nodes", "techs"} - caplog.set_level(logging.WARNING) + caplog.set_level(logging.DEBUG) simple_supply.backend.update_parameter("flow_eff", updated_param) @@ -2314,12 +2323,13 @@ def test_update_parameter_add_dim(self, caplog, simple_supply): ) refs_to_update = {"balance_demand", "balance_transmission"} - caplog.set_level(logging.WARNING) + caplog.set_level(logging.DEBUG) simple_supply.backend.update_parameter("flow_eff", updated_param) assert ( - f"Defining values for a previously fully/partially undefined parameter. The optimisation problem components {refs_to_update} will be re-built." + "Defining values for a previously fully/partially undefined parameter. " + f"The optimisation problem components {refs_to_update} will be re-built." in caplog.text ) @@ -2338,7 +2348,8 @@ def test_update_parameter_replace_undefined(self, caplog, simple_supply): simple_supply.backend.update_parameter("parasitic_eff", updated_param) assert ( - f"Defining values for a previously fully/partially undefined parameter. The optimisation problem components {refs_to_update} will be re-built." + "Defining values for a previously fully/partially undefined parameter. " + f"The optimisation problem components {refs_to_update} will be re-built." in caplog.text ) @@ -2351,9 +2362,9 @@ def test_update_parameter_replace_undefined(self, caplog, simple_supply): def test_update_parameter_no_refs_to_update(self, simple_supply): """units_equals isn't defined in the inputs, so is a dimensionless value in the pyomo object, assigned its default value. - Updating it doesn't change the model in any way, because none of the existing constraints/expressions depend on it. 
Therefore, no warning is raised + Updating it doesn't change the model in any way, because none of the existing constraints/expressions depend on it. + Therefore, no warning is raised. """ - updated_param = 1 simple_supply.backend.update_parameter("units_equals", updated_param) diff --git a/tests/test_constraint_results.py b/tests/test_constraint_results.py index c4b0a7402..493c9f61a 100644 --- a/tests/test_constraint_results.py +++ b/tests/test_constraint_results.py @@ -118,11 +118,10 @@ def _run_model(feasibility, cap_val): "run.bigM": 1e3, # Allow setting resource and flow_cap_max/equals to force infeasibility "techs.test_supply_elec.constraints": { - "resource": cap_val, + "source_equals": cap_val, "flow_eff": 1, "flow_cap_equals": 15, }, - "techs.test_supply_elec.switches.force_resource": True, } model = build_model( override_dict=override_dict, scenario="investment_costs" diff --git a/tests/test_core_model.py b/tests/test_core_model.py index 7c082ef9b..7bcc6141b 100644 --- a/tests/test_core_model.py +++ b/tests/test_core_model.py @@ -303,14 +303,14 @@ def test_custom_math(self, caplog, simple_supply, equation, where): def test_custom_math_fails(self, simple_supply, component_dict, both_fail): math_dict = {"constraints": {"foo": component_dict}} errors_to_check = [ - "math string parsing (marker indicates where parsing stopped, not strictly the equation term that caused the failure)", - " * (constraints, foo):", + "math string parsing (marker indicates where parsing stopped, which might not be the root cause of the issue; sorry...)", + " * constraints:foo:", "equations[0].expression", "where", ] if both_fail: math_dict["constraints"]["bar"] = component_dict - errors_to_check.append("* (constraints, bar):") + errors_to_check.append("* constraints:bar:") else: math_dict["constraints"]["bar"] = {"equations": [{"expression": "1 == 1"}]} diff --git a/tests/test_core_util.py b/tests/test_core_util.py index 82651dc31..b68f3edf1 100644 --- a/tests/test_core_util.py +++ b/tests/test_core_util.py @@ -45,32 +45,30 @@ def test_memoize_instancemethod(self): class TestLogging: - def test_set_log_verbosity(self): - calliope.set_log_verbosity("CRITICAL", include_solver_output=True) - - assert logging.getLogger("calliope").getEffectiveLevel() == 50 - assert logging.getLogger("py.warnings").getEffectiveLevel() == 50 - assert ( - logging.getLogger("calliope.backend.backend_model").getEffectiveLevel() - == 10 - ) - - calliope.set_log_verbosity("CRITICAL", include_solver_output=False) + @pytest.mark.parametrize( + ["level", "include_solver_output", "expected_level", "expected_solver_level"], + [ + ("CRITICAL", True, 50, 10), + ("CRITICAL", False, 50, 50), + ("info", True, 20, 10), + ], + ) + def test_set_log_verbosity( + self, level, include_solver_output, expected_level, expected_solver_level + ): + calliope.set_log_verbosity(level, include_solver_output=include_solver_output) - assert logging.getLogger("calliope").getEffectiveLevel() == 50 - assert logging.getLogger("py.warnings").getEffectiveLevel() == 50 + assert logging.getLogger("calliope").getEffectiveLevel() == expected_level + assert logging.getLogger("py.warnings").getEffectiveLevel() == expected_level assert ( logging.getLogger("calliope.backend.backend_model").getEffectiveLevel() - == 50 + == expected_level ) - - calliope.set_log_verbosity() - - assert logging.getLogger("calliope").getEffectiveLevel() == 20 - assert logging.getLogger("py.warnings").getEffectiveLevel() == 20 assert ( - 
logging.getLogger("calliope.backend.backend_model").getEffectiveLevel() - == 10 + logging.getLogger( + "calliope.backend.backend_model." + ).getEffectiveLevel() + == expected_solver_level ) def test_timing_log(self): diff --git a/tests/test_io.py b/tests/test_io.py index 5ceddc73e..67ad697f9 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -13,7 +13,7 @@ class TestIO: @pytest.fixture(scope="module") def vars_to_add_attrs(self): - return ["resource", "energy_cap"] + return ["source_max", "flow_cap"] @pytest.fixture(scope="module") def model(self, vars_to_add_attrs): diff --git a/tests/test_model_data.py b/tests/test_model_data.py index 419e87049..d338a20d6 100644 --- a/tests/test_model_data.py +++ b/tests/test_model_data.py @@ -264,12 +264,12 @@ def test_dict_to_df_is_link(self, model_data): assert df.values[0] == list(data_dict.values())[0] def test_model_run_dict_to_dataset_no_match(self, caplog, model_data): - with caplog.at_level(logging.INFO): - model_data._model_run_dict_to_dataset( - "foo", "node", ["foobar"], ["nodes", "foobar"] - ) - records = [r.msg for r in caplog.records] - assert "No relevant data found for `foo` group of parameters" in records + caplog.set_level(logging.DEBUG, logger="calliope.preprocess.model_data") + + model_data._model_run_dict_to_dataset( + "foo", "node", ["FOO"], ["nodes", "foobar"] + ) + assert "No relevant data found for `foo` group of parameters" in caplog.text @pytest.mark.parametrize( ("data", "idx", "cols", "out_idx"),
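Taken together, the logging changes in this patch make it possible to split user-facing logs from raw solver output. Below is a minimal sketch of that end-to-end usage, assuming the trailing-dot solver logger name (`calliope.backend.backend_model.`) introduced in `src/calliope/core/util/logging.py` above; the `solver.log` output path and handler setup are illustrative, not part of the patch.

    import logging

    import calliope

    # The console gets calliope's own logs at INFO level, without solver chatter.
    calliope.set_log_verbosity("info", include_solver_output=False)

    # Route the solver output, which this patch emits via the dedicated
    # trailing-dot child logger, to its own file instead.
    solve_logger = logging.getLogger("calliope.backend.backend_model.")
    solve_logger.setLevel(logging.DEBUG)
    solve_logger.propagate = False  # don't also echo solver output to the console
    solve_logger.addHandler(logging.FileHandler("solver.log"))

    m = calliope.examples.national_scale()
    m.build()
    m.solve()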