diff --git a/doc/api/edisgo.tools.rst b/doc/api/edisgo.tools.rst index 2c268568c..1bc306a87 100644 --- a/doc/api/edisgo.tools.rst +++ b/doc/api/edisgo.tools.rst @@ -9,14 +9,6 @@ edisgo.tools.config module :undoc-members: :show-inheritance: -edisgo.tools.edisgo\_run module --------------------------------- - -.. automodule:: edisgo.tools.edisgo_run - :members: - :undoc-members: - :show-inheritance: - edisgo.tools.geo module ------------------------ @@ -33,6 +25,14 @@ edisgo.tools.geopandas\_helper module :undoc-members: :show-inheritance: +edisgo.tools.logger module +---------------------------------------- + +.. automodule:: edisgo.tools.logger + :members: + :undoc-members: + :show-inheritance: + edisgo.tools.networkx\_helper module ---------------------------------------- diff --git a/doc/whatsnew/v0-2-0.rst b/doc/whatsnew/v0-2-0.rst index 99646d25c..4a2d8af6a 100644 --- a/doc/whatsnew/v0-2-0.rst +++ b/doc/whatsnew/v0-2-0.rst @@ -9,6 +9,7 @@ Changes * added pre-commit hooks (flake8, black, isort, pyupgrade) `#229 `_ * added issue and pull request templates `#220 `_ * added Windows installation yml and documentation +* added functionality to set up different loggers with individual logging levels and where to write output `#295 `_ * added integrity checks of eDisGo object `#231 `_ * added functionality to save to and load from zip archive `#216 `_ * added option to not raise error in case power flow did not converge `#207 `_ diff --git a/edisgo/flex_opt/charging_strategies.py b/edisgo/flex_opt/charging_strategies.py index c92057004..9946fc227 100644 --- a/edisgo/flex_opt/charging_strategies.py +++ b/edisgo/flex_opt/charging_strategies.py @@ -32,7 +32,7 @@ ], } -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) # TODO: the dummy timeseries should be as long as the simulated days and not diff --git a/edisgo/network/electromobility.py b/edisgo/network/electromobility.py index 72739aa85..de6190391 100644 --- a/edisgo/network/electromobility.py 
+++ b/edisgo/network/electromobility.py @@ -12,7 +12,7 @@ if "READTHEDOCS" not in os.environ: import geopandas as gpd -logger = logging.getLogger("edisgo") +logger = logging.getLogger(__name__) COLUMNS = { "charging_processes_df": [ diff --git a/edisgo/tools/edisgo_run.py b/edisgo/tools/edisgo_run.py deleted file mode 100755 index 77e1f8aa9..000000000 --- a/edisgo/tools/edisgo_run.py +++ /dev/null @@ -1,612 +0,0 @@ -import argparse -import glob -import logging -import multiprocessing as mp -import os -import sys - -import multiprocess as mp2 -import pandas as pd - -from edisgo import EDisGo -from edisgo.flex_opt.exceptions import MaximumIterationError -from edisgo.network.results import Results - - -def setup_logging( - logfilename=None, - logfile_loglevel="debug", - console_loglevel="info", - **logging_kwargs -): - # a dict to help with log level definition - loglevel_dict = { - "info": logging.INFO, - "debug": logging.DEBUG, - "warn": logging.WARNING, - "warning": logging.WARNING, - "error": logging.ERROR, - "critical": logging.CRITICAL, - } - - if not (logfilename): - logfilename = "edisgo_run.log" - - logging.basicConfig( - filename=logfilename, - format="%(asctime)s - %(name)s -" + " %(levelname)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=loglevel_dict[logfile_loglevel], - ) - - root_logger = logging.getLogger() - - console_stream = logging.StreamHandler() - console_stream.setLevel(loglevel_dict[console_loglevel]) - console_formatter = logging.Formatter( - fmt="%(asctime)s - %(name)s -" + " %(levelname)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - ) - console_stream.setFormatter(console_formatter) - - # add stream handler to root logger - root_logger.addHandler(console_stream) - - return root_logger - - -def run_edisgo_basic( - ding0_path, generator_scenario=None, analysis="worst-case", *edisgo_grid -): - """ - Determine network expansion costs for given ding0 grid and scenario. 
- - Parameters - ---------- - ding0_path : str - Path to ding0 network csv data. - - analysis : str - Either 'worst-case' or 'timeseries'. - - generator_scenario : None or :obj:`str` - If provided defines which scenario of future generator park to use - and invokes import of these generators. Possible options are 'nep2035' - and 'ego100'. - - edisgo_grid : :class:`~.EDisGo` (optional) - If an EDisGo object is provided it is used instead of creating a new - object using parameters `ding0_path` and `analysis`. - - Returns - ------- - edisgo_grid : :class:`~.EDisGo` - costs : :pandas:`pandas.DataFrame` - Costs of network expansion - grid_issues : dict - Log for remaining grid issues after network expansion. For grids - resulting in an error this gives the error message. - - """ - - grid_issues = {} - - if edisgo_grid: # if an edisgo_grid is passed in arg then ignore everything else - edisgo_grid = edisgo_grid[0] - else: - try: - if "worst-case" in analysis: - edisgo_grid = EDisGo( - ding0_grid=ding0_path, worst_case_analysis=analysis - ) - elif "timeseries" in analysis: - edisgo_grid = EDisGo( - ding0_grid=ding0_path, - timeseries_generation_fluctuating="oedb", - timeseries_load="demandlib", - ) - except FileNotFoundError as e: - return ( - None, - pd.DataFrame(), - {"network": edisgo_grid, "msg": str(e)}, - ) - - logging.info("Grid expansion for MV network {}".format(edisgo_grid.topology.id)) - - # Import generators - if generator_scenario: - logging.info("Grid expansion for scenario '{}'.".format(generator_scenario)) - edisgo_grid.import_generators(generator_scenario=generator_scenario) - else: - logging.info("Grid expansion with status quo generator capacities.") - - try: - # Do network reinforcement - edisgo_grid.reinforce() - - # Get costs - costs_grouped = edisgo_grid.network.results.grid_expansion_costs.groupby( - ["type"] - ).sum() - costs = pd.DataFrame( - costs_grouped.values, - columns=costs_grouped.columns, - index=[ - [edisgo_grid.network.id] * 
len(costs_grouped), - costs_grouped.index, - ], - ).reset_index() - costs.rename(columns={"level_0": "network"}, inplace=True) - - grid_issues["network"] = None - grid_issues["msg"] = None - - logging.info("SUCCESS!") - except MaximumIterationError: - grid_issues["network"] = edisgo_grid.network.id - grid_issues["msg"] = str(edisgo_grid.network.results.unresolved_issues) - costs = pd.DataFrame(dtype=float) - logging.warning("Unresolved issues left after network expansion.") - except Exception as e: - grid_issues["network"] = edisgo_grid.network.id - grid_issues["msg"] = repr(e) - costs = pd.DataFrame(dtype=float) - logging.exception() - - return edisgo_grid, costs, grid_issues - - -def run_edisgo_twice(run_args): - """ - Run network analysis twice on same network: once w/ and once w/o new generators - - ToDo: adapt to refactored code! - - First run without connection of new generators approves sufficient network - hosting capacity. Otherwise, network is reinforced. - Second run assessment network extension needs in terms of RES integration - - Parameters - ---------- - run_args : list - Optional parameters for :func:`run_edisgo_basic`. 
- - Returns - ------- - all_costs_before_geno_import : :pandas:`pandas.Dataframe` - Grid extension cost before network connection of new generators - all_grid_issues_before_geno_import : dict - Remaining overloading or over-voltage issues in network - all_costs : :pandas:`pandas.Dataframe` - Grid extension cost due to network connection of new generators - all_grid_issues : dict - Remaining overloading or over-voltage issues in network - """ - - # base case with no generator import - ( - edisgo_grid, - costs_before_geno_import, - grid_issues_before_geno_import, - ) = run_edisgo_basic(*run_args) - - if edisgo_grid: - # clear the results object - edisgo_grid.results = Results(edisgo_grid) - edisgo_grid.config = None - - # case after generator import - # run_args = [ding0_filename] - # run_args.extend(run_args_opt) - run_args.append(edisgo_grid) - - _, costs, grid_issues = run_edisgo_basic(*run_args) - - return ( - costs_before_geno_import, - grid_issues_before_geno_import, - costs, - grid_issues, - ) - else: - return ( - costs_before_geno_import, - grid_issues_before_geno_import, - costs_before_geno_import, - grid_issues_before_geno_import, - ) - - -def run_edisgo_pool( - ding0_file_list, - run_args_opt=[None, "worst-case"], - workers=mp.cpu_count(), - worker_lifetime=1, -): - """ - Use python multiprocessing toolbox for parallelization - - Several grids are analyzed in parallel. 
- - Parameters - ---------- - ding0_file_list : list - Ding0 network data file names - run_args_opt : list - eDisGo options, see :func:`run_edisgo_basic` and - :func:`run_edisgo_twice`, has to contain generator_scenario and analysis as - entries - workers: int - Number of parallel process - worker_lifetime : int - Bunch of grids sequentially analyzed by a worker - - Returns - ------- - all_costs_before_geno_import : list - Grid extension cost before network connection of new generators - all_grid_issues_before_geno_import : list - Remaining overloading or over-voltage issues in network - all_costs : list - Grid extension cost due to network connection of new generators - all_grid_issues : list - Remaining overloading or over-voltage issues in network - """ - - def collect_pool_results(result): - results.append(result) - - results = [] - - pool = mp.Pool(workers, maxtasksperchild=worker_lifetime) - - for file in ding0_file_list: - edisgo_args = [file] + run_args_opt - pool.apply_async( - func=run_edisgo_twice, - args=(edisgo_args,), - callback=collect_pool_results, - ) - - pool.close() - pool.join() - - # process results data - all_costs_before_geno_import = [r[0] for r in results] - all_grid_issues_before_geno_import = [r[1] for r in results] - all_costs = [r[2] for r in results] - all_grid_issues = [r[3] for r in results] - - return ( - all_costs_before_geno_import, - all_grid_issues_before_geno_import, - all_costs, - all_grid_issues, - ) - - -def run_edisgo_pool_flexible( - ding0_id_list, - func, - func_arguments, - workers=mp2.cpu_count(), - worker_lifetime=1, -): - """ - Use python multiprocessing toolbox for parallelization - - Several grids are analyzed in parallel based on your custom function that - defines the specific application of eDisGo. 
- - Parameters - ---------- - ding0_id_list : list of int - List of ding0 network data IDs (also known as HV/MV substation IDs) - func : any function - Your custom function that shall be parallelized - func_arguments : tuple - Arguments to custom function ``func`` - workers: int - Number of parallel process - worker_lifetime : int - Bunch of grids sequentially analyzed by a worker - - Notes - ----- - Please note, the following requirements for the custom function which is to - be executed in parallel - - #. It must return an instance of the type :class:`~.edisgo.EDisGo`. - #. The first positional argument is the MV network district id (as int). It is - prepended to the tuple of arguments ``func_arguments`` - - - Returns - ------- - containers : dict of :class:`~.edisgo.EDisGo` - Dict of EDisGo instances keyed by its ID - """ - - def collect_pool_results(result): - """ - Store results from parallelized calculation in structured manner - - Parameters - ---------- - result: :class:`~.edisgo.EDisGo` - """ - results.update({result.network.id: result}) - - results = {} - - pool = mp2.Pool(workers, maxtasksperchild=worker_lifetime) - - def error_callback(key): - return lambda o: results.update({key: o}) - - for ding0_id in ding0_id_list: - edisgo_args = (ding0_id, *func_arguments) - pool.apply_async( - func=func, - args=edisgo_args, - callback=collect_pool_results, - error_callback=error_callback(ding0_id), - ) - - pool.close() - pool.join() - - return results - - -def edisgo_run(): - # create the argument parser - example_text = """Examples - - ...assumes all files located in PWD. - - Analyze a single network in 'worst-case' - - edisgo_run -f ding0_grids__997.pkl -wc - - - Analyze multiple grids in 'worst-case' using parallelization. Grid IDs are - specified by the grids_list.txt. 
- - edisgo_run -ds '' grids_list.txt ding0_grids__{}.pkl -wc --parallel - """ - parser = argparse.ArgumentParser( - description="Commandline running" + "of eDisGo", - epilog=example_text, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - - # add the verbosity arguments - - ding0_files_parsegroup = parser.add_mutually_exclusive_group(required=True) - - ding0_files_parsegroup.add_argument( - "-f", - "--ding0-file-path", - type=str, - action="store", - dest="ding0_filename", - help="Path to a single ding0 file.", - ) - ding0_files_parsegroup.add_argument( - "-d", - "--ding0-files-directory", - type=str, - action="store", - dest="ding0_dirglob", - help="Path to a directory of ding0 files " - + "along with a file name pattern for glob input.", - ) - ding0_files_parsegroup.add_argument( - "-ds", - "--ding0-files-directory-selection", - type=str, - nargs=3, - action="store", - dest="ding0_dir_select", - help="Path to a directory of ding0 files, " - + "Path to file with list of network district numbers " - + "(one number per line), " - + "and file name template using {} where number " - + "is to be inserted . 
Convention is to use " - + "a double underscore before network district number " - + " like so '__{}'.", - ) - - analysis_parsegroup = parser.add_mutually_exclusive_group() - - analysis_parsegroup.add_argument( - "-wc", - "--worst-case", - action="store_true", - help="Performs a worst-case simulation with " + "a single snapshot", - ) - - analysis_parsegroup.add_argument( - "-ts", - "--timeseries", - action="store_true", - help="Performs a worst-case simulation with " + "a time-series", - ) - - parser.add_argument( - "-s", - "--scenario", - type=str, - default=None, - choices=[None, "nep2035", "ego100"], - help="'None' or 'string'\n" - + "If provided defines which scenario " - + "of future generator park to use " - + "and invokes import of these generators.\n" - + "Possible options are 'nep2035'and 'ego100'.", - ) - - parser.add_argument( - "-o", - "--output-dir", - nargs="?", - metavar="/path/to/output/", - dest="out_dir", - type=str, - default=os.path.join(sys.path[0]), - help="Absolute path to results data location.", - ) - - parser.add_argument( - "-p", - "--parallel", - action="store_true", - help="Parallel execution of multiple " - "grids. Parallelization is provided " - "by multiprocessing.", - ) - - parser.add_argument( - "-w", - "--workers", - nargs="?", - metavar="1..inf", - dest="workers", - type=int, - default=mp.cpu_count(), - help="Number of workers in parallel. In other words, " - "cores that are used for parallelization.", - ) - - parser.add_argument( - "-lw", - "--lifetime-workers", - nargs="?", - metavar="1..inf", - dest="worker_lifetime", - type=int, - default=None, - help="Lifetime of a worker of the cluster doing the " - "work. The lifetime is given is number of jobs a" - " worker does before it is replaced by a freshly " - "new one." - "The default sets the lifetime to the pools " - "lifetime. 
This can cause memory issues!", - ) - - args = parser.parse_args(sys.argv[1:]) - - # get current time for output file names - exec_time = pd.datetime.now().strftime("%Y-%m-%d_%H%M") - - logger = setup_logging( # noqa: F841 - logfilename="test.log", - logfile_loglevel="debug", - console_loglevel="info", - ) - - # get the list of files to run on - if args.ding0_filename: - ding0_file_list = [args.ding0_filename] - - elif args.ding0_dirglob: - ding0_file_list = glob.glob(args.ding0_dirglob) - - elif args.ding0_dir_select: - with open(args.ding0_dir_select[1], "r") as file_handle: - ding0_file_list_grid_district_numbers = list(file_handle) - ding0_file_list_grid_district_numbers = [ - _.splitlines()[0] for _ in ding0_file_list_grid_district_numbers - ] - - ding0_file_list = map( - lambda x: args.ding0_dir_select[0] + args.ding0_dir_select[2].format(x), - ding0_file_list_grid_district_numbers, - ) - else: - raise FileNotFoundError("Some of the Arguments for input files are missing.") - - # this is the serial version of the run system - run_func = run_edisgo_basic # noqa: F841 - - run_args_opt_no_scenario = [None] - run_args_opt = [args.scenario] - if args.worst_case: - run_args_opt_no_scenario.append("worst-case") - run_args_opt.append("worst-case") - elif args.timeseries: - run_args_opt_no_scenario.append("timeseries") - run_args_opt.append("timeseries") - - all_costs_before_geno_import = [] - all_grid_issues_before_geno_import = {"network": [], "msg": []} - all_costs = [] - all_grid_issues = {"network": [], "msg": []} - - if not args.parallel: - for ding0_filename in ding0_file_list: - grid_district = _get_griddistrict(ding0_filename) # noqa: F821, F841 - - run_args = [ding0_filename] - run_args.extend(run_args_opt_no_scenario) - - ( - costs_before_geno_import, - grid_issues_before_geno_import, - costs, - grid_issues, - ) = run_edisgo_twice(run_args) - - all_costs_before_geno_import.append(costs_before_geno_import) - 
all_grid_issues_before_geno_import["network"].append( - grid_issues_before_geno_import["network"] - ) - all_grid_issues_before_geno_import["msg"].append( - grid_issues_before_geno_import["msg"] - ) - all_costs.append(costs) - all_grid_issues["network"].append(grid_issues["network"]) - all_grid_issues["msg"].append(grid_issues["msg"]) - else: - ( - all_costs_before_geno_import, - all_grid_issues_before_geno_import, - all_costs, - all_grid_issues, - ) = run_edisgo_pool( - ding0_file_list, - run_args_opt_no_scenario, - args.workers, - args.worker_lifetime, - ) - - # consolidate costs for all the networks - all_costs_before_geno_import = pd.concat( - all_costs_before_geno_import, ignore_index=True - ) - all_costs = pd.concat(all_costs, ignore_index=True) - - # write costs and error messages to csv files - pd.DataFrame(all_grid_issues_before_geno_import).dropna(axis=0, how="all").to_csv( - args.out_dir + exec_time + "_" + "grid_issues_before_geno_import.csv", - index=False, - ) - - with open( - args.out_dir + exec_time + "_" + "costs_before_geno_import.csv", "a" - ) as f: - f.write(",,,# units: length in km,, total_costs in kEUR\n") - all_costs_before_geno_import.to_csv(f, index=False) - - pd.DataFrame(all_grid_issues).dropna(axis=0, how="all").to_csv( - args.out_dir + exec_time + "_" + "grid_issues.csv", index=False - ) - with open(args.out_dir + exec_time + "_" + "costs.csv", "a") as f: - f.write(",,,# units: length in km,, total_costs in kEUR\n") - all_costs.to_csv(f, index=False) - - -if __name__ == "__main__": - pass diff --git a/edisgo/tools/logger.py b/edisgo/tools/logger.py new file mode 100644 index 000000000..93381c9c3 --- /dev/null +++ b/edisgo/tools/logger.py @@ -0,0 +1,205 @@ +import logging +import os +import sys + +from datetime import datetime + +from edisgo.tools import config as cfg_edisgo + + +def setup_logger( + file_name=None, + log_dir=None, + loggers=None, + stream_output=sys.stdout, + debug_message=False, + reset_loggers=False, +): + """ + Setup 
different loggers with individual logging levels and where to write output. + + The following table from python 'Logging Howto' shows you when which logging level + is used. + + .. tabularcolumns:: |l|L| + + +--------------+---------------------------------------------+ + | Level | When it's used | + +==============+=============================================+ + | ``DEBUG`` | Detailed information, typically of interest | + | | only when diagnosing problems. | + +--------------+---------------------------------------------+ + | ``INFO`` | Confirmation that things are working as | + | | expected. | + +--------------+---------------------------------------------+ + | ``WARNING`` | An indication that something unexpected | + | | happened, or indicative of some problem in | + | | the near future (e.g. 'disk space low'). | + | | The software is still working as expected. | + +--------------+---------------------------------------------+ + | ``ERROR`` | Due to a more serious problem, the software | + | | has not been able to perform some function. | + +--------------+---------------------------------------------+ + | ``CRITICAL`` | A serious error, indicating that the program| + | | itself may be unable to continue running. | + +--------------+---------------------------------------------+ + + Parameters + ---------- + file_name : str or None + Specifies file name of file logging information is written to. Possible options + are: + + * None (default) + Saves log file with standard name `%Y_%m_%d-%H:%M:%S_edisgo.log`. + * str + Saves log file with the specified file name. + + log_dir : str or None + Specifies directory log file is saved to. Possible options are: + + * None (default) + Saves log file in current working directory. + * "default" + Saves log file into directory configured in the configs. + * str + Saves log file into the specified directory. + + loggers : None or list(dict) + + * None + Configuration as shown in the example below is used. 
Configures root logger + with file and stream level warning and the edisgo logger with file and + stream level debug. + * list(dict) + List of dicts with the logger configuration. Each dictionary must contain + the following keys and corresponding values: + + * 'name' + Specifies name of the logger as string, e.g. 'root' or 'edisgo'. + * 'file_level' + Specifies file logging level. Possible options are: + + * "debug" + Logs logging messages with logging level logging.DEBUG and above. + * "info" + Logs logging messages with logging level logging.INFO and above. + * "warning" + Logs logging messages with logging level logging.WARNING and above. + * "error" + Logs logging messages with logging level logging.ERROR and above. + * "critical" + Logs logging messages with logging level logging.CRITICAL. + * None + No logging messages are logged. + * 'stream_level' + Specifies stream logging level. Possible options are the same as for + `file_level`. + + stream_output : stream + Default sys.stdout is used. sys.stderr is also possible. + + debug_message : bool + If True the handlers of every configured logger are printed. + + reset_loggers : bool + If True the handlers of all loggers are cleared before configuring the loggers. 
+ + Examples + -------- + >>> setup_logger( + >>> loggers=[ + >>> {"name": "root", "file_level": "warning", "stream_level": "warning"}, + >>> {"name": "edisgo", "file_level": "info", "stream_level": "info"} + >>> ] + >>> ) + + """ + + def create_dir(dir_path): + if not os.path.isdir(dir_path): + os.mkdir(dir_path) + + def get_default_root_dir(): + dir_path = str(cfg_edisgo.get("user_dirs", "root_dir")) + return os.path.join(os.path.expanduser("~"), dir_path) + + def create_home_dir(): + dir_path = get_default_root_dir() + create_dir(dir_path) + + cfg_edisgo.load_config("config_system.cfg") + + if file_name is None: + now = datetime.now() + file_name = now.strftime("%Y_%m_%d-%H:%M:%S_edisgo.log") + + if log_dir == "default": + create_home_dir() + log_dir = os.path.join( + get_default_root_dir(), cfg_edisgo.get("user_dirs", "log_dir") + ) + create_dir(log_dir) + + if log_dir is not None: + file_name = os.path.join(log_dir, file_name) + + if reset_loggers: + existing_loggers = [logging.getLogger()] # get the root logger + existing_loggers = existing_loggers + [ + logging.getLogger(name) for name in logging.root.manager.loggerDict + ] + + for logger in existing_loggers: + logger.handlers.clear() + + loglevel_dict = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, + None: logging.CRITICAL + 1, + } + + file_formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s: %(message)s" + ) + stream_formatter = logging.Formatter("%(name)s - %(levelname)s: %(message)s") + + if loggers is None: + loggers = [ + {"name": "root", "file_level": "warning", "stream_level": "warning"}, + {"name": "edisgo", "file_level": "info", "stream_level": "info"}, + ] + + for logger_config in loggers: + logger_name = logger_config["name"] + logger_file_level = loglevel_dict[logger_config["file_level"]] + logger_stream_level = loglevel_dict[logger_config["stream_level"]] + + if logger_name == 
"root": + logger = logging.getLogger() + else: + logger = logging.getLogger(logger_name) + logger.propagate = False + + if logger_file_level < logger_stream_level: + logger.setLevel(logger_file_level) + else: + logger.setLevel(logger_stream_level) + + if logger_file_level < logging.CRITICAL + 1: + file_handler = logging.FileHandler(file_name) + file_handler.setLevel(logger_file_level) + file_handler.setFormatter(file_formatter) + logger.addHandler(file_handler) + + if logger_stream_level < logging.CRITICAL + 1: + console_handler = logging.StreamHandler(stream=stream_output) + console_handler.setLevel(logger_stream_level) + console_handler.setFormatter(stream_formatter) + logger.addHandler(console_handler) + + if debug_message: + print(f"Handlers of logger {logger_name}: {logger.handlers}") diff --git a/examples/edisgo_simple_example.ipynb b/examples/edisgo_simple_example.ipynb index df5328960..99fcafb64 100755 --- a/examples/edisgo_simple_example.ipynb +++ b/examples/edisgo_simple_example.ipynb @@ -77,6 +77,29 @@ "from edisgo import EDisGo" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Set up logger" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# set up logger that streams edisgo logging messages with level info and above \n", + "# and other logging messages with level warning and above to stdout\n", + "setup_logger(\n", + " loggers=[\n", + " {\"name\": \"root\", \"file_level\": None, \"stream_level\": \"warning\"},\n", + " {\"name\": \"edisgo\", \"file_level\": None, \"stream_level\": \"info\"}\n", + " ]\n", + ")" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/examples/example_grid_reinforcement.py b/examples/example_grid_reinforcement.py index 803586a9d..ef671f515 100644 --- a/examples/example_grid_reinforcement.py +++ b/examples/example_grid_reinforcement.py @@ -32,19 +32,29 @@ from edisgo import EDisGo from edisgo.network.results import Results 
+from edisgo.tools.logger import setup_logger logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) def run_example(): + + # set up logger that streams edisgo logging messages with level info and above + # and other logging messages with level warning and above to stdout + setup_logger( + loggers=[ + {"name": "root", "file_level": None, "stream_level": "warning"}, + {"name": "edisgo", "file_level": None, "stream_level": "info"} + ] + ) + # Specify path to directory containing ding0 grid csv files edisgo_path = os.path.join(os.path.expanduser("~"), ".edisgo") dingo_grid_path = os.path.join(edisgo_path, "ding0_example_grid") # Download example grid data in case it does not yet exist - if not os.path.isdir(dingo_grid_path): + if not os.path.isdir(dingo_grid_path) or len(os.listdir(dingo_grid_path)) == 0: logger.debug("Download example grid data.") - os.makedirs(dingo_grid_path) + os.makedirs(dingo_grid_path, exist_ok=True) file_list = [ "buses.csv", "lines.csv", diff --git a/tests/test_examples.py b/tests/test_examples.py index a71c7adcb..750c42c15 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -1,3 +1,4 @@ +import logging import os import shutil import subprocess @@ -75,3 +76,8 @@ def test_plot_example_ipynb(self): # os.path.join(examples_dir_path, "edisgo_simple_example.ipynb") # ) # assert errors == [] + + @classmethod + def teardown_class(cls): + logger = logging.getLogger("edisgo") + logger.propagate = True diff --git a/tests/tools/test_logger.py b/tests/tools/test_logger.py new file mode 100644 index 000000000..f461c891b --- /dev/null +++ b/tests/tools/test_logger.py @@ -0,0 +1,59 @@ +import logging +import os + +from edisgo.tools.logger import setup_logger + + +class TestClass: + def test_setup_logger(self): + def check_file_output(output): + with open("edisgo.log", "r") as file: + last_line = file.readlines()[-1].split(" ")[3:] + last_line = " ".join(last_line) + assert last_line == output + + def reset_loggers(): + logger = 
logging.getLogger("edisgo") + logger.propagate = True + logger.handlers.clear() + logger = logging.getLogger() + logger.handlers.clear() + + if os.path.exists("edisgo.log"): + os.remove("edisgo.log") + + setup_logger( + loggers=[ + {"name": "root", "file_level": "debug", "stream_level": "debug"}, + {"name": "edisgo", "file_level": "debug", "stream_level": "debug"}, + ], + file_name="edisgo.log", + ) + + logger = logging.getLogger("edisgo") + # Test that edisgo logger writes to file. + logger.debug("root") + check_file_output("edisgo - DEBUG: root\n") + # Test that root logger writes to file. + logging.debug("root") + check_file_output("root - DEBUG: root\n") + + # reset_loggers() + + setup_logger( + loggers=[ + {"name": "edisgo", "file_level": "debug", "stream_level": "debug"}, + ], + file_name="edisgo.log", + reset_loggers=True, + debug_message=True, + ) + logger = logging.getLogger("edisgo") + # Test that edisgo logger writes to file. + logger.debug("edisgo") + check_file_output("edisgo - DEBUG: edisgo\n") + # Test that root logger doesn't write to file. + logging.debug("edisgo") + check_file_output("edisgo - DEBUG: edisgo\n") + + os.remove("edisgo.log")