diff --git a/docs/source/package_reference/logging.mdx b/docs/source/package_reference/logging.mdx index 9fd01154..9102755c 100644 --- a/docs/source/package_reference/logging.mdx +++ b/docs/source/package_reference/logging.mdx @@ -1,4 +1,7 @@ -# Loggers +# Logging + +## EvaluationTracker +[[autodoc]] logging.evaluation_tracker.EvaluationTracker ## GeneralConfigLogger [[autodoc]] logging.info_loggers.GeneralConfigLogger diff --git a/src/lighteval/logging/evaluation_tracker.py b/src/lighteval/logging/evaluation_tracker.py index 01705534..8cc8c09e 100644 --- a/src/lighteval/logging/evaluation_tracker.py +++ b/src/lighteval/logging/evaluation_tracker.py @@ -82,16 +82,35 @@ def default(self, o): class EvaluationTracker: - """ - Keeps track of the overall evaluation process and relevant informations. + """Keeps track of the overall evaluation process and relevant information. - The [`EvaluationTracker`] contains specific loggers for experiments details - ([`DetailsLogger`]), metrics ([`MetricsLogger`]), task versions - ([`VersionsLogger`]) as well as for the general configurations of both the - specific task ([`TaskConfigLogger`]) and overall evaluation run - ([`GeneralConfigLogger`]). It compiles the data from these loggers and + The [`~logging.evaluation_tracker.EvaluationTracker`] contains specific loggers for experiments details + ([`~logging.info_loggers.DetailsLogger`]), metrics ([`~logging.info_loggers.MetricsLogger`]), task versions + ([`~logging.info_loggers.VersionsLogger`]) as well as for the general configurations of both the + specific task ([`~logging.info_loggers.TaskConfigLogger`]) and overall evaluation run + ([`~logging.info_loggers.GeneralConfigLogger`]). It compiles the data from these loggers and writes it to files, which can be published to the Hugging Face hub if requested. + + Args: + output_dir (`str`): Local folder path where you want results to be saved. 
+ save_details (`bool`, defaults to True): If True, details are saved to the `output_dir`. + push_to_hub (`bool`, defaults to False): If True, details are pushed to the hub. + Results are pushed to `{hub_results_org}/details__{sanitized model_name}` for the model `model_name`, a public dataset, + if `public` is True else `{hub_results_org}/details__{sanitized model_name}_private`, a private dataset. + push_to_tensorboard (`bool`, defaults to False): If True, will create and push the results for a tensorboard folder on the hub. + hub_results_org (`str`, *optional*): The organisation to push the results to. + See more details about the datasets organisation in [`EvaluationTracker.save`]. + tensorboard_metric_prefix (`str`, defaults to "eval"): Prefix for the metrics in the tensorboard logs. + public (`bool`, defaults to False): If True, results and details are pushed to public orgs. + nanotron_run_info ([`~nanotron.config.GeneralArgs`], *optional*): Reference to information about Nanotron models runs. + + **Attributes**: + - **details_logger** ([`~logging.info_loggers.DetailsLogger`]) -- Logger for experiment details. + - **metrics_logger** ([`~logging.info_loggers.MetricsLogger`]) -- Logger for experiment metrics. + - **versions_logger** ([`~logging.info_loggers.VersionsLogger`]) -- Logger for task versions. + - **general_config_logger** ([`~logging.info_loggers.GeneralConfigLogger`]) -- Logger for general configuration. + - **task_config_logger** ([`~logging.info_loggers.TaskConfigLogger`]) -- Logger for task configuration. """ def __init__( @@ -105,23 +124,7 @@ def __init__( public: bool = False, nanotron_run_info: "GeneralArgs" = None, ) -> None: - """ - Creates all the necessary loggers for evaluation tracking. - - Args: - output_dir (str): Local folder path where you want results to be saved - save_details (bool): If True, details are saved to the output_dir - push_to_hub (bool): If True, details are pushed to the hub. 
- Results are pushed to `{hub_results_org}/details__{sanitized model_name}` for the model `model_name`, a public dataset, - if `public` is True else `{hub_results_org}/details__{sanitized model_name}_private`, a private dataset. - push_results_to_tensorboard (bool): If True, will create and push the results for a tensorboard folder on the hub - hub_results_org (str): The organisation to push the results to. See - more details about the datasets organisation in - [`EvaluationTracker.save`] - tensorboard_metric_prefix (str): Prefix for the metrics in the tensorboard logs - public (bool): If True, results and details are pushed in private orgs - nanotron_run_info (GeneralArgs): Reference to informations about Nanotron models runs - """ + """Creates all the necessary loggers for evaluation tracking.""" self.details_logger = DetailsLogger() self.metrics_logger = MetricsLogger() self.versions_logger = VersionsLogger() diff --git a/src/lighteval/main_accelerate.py b/src/lighteval/main_accelerate.py index a0a01abe..2dd78f44 100644 --- a/src/lighteval/main_accelerate.py +++ b/src/lighteval/main_accelerate.py @@ -31,7 +31,7 @@ logger = logging.getLogger(__name__) TOKEN = os.getenv("HF_TOKEN") -CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") +CACHE_DIR: str = os.getenv("HF_HOME") HELP_PANEL_NAME_1 = "Common Parameters" HELP_PANEL_NAME_2 = "Logging Parameters"