diff --git a/skore/src/skore/sklearn/_estimator.py b/skore/src/skore/sklearn/_estimator.py
index 02492fb82..57772a341 100644
--- a/skore/src/skore/sklearn/_estimator.py
+++ b/skore/src/skore/sklearn/_estimator.py
@@ -892,7 +892,7 @@ def report_metrics(
                     metrics_kwargs["pos_label"] = pos_label
             else:
                 raise ValueError(
-                    f"Invalid type of metric: {type(metric)} for metric: {metric}"
+                    f"Invalid type of metric: {type(metric)} for {metric!r}"
                 )
 
             scores.append(
diff --git a/skore/tests/unit/sklearn/test_estimator.py b/skore/tests/unit/sklearn/test_estimator.py
index 06ed25796..7086376b2 100644
--- a/skore/tests/unit/sklearn/test_estimator.py
+++ b/skore/tests/unit/sklearn/test_estimator.py
@@ -778,6 +778,16 @@ def custom_metric(y_true, y_pred, some_weights):
     )
 
 
+def test_estimator_report_report_metrics_invalid_metric_type(regression_data):
+    """Check that we raise the expected error message if an invalid metric is passed."""
+    estimator, X_test, y_test = regression_data
+    report = EstimatorReport(estimator, X_test=X_test, y_test=y_test)
+
+    err_msg = re.escape("Invalid type of metric: <class 'int'> for 1")
+    with pytest.raises(ValueError, match=err_msg):
+        report.metrics.report_metrics(scoring=[1])
+
+
 def test_estimator_report_get_X_y_and_data_source_hash_error():
     """Check that we raise the proper error in `get_X_y_and_use_cache`."""
     X, y = make_classification(n_classes=2, random_state=42)