class CIReport:
    """Generate a report of confidence intervals.

    !!! info "About the Confidence Interval Report"

        This class is responsible for generating a report that displays confidence
        intervals for a given set of parameters. The report can be generated as a
        table.

        Please also check the original implementation of the `lmfit` package:
        https://lmfit.github.io/lmfit-py/confidence.html#lmfit.ci_report

    Args:
        ci (Dict[str, List[Tuple[float, float]]]): The confidence intervals for
            the parameters, mapping each parameter name to `(probability, value)`
            pairs as produced by `lmfit.conf_interval`.
        with_offset (bool, optional): Whether to report the bounds relative to
            the best-fit value (the pair whose probability is ~0).
            Defaults to True.
        ndigits (int, optional): The number of digits to display in the report.
            Defaults to 5.
    """

    def __init__(
        self,
        ci: Dict[str, List[Tuple[float, float]]],
        with_offset: Optional[bool] = True,
        ndigits: Optional[int] = 5,
    ) -> None:
        """Initialize the CIReport object.

        Args:
            ci (Dict[str, List[Tuple[float, float]]]): The confidence intervals for
                the parameters.
            with_offset (bool, optional): Whether to include an offset in the report.
                Defaults to True.
            ndigits (int, optional): The number of digits to round the report values to.
                Defaults to 5.
        """
        self.ci = ci
        self.with_offset = with_offset
        self.ndigits = ndigits
        # Filled by `__call__`; stays empty until the report has been generated.
        self.df = pd.DataFrame()

    def convp(self, x: Tuple[float, float], bound_type: str) -> str:
        """Convert the confidence interval to a column label.

        Args:
            x (Tuple[float, float]): A `(probability, value)` pair.
            bound_type (str): Either `"LOWER"` or `"UPPER"`.

        Returns:
            str: `"BEST"` for the best-fit entry (probability ~0), otherwise the
                percentage label combined with the bound type.
        """
        return "BEST" if abs(x[0]) < 1.0e-2 else f"{x[0] * 100:.2f}% - {bound_type}"

    def __call__(self) -> None:
        """Generate the Confidence report as a table."""
        report: Dict[str, Dict[str, float]] = {}

        for name, row in self.ci.items():
            offset = 0.0
            if self.with_offset:
                # The best-fit entry (probability ~0) defines the offset.
                for cval, val in row:
                    if abs(cval) < 1.0e-2:
                        offset = val
            for i, (cval, val) in enumerate(row):
                # BUGFIX: use abs(cval) so that the negative probabilities
                # emitted by `lmfit.conf_interval` for lower bounds are also
                # reported relative to the offset; the bare `cval < 1.0e-2`
                # test matched every negative cval and skipped the
                # subtraction for all lower bounds.
                sval = val if abs(cval) < 1.0e-2 else val - offset
                # First half of the row holds lower bounds, second half upper.
                bound_type = "LOWER" if i < len(row) / 2 else "UPPER"
                report.setdefault(self.convp((cval, val), bound_type), {})[name] = sval
        self.df = pd.DataFrame(report)
        self.tabulate(df=self.df)

    def tabulate(self, df: pd.DataFrame) -> None:
        """Print the Confidence report as a table.

        Args:
            df (pd.DataFrame): The DataFrame to be printed.
        """
        PrintingResults.print_tabulate_df(df=df, floatfmt=f".{self.ndigits}f")
@pytest.mark.parametrize(
    "ci, with_offset, ndigits, expected_output, test_id",
    [
        (
            {"param1": [(0.025, 2), (0.975, 4)], "param2": [(0.025, 3), (0.975, 5)]},
            True,
            5,
            # No probability ~0 in the rows, so there is no "BEST" column and
            # the offset stays 0.0; convp labels are probability*100 percent.
            pd.DataFrame(
                index=["param1", "param2"],
                columns=["2.50% - LOWER", "97.50% - UPPER"],
                data=[[2.0, 4.0], [3.0, 5.0]],
            ),
            "Run - 1",
        ),
        (
            {
                "param1": [(0.0, 1), (0.025, 2), (0.975, 4)],
                "param2": [(0.0, 2), (0.025, 3), (0.975, 5)],
            },
            False,
            3,
            pd.DataFrame(
                index=["param1", "param2"],
                columns=["BEST", "2.50% - LOWER", "97.50% - UPPER"],
                data=[[1.0, 2.0, 4.0], [2.0, 3.0, 5.0]],
            ),
            "2",
        ),
        (
            {"param1": [(0.0, 1)]},
            True,
            2,
            pd.DataFrame({"BEST": {"param1": 1.0}}),
            "3",
        ),
        ({}, True, 5, pd.DataFrame(), "4"),
    ],
)
def test_CIReport(
    ci: Dict[str, List[Any]],
    with_offset: bool,
    ndigits: int,
    expected_output: pd.DataFrame,
    test_id: str,
) -> None:
    """Test the CIReport class.

    Args:
        ci (Dict[str, List[Any]]): Confidence intervals keyed by parameter name.
        with_offset (bool): Whether bounds are reported relative to the best fit.
        ndigits (int): Number of digits forwarded to the tabulated output.
        expected_output (pd.DataFrame): The DataFrame the report must build.
        test_id (str): Human-readable id for the parametrized case.
    """
    report = CIReport(ci=ci, with_offset=with_offset, ndigits=ndigits)

    report()

    # The generated table must match the expected frame, not merely print:
    # previously `expected_output` was never asserted against at all.
    if expected_output.empty:
        assert report.df.empty
    else:
        # check_dtype=False: "BEST" values keep their int dtype while
        # offset-corrected bounds become floats (val - 0.0).
        pd.testing.assert_frame_equal(report.df, expected_output, check_dtype=False)