From 9aaf651b8e4a928f0acd1a396774844521a1eca2 Mon Sep 17 00:00:00 2001
From: Orion Eiger
Date: Tue, 13 Aug 2024 15:38:53 -0700
Subject: [PATCH] First stab at tests for aggregate-reports

---
 requirements.txt           |  2 +-
 tests/test_cliCmdReport.py | 58 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 025994df..2debf254 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,6 @@ networkx
 lsst-resources @ git+https://github.com/lsst/resources@main
 lsst-daf-butler @ git+https://github.com/lsst/daf_butler@main
 lsst-utils @ git+https://github.com/lsst/utils@main
-lsst-pipe-base @ git+https://github.com/lsst/pipe_base@tickets/DM-41711
+lsst-pipe-base @ git+https://github.com/lsst/pipe_base@tickets/DM-41605
 lsst-pex-config @ git+https://github.com/lsst/pex_config@main
 sqlalchemy
diff --git a/tests/test_cliCmdReport.py b/tests/test_cliCmdReport.py
index f16ddc09..4de7715b 100644
--- a/tests/test_cliCmdReport.py
+++ b/tests/test_cliCmdReport.py
@@ -32,6 +32,7 @@
 import yaml
 
 from lsst.ctrl.mpexec.cli.pipetask import cli as pipetask_cli
+from lsst.ctrl.mpexec.cli.script.report import print_summary
 from lsst.daf.butler.cli.utils import LogCliRunner, clickResultMsg
 from lsst.daf.butler.tests.utils import makeTestTempDir, removeTestTempDir
 from lsst.pipe.base.quantum_provenance_graph import DatasetTypeSummary, Summary, TaskSummary
@@ -487,6 +488,63 @@ def test_report(self):
             },
         )
+
+    def test_aggregate_reports(self):
+        """Test the `pipetask aggregate-reports` command. We make one
+        simple quantum graph, duplicate its report summary, and make
+        sure we can aggregate the two matching reports.
+        """
+        metadata = {"output_run": "run1"}
+        butler, qgraph1 = makeSimpleQGraph(
+            run="run",
+            root=self.root,
+            metadata=metadata,
+        )
+        # Check that we can get the proper run collection from the qgraph.
+        self.assertEqual(check_output_run(qgraph1, "run"), [])
+
+        # Save the graph.
+        graph_uri_1 = os.path.join(self.root, "graph1.qgraph")
+        qgraph1.saveUri(graph_uri_1)
+
+        file1 = os.path.join(self.root, "report_test_1.json")
+        file2 = os.path.join(self.root, "report_test_2.json")
+        aggregate_file = os.path.join(self.root, "aggregate_report.json")
+
+        report1 = self.runner.invoke(
+            pipetask_cli,
+            [
+                "report",
+                self.root,
+                graph_uri_1,
+                "--no-logs",
+                "--full-output-filename",
+                file1,
+                "--force-v2",
+            ],
+            input="no",
+        )
+        self.assertEqual(report1.exit_code, 0, clickResultMsg(report1))
+
+        # Now copy the JSON output into a duplicate file.
+        with open(file1, "r") as f:
+            sum1 = Summary.model_validate_json(f.read())
+        sum2 = sum1.model_copy(deep=True)
+        print_summary(sum2, file2, brief=False)
+
+        # Then use these file outputs as the inputs to aggregate-reports:
+        aggregate_report = self.runner.invoke(
+            pipetask_cli,
+            [
+                "aggregate-reports",
+                file1,
+                file2,
+                "--full-output-filename",
+                aggregate_file,
+            ],
+        )
+        # Check that the aggregate command had a zero exit code:
+        self.assertEqual(aggregate_report.exit_code, 0, clickResultMsg(aggregate_report))
 
 
 if __name__ == "__main__":
     unittest.main()
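
For reference (not part of the patch), the JSON round trip the new test relies on can be sketched in isolation. This is a minimal sketch using only the pydantic-style `Summary` model already imported by the test; the file name is illustrative, taken from the test's own temporary output.

    from lsst.pipe.base.quantum_provenance_graph import Summary

    # Read back a report written by `pipetask report --full-output-filename`.
    with open("report_test_1.json") as f:
        summary = Summary.model_validate_json(f.read())

    # model_copy(deep=True) yields an independent but identical Summary,
    # which is why the test can aggregate a report with its own duplicate.
    duplicate = summary.model_copy(deep=True)
    assert duplicate == summary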