First stab at tests for aggregate-reports
eigerx committed Aug 16, 2024
1 parent 8d42d07 commit 9aaf651
Showing 2 changed files with 59 additions and 1 deletion.
requirements.txt (2 changes: 1 addition & 1 deletion)
@@ -6,6 +6,6 @@ networkx
lsst-resources @ git+https://github.com/lsst/resources@main
lsst-daf-butler @ git+https://github.com/lsst/daf_butler@main
lsst-utils @ git+https://github.com/lsst/utils@main
-lsst-pipe-base @ git+https://github.com/lsst/pipe_base@tickets/DM-41711
+lsst-pipe-base @ git+https://github.com/lsst/pipe_base@tickets/DM-41605
lsst-pex-config @ git+https://github.com/lsst/pex_config@main
sqlalchemy
tests/test_cliCmdReport.py (58 changes: 58 additions & 0 deletions)
@@ -32,6 +32,7 @@

import yaml
from lsst.ctrl.mpexec.cli.pipetask import cli as pipetask_cli
from lsst.ctrl.mpexec.cli.script.report import print_summary
from lsst.daf.butler.cli.utils import LogCliRunner, clickResultMsg
from lsst.daf.butler.tests.utils import makeTestTempDir, removeTestTempDir
from lsst.pipe.base.quantum_provenance_graph import DatasetTypeSummary, Summary, TaskSummary
@@ -487,6 +488,63 @@ def test_report(self):
},
)

def test_aggregate_reports(self):
"""Test `pipetask aggregate-reports` command. We make one
`SimpleQgraph`, fake a copy of its report summary, and make sure
we can aggregate the two similar reports.
"""
metadata = {"output_run": "run1"}
butler, qgraph1 = makeSimpleQGraph(
run="run",
root=self.root,
metadata=metadata,
)

# Check that we can get the proper run collection from the qgraph
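# (An empty result here means no outputs in the graph reference an
# unexpected run collection.)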
self.assertEqual(check_output_run(qgraph1, "run"), [])

# Save the graph
graph_uri_1 = os.path.join(self.root, "graph1.qgraph")
qgraph1.saveUri(graph_uri_1)

file1 = os.path.join(self.root, "report_test_1.json")
file2 = os.path.join(self.root, "report_test_2.json")
aggregate_file = os.path.join(self.root, "aggregate_report.json")

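# Run `pipetask report` on the new graph; --full-output-filename writes
# the full Summary as JSON to file1, and --force-v2 selects the
# QuantumProvenanceGraph-based version of the report.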
report1 = self.runner.invoke(
pipetask_cli,
[
"report",
self.root,
graph_uri_1,
"--no-logs",
"--full-output-filename",
file1,
"--force-v2",
],
input="no",
)

self.assertEqual(report1.exit_code, 0, clickResultMsg(report1))
# Now, copy the json output into a duplicate file and aggregate
with open(file1, "r") as f:
sum1 = Summary.model_validate_json(f.read())
sum2 = sum1.model_copy(deep=True)
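# Write the duplicated summary out to file2 so we have a second report
# file with identical content to aggregate alongside file1.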
print_summary(sum2, file2, brief=False)

# Then use these file outputs as the inputs to aggregate reports:
aggregate_report = self.runner.invoke(
pipetask_cli,
[
"aggregate-reports",
file1,
file2,
"--full-output-filename",
aggregate_file,
]
)
# Check that aggregate command had a zero exit code:
self.assertEqual(aggregate_report.exit_code, 0, clickResultMsg(aggregate_report))
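A natural follow-up assertion (not part of this commit) would be to parse the aggregated file back into a `Summary` and confirm the output is well-formed. A minimal sketch, assuming the aggregate report is written with the same JSON schema as the per-graph reports (`aggregate_summary` is a hypothetical local name):

    # Hypothetical extra check: the aggregated report should round-trip
    # through the Summary model used by the individual reports.
    with open(aggregate_file) as f:
        aggregate_summary = Summary.model_validate_json(f.read())
    self.assertIsInstance(aggregate_summary, Summary)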

if __name__ == "__main__":
    unittest.main()

Check failure on line 549 in tests/test_cliCmdReport.py (GitHub Actions / call-workflow / lint): E305 expected 2 blank lines after class or function definition, found 1.

0 comments on commit 9aaf651
