Skip to content

Commit

Permalink
Satisfy linters, rebase and run GitHub Actions
Browse files Browse the repository at this point in the history
  • Loading branch information
eigerx committed Aug 1, 2024
1 parent 3de139b commit 2f69335
Show file tree
Hide file tree
Showing 4 changed files with 45 additions and 41 deletions.
1 change: 1 addition & 0 deletions doc/changes/DM-41711.feature.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Add tests for the `QuantumProvenanceGraph`.
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,4 @@ lsst-daf-butler @ git+https://github.com/lsst/daf_butler@main
lsst-utils @ git+https://github.com/lsst/utils@main
lsst-resources @ git+https://github.com/lsst/resources@main
lsst-pipe-base @ git+https://github.com/lsst/pipe_base@tickets/DM-41711
lsst-ctrl-mpexec @ git+https://github.com/lsst/ctrl_mpexec@main
lsst-ctrl-mpexec @ git+https://github.com/lsst/ctrl_mpexec@tickets/DM-41711
6 changes: 4 additions & 2 deletions tests/test_prod_outputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -328,7 +328,8 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
qg_1_dict["datasets"][dataset]["unsuccessful_datasets"][0]["physical_filter"],
"HSC-I",
)
# Check that there are the expected amount of failures and that they are not published
# Check that there are the expected number of failures
# and that they are not published
self.assertEqual(len(qg_1_dict["datasets"][dataset]["unsuccessful_datasets"]), 6)
self.assertEqual(qg_1_dict["datasets"][dataset]["n_expected"], 36)
self.assertEqual(qg_1_dict["datasets"][dataset]["n_published"], 30)
Expand Down Expand Up @@ -465,7 +466,8 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
self.assertEqual(qg_2_dict["datasets"][dataset]["n_unsuccessful"], 0)
self.assertListEqual(qg_2_dict["datasets"][dataset]["unsuccessful_datasets"], [])

# Since we have recovered everything, we should have the same numbers for every task:
# Since we have recovered everything, we should have the same
# numbers for every task:
self.assertEqual(qg_2_dict["datasets"][dataset]["n_expected"], 36)
self.assertEqual(qg_2_dict["datasets"][dataset]["n_published"], 36)
self.assertEqual(qg_2_dict["datasets"][dataset]["n_unpublished"], 0)
Expand Down
77 changes: 39 additions & 38 deletions tests/test_rc2_outputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ def test_step8_rescue_qbb(self) -> None:

def check_step8_qpg(self, helper: OutputRepoTests) -> None:
"""Check that the fail-and-recover attempts in step 8 are properly
diagnosed using the `QuantumProvenanceGraph`.
diagnosed using the `QuantumProvenanceGraph`.
"""
# Make the quantum provenance graph for the first attempt
qg_1 = helper.get_quantum_graph("step8", "attempt1")
Expand All @@ -165,7 +165,7 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
)
qg_1_sum_only = qpg1.to_summary(helper.butler)
qg_1_dict = qg_1_sum_only.model_dump()

# Check that expected, wonky and not attempted do not occur throughout
# tasks:
for task in qg_1_dict["tasks"]:
Expand All @@ -180,19 +180,20 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
self.assertEqual(qg_1_dict["tasks"][task]["n_expected"], 1)
self.assertEqual(qg_1_dict["tasks"]["_mock_analyzeObjectTableCore"]["n_failed"], 1)
self.assertEqual(qg_1_dict["tasks"]["_mock_analyzeObjectTableCore"]["n_successful"], 0)
self.assertEqual(qg_1_dict["tasks"]["_mock_analyzeObjectTableCore"]["failed_quanta"],
[{
"data_id": {
"skymap": "ci_mw",
"tract": 0
},
"runs": {
"HSC/runs/RC2/step8-attempt1": "failed"
},
"messages": [
"Execution of task '_mock_analyzeObjectTableCore' on quantum {skymap: 'ci_mw', tract: 0} failed. Exception ValueError: Simulated failure: task=_mock_analyzeObjectTableCore dataId={skymap: 'ci_mw', tract: 0}"
]
}])
self.assertEqual(
qg_1_dict["tasks"]["_mock_analyzeObjectTableCore"]["failed_quanta"],
[
{
"data_id": {"skymap": "ci_mw", "tract": 0},
"runs": {"HSC/runs/RC2/step8-attempt1": "failed"},
"messages": [
"Execution of task '_mock_analyzeObjectTableCore' on quantum {skymap: "
"'ci_mw', tract: 0} failed. Exception ValueError: Simulated failure: "
"task=_mock_analyzeObjectTableCore dataId={skymap: 'ci_mw', tract: 0}"
],
}
],
)
self.assertEqual(qg_1_dict["tasks"]["_mock_analyzeObjectTableCore"]["n_blocked"], 0)
case _:
# If it's not the failed task, there should be no failures
Expand All @@ -211,7 +212,7 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
self.assertEqual(qg_1_dict["tasks"][task]["n_successful"], 2)
else:
self.assertEqual(qg_1_dict["tasks"][task]["n_expected"], 1)
self.assertEqual(qg_1_dict["tasks"][task]["n_successful"], 1)
self.assertEqual(qg_1_dict["tasks"][task]["n_successful"], 1)
# Check on datasets
# This used to be a self.assertIn but the list was annoyingly long.
self.assertEqual(len(qg_1_dict["datasets"].keys()), 218)
Expand All @@ -230,18 +231,18 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
self.assertEqual(qg_1_dict["datasets"][dataset]["n_published"], 0)
self.assertEqual(qg_1_dict["datasets"][dataset]["n_expected"], 1)
self.assertEqual(qg_1_dict["datasets"][dataset]["n_unsuccessful"], 1)
self.assertListEqual(qg_1_dict["datasets"][dataset]["unsuccessful_datasets"],
[
{
"skymap": "ci_mw",
"tract": 0
}
],)
self.assertListEqual(
qg_1_dict["datasets"][dataset]["unsuccessful_datasets"],
[{"skymap": "ci_mw", "tract": 0}],
)
# These are the non-failed tasks:
case _:
self.assertEqual(qg_1_dict["datasets"][dataset]["n_unsuccessful"], 0)
self.assertListEqual(qg_1_dict["datasets"][dataset]["unsuccessful_datasets"], [])
if qg_1_dict["datasets"][dataset]["producer"] == "_mock_analyzeMatchedPreVisitCore" or qg_1_dict["datasets"][dataset]["producer"] == "_mock_analyzeMatchedVisitCore":
if (
qg_1_dict["datasets"][dataset]["producer"] == "_mock_analyzeMatchedPreVisitCore"
or qg_1_dict["datasets"][dataset]["producer"] == "_mock_analyzeMatchedVisitCore"
):
self.assertEqual(qg_1_dict["datasets"][dataset]["n_published"], 4)
self.assertEqual(qg_1_dict["datasets"][dataset]["n_expected"], 4)
elif qg_1_dict["datasets"][dataset]["producer"] == "_mock_plotPropertyMapTract":
Expand All @@ -250,7 +251,7 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
else:
self.assertEqual(qg_1_dict["datasets"][dataset]["n_published"], 1)
self.assertEqual(qg_1_dict["datasets"][dataset]["n_expected"], 1)

# Now examine the quantum provenance graph after the recovery attempt
# has been made.
# Make the quantum provenance graph for the second (recovery) attempt
Expand Down Expand Up @@ -288,7 +289,9 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
qpg2.add_new_graph(helper.butler, qg_1)
qpg2.add_new_graph(helper.butler, qg_2)
qpg2.resolve_duplicates(
helper.butler, collections=["HSC/runs/RC2/step8-attempt2", "HSC/runs/RC2/step8-attempt1"], where="instrument='HSC'"
helper.butler,
collections=["HSC/runs/RC2/step8-attempt2", "HSC/runs/RC2/step8-attempt1"],
where="instrument='HSC'",
)
qg_2_sum_only = qpg2.to_summary(helper.butler)
qg_2_dict = qg_2_sum_only.model_dump()
Expand All @@ -299,24 +302,22 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
self.assertEqual(qg_2_dict["tasks"][task]["n_blocked"], 0)
self.assertListEqual(qg_2_dict["tasks"][task]["wonky_quanta"], [])
# There should be no failures, so we can say for all tasks:
self.assertEqual(qg_2_dict["tasks"][task]["n_successful"],
qg_2_dict["tasks"][task]["n_expected"])
self.assertEqual(qg_2_dict["tasks"][task]["n_successful"], qg_2_dict["tasks"][task]["n_expected"])
self.assertEqual(qg_2_dict["tasks"][task]["n_failed"], 0)
self.assertListEqual(qg_2_dict["tasks"][task]["failed_quanta"], [])
match task:
# Check that the failure was recovered:
case "_mock_analyzeObjectTableCore":
self.assertEqual(qg_2_dict["tasks"][task]["n_expected"], 1)
self.assertEqual(qg_2_dict["tasks"]["_mock_analyzeObjectTableCore"]["n_successful"], 1)
self.assertEqual(qg_2_dict["tasks"]["_mock_analyzeObjectTableCore"]["recovered_quanta"],
[{
"skymap": "ci_mw",
"tract": 0
}])
self.assertEqual(
qg_2_dict["tasks"]["_mock_analyzeObjectTableCore"]["recovered_quanta"],
[{"skymap": "ci_mw", "tract": 0}],
)
self.assertEqual(qg_2_dict["tasks"]["_mock_analyzeObjectTableCore"]["n_blocked"], 0)
case _:
self.assertListEqual(qg_2_dict["tasks"][task]["recovered_quanta"], [])

# Check on datasets
# This used to be a self.assertIn but the list was annoyingly long.
self.assertEqual(len(qg_2_dict["datasets"].keys()), 218)
Expand All @@ -329,10 +330,10 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
self.assertEqual(qg_2_dict["datasets"][dataset]["n_unsuccessful"], 0)
self.assertListEqual(qg_2_dict["datasets"][dataset]["unsuccessful_datasets"], [])
self.assertEqual(qg_2_dict["datasets"][dataset]["n_unpublished"], 0)
self.assertEqual(qg_2_dict["datasets"][dataset]["n_published"],
qg_2_dict["datasets"][dataset]["n_expected"])

self.assertEqual(
qg_2_dict["datasets"][dataset]["n_published"], qg_2_dict["datasets"][dataset]["n_expected"]
)

def test_step8_quantum_provenance_graph_qbb(self) -> None:
self.check_step8_qpg(self.qbb)

Expand Down

0 comments on commit 2f69335

Please sign in to comment.