Commit

Update step 8 mocks to step 5
eigerx committed Sep 18, 2024
1 parent 8f3b512 commit a015d8c
Showing 1 changed file with 121 additions and 105 deletions.
226 changes: 121 additions & 105 deletions tests/test_rc2_outputs.py
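
The tests in this diff all follow the same QuantumProvenanceGraph pattern: assemble a graph over one or more attempts, call to_summary, then assert on the per-task and per-dataset counts. A minimal sketch of that pattern, not part of this commit, is shown below; the import path and the names summarize_attempts, butler, quantum_graphs, and collections are assumptions for illustration.

from lsst.pipe.base.quantum_provenance_graph import QuantumProvenanceGraph


def summarize_attempts(butler, quantum_graphs, collections, where="instrument='HSC'"):
    """Assemble a provenance graph over the given attempts and report counts."""
    qpg = QuantumProvenanceGraph()
    # Collections are searched in order; when diagnosing a recovery, the newest
    # attempt is listed first (as in the step5-attempt2/step5-attempt1 case below).
    qpg.assemble_quantum_provenance_graph(butler, quantum_graphs, collections=collections, where=where)
    summary = qpg.to_summary(butler)
    for label, task_summary in summary.tasks.items():
        print(label, task_summary.n_expected, task_summary.n_successful, task_summary.n_failed)
    for name, dataset_summary in summary.datasets.items():
        print(name, dataset_summary.n_visible, dataset_summary.n_shadowed, dataset_summary.n_cursed)
    return summary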
@@ -149,17 +149,17 @@ def test_step5_rescue_direct(self) -> None:
self.assertIn(get_mock_name("transformForcedSourceTable"), tasks_with_quanta_1)
self.assertNotIn(get_mock_name("transformForcedSourceTable"), tasks_with_quanta_2)

def test_step8_rescue_qbb(self) -> None:
def test_step5_rescue_qbb(self) -> None:
self.check_step5_rescue(self.qbb)

def check_step8_qpg(self, helper: OutputRepoTests) -> None:
"""Check that the fail-and-recover attempts in step 8 are properly
def check_step5_qpg(self, helper: OutputRepoTests) -> None:
"""Check that the fail-and-recover attempts in step 5 are properly
diagnosed using the `QuantumProvenanceGraph`.
"""
# Make the quantum provenance graph for the first attempt
qg_1 = helper.get_quantum_graph("step8", "attempt1")
qg_1 = helper.get_quantum_graph("step5", "attempt1")
qpg1 = QuantumProvenanceGraph()
qpg1.assemble_quantum_provenance_graph(helper.butler, qg_1, collections=["HSC/runs/RC2/step8-attempt1"], where="instrument='HSC'")
qpg1.assemble_quantum_provenance_graph(helper.butler, [qg_1], collections=["HSC/runs/RC2/step5-attempt1"], where="instrument='HSC'")

Check failure on line 162 in tests/test_rc2_outputs.py (GitHub Actions / call-workflow / lint): E501 line too long (140 > 110 characters)
qg_1_sum = qpg1.to_summary(helper.butler)

# Check that expected, wonky and not attempted do not occur throughout
@@ -171,125 +171,124 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
self.assertListEqual(task_summary.recovered_quanta, [])
match label:
# Check that the failure was documented in expected ways:
case "_mock_analyzeObjectTableCore":
case label if label in ["_mock_transformForcedSourceTable",
"_mock_drpAssociation",
"_mock_drpDiaCalculation",
"_mock_transformForcedSourceOnDiaObjectTable",
]:
self.assertEqual(task_summary.n_expected, 4)
self.assertEqual(task_summary.n_failed, 0)
self.assertEqual(task_summary.n_successful, 4)
self.assertEqual(task_summary.n_blocked, 0)
self.assertEqual(task_summary.failed_quanta, [])
case "_mock_consolidateForcedSourceTable":
self.assertEqual(task_summary.n_expected, 1)
self.assertEqual(task_summary.n_failed, 1)
self.assertEqual(task_summary.n_successful, 0)
self.assertEqual(task_summary.n_blocked, 0)
self.assertEqual(
task_summary.failed_quanta,
[
UnsuccessfulQuantumSummary(
data_id={"skymap": "ci_mw", "tract": 0},
runs={"HSC/runs/RC2/step8-attempt1": "failed"},
data_id={"instrument": "HSC", "skymap": "ci_mw", "tract": 0},
runs={"HSC/runs/RC2/step5-attempt1": "failed"},
messages=[
"Execution of task '_mock_analyzeObjectTableCore' on quantum {skymap: "
"'ci_mw', tract: 0} failed. Exception ValueError: Simulated failure: "
"task=_mock_analyzeObjectTableCore dataId={skymap: 'ci_mw', tract: 0}"
"Execution of task '_mock_consolidateForcedSourceTable' on quantum "
"{instrument: 'HSC', skymap: 'ci_mw', tract: 0} failed. Exception "
"ValueError: Simulated failure: task=_mock_consolidateForcedSourceTable "
"dataId={instrument: 'HSC', skymap: 'ci_mw', tract: 0}"
],
)
],
)
case label if label in ["_mock_consolidateAssocDiaSourceTable",
"_mock_consolidateFullDiaObjectTable",
"_mock_consolidateForcedSourceOnDiaObjectTable",
]:
self.assertEqual(task_summary.n_expected, 1)
self.assertEqual(task_summary.n_failed, 0)
self.assertEqual(task_summary.n_successful, 1)
self.assertEqual(task_summary.n_blocked, 0)
case _:
# If it's not the failed task, there should be no failures
self.assertEqual(task_summary.failed_quanta, [])

Check failure on line 213 in tests/test_rc2_outputs.py (GitHub Actions / call-workflow / lint): W293 blank line contains whitespace
case label if label in ["_mock_forcedPhotCcdOnDiaObjects",
"_mock_forcedPhotDiffOnDiaObjects",
"_mock_writeForcedSourceOnDiaObjectTable",
]:
self.assertEqual(task_summary.n_expected, 46)
self.assertEqual(task_summary.n_failed, 0)
self.assertListEqual(task_summary.failed_quanta, [])
# We also shouldn't have had anything to recover
self.assertListEqual(task_summary.recovered_quanta, [])
# The next few if's are making sure we have the same
# number of expected and successful quanta. We could also
# just assert that n_expected == n_successful.
if label in ["_mock_analyzeMatchedPreVisitCore", "_mock_analyzeMatchedVisitCore"]:
self.assertEqual(task_summary.n_expected, 4)
self.assertEqual(task_summary.n_successful, 4)
self.assertEqual(task_summary.n_blocked, 0)
elif label == "_mock_plotPropertyMapTract":
self.assertEqual(task_summary.n_expected, 2)
self.assertEqual(task_summary.n_successful, 2)
self.assertEqual(task_summary.n_blocked, 0)
elif label in [
"_mock_makeMetricTableObjectTableCore",
"_mock_objectTableCoreWholeSkyPlot",
]:
self.assertEqual(task_summary.n_blocked, 1)
self.assertEqual(task_summary.n_successful, 0)
elif label == "_mock_analyzeAmpOffsetMetadata":
self.assertEqual(task_summary.n_expected, 60)
self.assertEqual(task_summary.n_successful, 60)
self.assertEqual(task_summary.n_blocked, 0)
else:
self.assertEqual(
task_summary.n_expected, 1, f"{label} had {task_summary.n_expected} tasks."
)
self.assertEqual(
task_summary.n_successful,
1,
f"{label} had {task_summary.n_successful} successful tasks.",
)
self.assertEqual(
task_summary.n_blocked, 0, f"{label} had {task_summary.n_blocked} blocked tasks."
)
self.assertEqual(task_summary.n_successful, 46)
self.assertEqual(task_summary.n_blocked, 0)
self.assertEqual(task_summary.failed_quanta, [])
# Check on datasets
for dataset_type_summary in qg_1_sum.datasets.values():
# We shouldn't run into predicted only, unpublished or cursed.
# Unpublished suggests that the dataset exists but is not included
for dataset_type_name, dataset_type_summary in qg_1_sum.datasets.items():
# We shouldn't run into predicted only, shadowed or cursed.
# Shadowed suggests that the dataset exists but is not included
# in the final collection; cursed suggests that the dataset is
# published but unsuccessful.
# visible but unsuccessful.
self.assertEqual(dataset_type_summary.n_predicted_only, 0)
self.assertEqual(dataset_type_summary.n_shadowed, 0)
self.assertEqual(dataset_type_summary.n_cursed, 0)
self.assertListEqual(dataset_type_summary.cursed_datasets, [])
match dataset_type_summary.producer:
# Check that the failure was documented in expected ways:
case "_mock_analyzeObjectTableCore":
case label if label in ["_mock_transformForcedSourceTable",
"_mock_drpAssociation",
"_mock_drpDiaCalculation",
"_mock_transformForcedSourceOnDiaObjectTable",
]:
self.assertEqual(dataset_type_summary.n_visible, 4)
self.assertEqual(dataset_type_summary.n_expected, 4)
self.assertEqual(dataset_type_summary.n_unsuccessful, 0)
self.assertListEqual(dataset_type_summary.unsuccessful_datasets, [])
case "_mock_consolidateForcedSourceTable":
self.assertEqual(dataset_type_summary.n_visible, 0)
self.assertEqual(dataset_type_summary.n_expected, 1)
self.assertEqual(dataset_type_summary.n_unsuccessful, 1)
self.assertListEqual(
dataset_type_summary.unsuccessful_datasets,
[{"skymap": "ci_mw", "tract": 0}],
)
if dataset_type_name== "_mock_forcedSourceTable_tract":

Check failure on line 248 in tests/test_rc2_outputs.py (GitHub Actions / call-workflow / lint): E225 missing whitespace around operator
self.assertListEqual(
dataset_type_summary.unsuccessful_datasets,
[{"skymap": "ci_mw", "tract": 0}],
)
else:
self.assertListEqual(
dataset_type_summary.unsuccessful_datasets,
[{"instrument": "HSC", "skymap": "ci_mw", "tract": 0}],
)
case label if label in [
"_mock_makeMetricTableObjectTableCore",
"_mock_objectTableCoreWholeSkyPlot",
]:
self.assertEqual(dataset_type_summary.n_unsuccessful, 1)
# These are the non-failed tasks:
case _:
"_mock_consolidateAssocDiaSourceTable",
"_mock_consolidateFullDiaObjectTable",
"_mock_consolidateForcedSourceOnDiaObjectTable",
]:

Check failure on line 262 in tests/test_rc2_outputs.py (GitHub Actions / call-workflow / lint): E126 continuation line over-indented for hanging indent
self.assertEqual(dataset_type_summary.n_visible, 1)
self.assertEqual(dataset_type_summary.n_expected, 1)
self.assertEqual(dataset_type_summary.n_unsuccessful, 0)
self.assertListEqual(dataset_type_summary.unsuccessful_datasets, [])
case label if label in ["_mock_forcedPhotCcdOnDiaObjects",
"_mock_forcedPhotDiffOnDiaObjects",
"_mock_writeForcedSourceOnDiaObjectTable",
]:
self.assertEqual(dataset_type_summary.n_visible, 46)
self.assertEqual(dataset_type_summary.n_expected, 46)
self.assertEqual(dataset_type_summary.n_unsuccessful, 0)
self.assertListEqual(dataset_type_summary.unsuccessful_datasets, [])
if (
dataset_type_summary.producer == "_mock_analyzeMatchedPreVisitCore"
or dataset_type_summary.producer == "_mock_analyzeMatchedVisitCore"
):
self.assertEqual(dataset_type_summary.n_visible, 4)
self.assertEqual(dataset_type_summary.n_expected, 4)
elif dataset_type_summary.producer == "_mock_plotPropertyMapTract":
self.assertEqual(dataset_type_summary.n_visible, 2)
self.assertEqual(dataset_type_summary.n_expected, 2)
elif dataset_type_summary.producer == "_mock_analyzeAmpOffsetMetadata":
self.assertEqual(dataset_type_summary.n_visible, 60)
self.assertEqual(dataset_type_summary.n_expected, 60)
else:
self.assertEqual(dataset_type_summary.n_visible, 1)
self.assertEqual(dataset_type_summary.n_expected, 1)

# Now examine the quantum provenance graph after the recovery attempt
# has been made.
# Make the quantum provenance graph for the first attempt
qg_2 = helper.get_quantum_graph("step8", "attempt2")
qg_2 = helper.get_quantum_graph("step5", "attempt2")

# Before we get into that, let's see if we correctly label a successful
# task whose data products do not make it into the output collection
# given as shadowed.

qpg_shadowed = QuantumProvenanceGraph()
qpg_shadowed.assemble_quantum_provenance_graph(helper.butler, [qg_1, qg_2], collections=["HSC/runs/RC2/step8-attempt1"], where="instrument='HSC'")
qpg_shadowed_sum = qpg_shadowed.to_summary(helper.butler)
qpg_shadowed.assemble_quantum_provenance_graph(helper.butler, [qg_1, qg_2], collections=["HSC/runs/RC2/step5-attempt1"], where="instrument='HSC'")

Check failure on line 286 in tests/test_rc2_outputs.py (GitHub Actions / call-workflow / lint): E501 line too long (154 > 110 characters)
qg_shadowed_sum = qpg_shadowed.to_summary(helper.butler)

for dataset_type_name, dataset_type_summary in qpg_shadowed_sum.datasets.items():
if dataset_type_summary.producer == "_mock_analyzeObjectTableCore":
if dataset_type_name == "_mock_analyzeObjectTableCore_log":
for dataset_type_name, dataset_type_summary in qg_shadowed_sum.datasets.items():
if dataset_type_summary.producer == "_mock_consolidateForcedSourceTable":
if dataset_type_name == "_mock_consolidateForcedSourceTable_log":
continue
else:
self.assertEqual(dataset_type_summary.n_visible, 0)
@@ -302,40 +301,57 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
# Now for verifying the recovery properly -- the graph below is made
# as intended.
qpg2 = QuantumProvenanceGraph()
qpg2.assemble_quantum_provenance_graph(helper.butler, [qg_1, qg_2], collections=["HSC/runs/RC2/step8-attempt2", "HSC/runs/RC2/step8-attempt1"], where="instrument='HSC'")
qpg2.assemble_quantum_provenance_graph(helper.butler, [qg_1, qg_2], collections=["HSC/runs/RC2/step5-attempt2", "HSC/runs/RC2/step5-attempt1"], where="instrument='HSC'")

Check failure on line 304 in tests/test_rc2_outputs.py (GitHub Actions / call-workflow / lint): E501 line too long (177 > 110 characters)
qg_2_sum = qpg2.to_summary(helper.butler)

for label, task_summary in qg_2_sum.tasks.items():
self.assertEqual(task_summary.n_unknown, 0)
self.assertEqual(task_summary.n_wonky, 0)
self.assertEqual(task_summary.n_blocked, 0)
self.assertListEqual(task_summary.wonky_quanta, [])
# There should be no failures, so we can say for all tasks:
self.assertEqual(task_summary.n_successful, task_summary.n_expected)
self.assertEqual(task_summary.n_failed, 0)
self.assertListEqual(task_summary.failed_quanta, [])
match label:
case label if label in ["_mock_transformForcedSourceTable",
"_mock_drpAssociation",
"_mock_drpDiaCalculation",
"_mock_transformForcedSourceOnDiaObjectTable",
]:
self.assertEqual(task_summary.n_expected, 4)
self.assertEqual(task_summary.n_failed, 0)
self.assertEqual(task_summary.n_successful, 4)
self.assertEqual(task_summary.n_blocked, 0)
self.assertEqual(task_summary.failed_quanta, [])
self.assertEqual(task_summary.recovered_quanta, [])
# Check that the failure was recovered:
case label if label in [
"_mock_analyzeObjectTableCore",
"_mock_makeMetricTableObjectTableCore",
"_mock_objectTableCoreWholeSkyPlot",
]:
case "_mock_consolidateForcedSourceTable":
self.assertEqual(task_summary.n_expected, 1)
self.assertEqual(task_summary.n_successful, 1)
self.assertEqual(task_summary.n_blocked, 0)
if label == "_mock_analyzeObjectTableCore":
self.assertEqual(
task_summary.recovered_quanta,
[{"skymap": "ci_mw", "tract": 0}],
)
if label in ["_mock_makeMetricTableObjectTableCore", "_mock_objectTableCoreWholeSkyPlot"]:
self.assertEqual(
self.assertEqual(
task_summary.recovered_quanta,

Check failure on line 333 in tests/test_rc2_outputs.py (GitHub Actions / call-workflow / lint): E126 continuation line over-indented for hanging indent
[{"skymap": "ci_mw"}],
)
case _:
self.assertListEqual(task_summary.recovered_quanta, [])
[{"instrument": "HSC", "skymap": "ci_mw", "tract": 0}],
)

Check failure on line 335 in tests/test_rc2_outputs.py (GitHub Actions / call-workflow / lint): E122 continuation line missing indentation or outdented
case label if label in ["_mock_consolidateAssocDiaSourceTable",
"_mock_consolidateFullDiaObjectTable",
"_mock_consolidateForcedSourceOnDiaObjectTable",
]:
self.assertEqual(task_summary.n_expected, 1)
self.assertEqual(task_summary.n_failed, 0)
self.assertEqual(task_summary.n_successful, 1)
self.assertEqual(task_summary.n_blocked, 0)
self.assertEqual(task_summary.failed_quanta, [])

case label if label in ["_mock_forcedPhotCcdOnDiaObjects",
"_mock_forcedPhotDiffOnDiaObjects",
"_mock_writeForcedSourceOnDiaObjectTable",
]:
self.assertEqual(task_summary.n_expected, 46)
self.assertEqual(task_summary.n_failed, 0)
self.assertEqual(task_summary.n_successful, 46)
self.assertEqual(task_summary.n_blocked, 0)
self.assertEqual(task_summary.failed_quanta, [])

# Check on datasets
for dataset_type_summary in qg_2_sum.datasets.values():
@@ -349,8 +365,8 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
self.assertEqual(dataset_type_summary.n_shadowed, 0)
self.assertEqual(dataset_type_summary.n_visible, dataset_type_summary.n_expected)

def test_step8_quantum_provenance_graph_qbb(self) -> None:
self.check_step8_qpg(self.qbb)
def test_step5_quantum_provenance_graph_qbb(self) -> None:
self.check_step5_qpg(self.qbb)

def test_fgcm_refcats(self) -> None:
"""Test that FGCM does not get refcats that don't overlap any of its
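
A note on naming: the assertions above hard-code labels like "_mock_consolidateForcedSourceTable", while the first hunk compares against get_mock_name("transformForcedSourceTable"); both refer to the same mocked tasks. The stand-in below only illustrates the assumed mapping, with the prefix inferred from the labels in this diff; the real helper lives in the pipeline mocks package and should be used instead.

def get_mock_name(original_task_label: str) -> str:
    # Hypothetical stand-in: prepend the mock prefix seen throughout the
    # assertions in this test file.
    return "_mock_" + original_task_label


assert get_mock_name("consolidateForcedSourceTable") == "_mock_consolidateForcedSourceTable"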