From 23d950052398abd1b7d29df0087d032c64719e60 Mon Sep 17 00:00:00 2001
From: Orion Eiger
Date: Tue, 17 Sep 2024 21:44:11 -0700
Subject: [PATCH] Update step 8 mocks to step 5

---
 tests/test_prod_outputs.py |  13 +-
 tests/test_rc2_outputs.py  | 239 +++++++++++++++++++++----------
 2 files changed, 146 insertions(+), 106 deletions(-)

diff --git a/tests/test_prod_outputs.py b/tests/test_prod_outputs.py
index 737c645..0433157 100644
--- a/tests/test_prod_outputs.py
+++ b/tests/test_prod_outputs.py
@@ -202,7 +202,9 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
         # Make the quantum provenance graph for the first attempt
         qg_1 = helper.get_quantum_graph("step1", "i-attempt1")
         qpg1 = QuantumProvenanceGraph()
-        qpg1.assemble_quantum_provenance_graph(helper.butler, [qg_1], collections=["HSC/runs/Prod/step1-i-attempt1"], where="instrument='HSC'")
+        qpg1.assemble_quantum_provenance_graph(
+            helper.butler, [qg_1], collections=["HSC/runs/Prod/step1-i-attempt1"], where="instrument='HSC'"
+        )
         qg_1_sum = qpg1.to_summary(helper.butler)
 
         # Loop through the tasks in the dict
@@ -230,7 +232,7 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
                         self.assertIsInstance(quantum_summary.data_id["detector"], int)
                         self.assertEqual(quantum_summary.data_id["visit"], 18202)
                         self.assertDictEqual(
-                            quantum_summary.runs, {"HSC/runs/Prod/step1-i-attempt1": "failed"}
+                            quantum_summary.runs, {"HSC/runs/Prod/step1-i-attempt1": "FAILED"}
                         )
                         self.assertIsInstance(quantum_summary.messages, list)
                         for message in quantum_summary.messages:
@@ -308,7 +310,12 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
         # Make an overall QPG and add the recovery attempt to the QPG
         qpg = QuantumProvenanceGraph()
         qg_2 = helper.get_quantum_graph("step1", "i-attempt2")
-        qpg.assemble_quantum_provenance_graph(helper.butler, [qg_1, qg_2], collections=["HSC/runs/Prod/step1-i-attempt2", "HSC/runs/Prod/step1-i-attempt1"], where="instrument='HSC'")
+        qpg.assemble_quantum_provenance_graph(
+            helper.butler,
+            [qg_1, qg_2],
+            collections=["HSC/runs/Prod/step1-i-attempt2", "HSC/runs/Prod/step1-i-attempt1"],
+            where="instrument='HSC'",
+        )
         qg_sum = qpg.to_summary(helper.butler)
 
         for label, task_summary in qg_sum.tasks.items():
diff --git a/tests/test_rc2_outputs.py b/tests/test_rc2_outputs.py
index 6bced6d..2115ed4 100644
--- a/tests/test_rc2_outputs.py
+++ b/tests/test_rc2_outputs.py
@@ -149,17 +149,19 @@ def test_step5_rescue_direct(self) -> None:
         self.assertIn(get_mock_name("transformForcedSourceTable"), tasks_with_quanta_1)
         self.assertNotIn(get_mock_name("transformForcedSourceTable"), tasks_with_quanta_2)
 
-    def test_step8_rescue_qbb(self) -> None:
+    def test_step5_rescue_qbb(self) -> None:
         self.check_step5_rescue(self.qbb)
 
-    def check_step8_qpg(self, helper: OutputRepoTests) -> None:
+    def check_step5_qpg(self, helper: OutputRepoTests) -> None:
-        """Check that the fail-and-recover attempts in step 8 are properly
+        """Check that the fail-and-recover attempts in step 5 are properly
         diagnosed using the `QuantumProvenanceGraph`.
""" # Make the quantum provenance graph for the first attempt - qg_1 = helper.get_quantum_graph("step8", "attempt1") + qg_1 = helper.get_quantum_graph("step5", "attempt1") qpg1 = QuantumProvenanceGraph() - qpg1.assemble_quantum_provenance_graph(helper.butler, qg_1, collections=["HSC/runs/RC2/step8-attempt1"], where="instrument='HSC'") + qpg1.assemble_quantum_provenance_graph( + helper.butler, [qg_1], collections=["HSC/runs/RC2/step5-attempt1"], where="instrument='HSC'" + ) qg_1_sum = qpg1.to_summary(helper.butler) # Check that expected, wonky and not attempted do not occur throughout @@ -171,125 +173,131 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None: self.assertListEqual(task_summary.recovered_quanta, []) match label: # Check that the failure was documented in expected ways: - case "_mock_analyzeObjectTableCore": + case label if label in [ + "_mock_transformForcedSourceTable", + "_mock_drpAssociation", + "_mock_drpDiaCalculation", + "_mock_transformForcedSourceOnDiaObjectTable", + ]: + self.assertEqual(task_summary.n_expected, 4) + self.assertEqual(task_summary.n_failed, 0) + self.assertEqual(task_summary.n_successful, 4) + self.assertEqual(task_summary.n_blocked, 0) + self.assertEqual(task_summary.failed_quanta, []) + case "_mock_consolidateForcedSourceTable": self.assertEqual(task_summary.n_expected, 1) self.assertEqual(task_summary.n_failed, 1) self.assertEqual(task_summary.n_successful, 0) + self.assertEqual(task_summary.n_blocked, 0) self.assertEqual( task_summary.failed_quanta, [ UnsuccessfulQuantumSummary( - data_id={"skymap": "ci_mw", "tract": 0}, - runs={"HSC/runs/RC2/step8-attempt1": "failed"}, + data_id={"instrument": "HSC", "skymap": "ci_mw", "tract": 0}, + runs={"HSC/runs/RC2/step5-attempt1": "FAILED"}, messages=[ - "Execution of task '_mock_analyzeObjectTableCore' on quantum {skymap: " - "'ci_mw', tract: 0} failed. Exception ValueError: Simulated failure: " - "task=_mock_analyzeObjectTableCore dataId={skymap: 'ci_mw', tract: 0}" + "Execution of task '_mock_consolidateForcedSourceTable' on quantum " + "{instrument: 'HSC', skymap: 'ci_mw', tract: 0} failed. Exception " + "ValueError: Simulated failure: task=_mock_consolidateForcedSourceTable " + "dataId={instrument: 'HSC', skymap: 'ci_mw', tract: 0}" ], ) ], ) + case label if label in [ + "_mock_consolidateAssocDiaSourceTable", + "_mock_consolidateFullDiaObjectTable", + "_mock_consolidateForcedSourceOnDiaObjectTable", + ]: + self.assertEqual(task_summary.n_expected, 1) + self.assertEqual(task_summary.n_failed, 0) + self.assertEqual(task_summary.n_successful, 1) self.assertEqual(task_summary.n_blocked, 0) - case _: - # If it's not the failed task, there should be no failures + self.assertEqual(task_summary.failed_quanta, []) + + case label if label in [ + "_mock_forcedPhotCcdOnDiaObjects", + "_mock_forcedPhotDiffOnDiaObjects", + "_mock_writeForcedSourceOnDiaObjectTable", + ]: + self.assertEqual(task_summary.n_expected, 46) self.assertEqual(task_summary.n_failed, 0) - self.assertListEqual(task_summary.failed_quanta, []) - # We also shouldn't have had anything to recover - self.assertListEqual(task_summary.recovered_quanta, []) - # The next few if's are making sure we have the same - # number of expected and successful quanta. We could also - # just assert that n_expected == n_successful. 
-                    if label in ["_mock_analyzeMatchedPreVisitCore", "_mock_analyzeMatchedVisitCore"]:
-                        self.assertEqual(task_summary.n_expected, 4)
-                        self.assertEqual(task_summary.n_successful, 4)
-                        self.assertEqual(task_summary.n_blocked, 0)
-                    elif label == "_mock_plotPropertyMapTract":
-                        self.assertEqual(task_summary.n_expected, 2)
-                        self.assertEqual(task_summary.n_successful, 2)
-                        self.assertEqual(task_summary.n_blocked, 0)
-                    elif label in [
-                        "_mock_makeMetricTableObjectTableCore",
-                        "_mock_objectTableCoreWholeSkyPlot",
-                    ]:
-                        self.assertEqual(task_summary.n_blocked, 1)
-                        self.assertEqual(task_summary.n_successful, 0)
-                    elif label == "_mock_analyzeAmpOffsetMetadata":
-                        self.assertEqual(task_summary.n_expected, 60)
-                        self.assertEqual(task_summary.n_successful, 60)
-                        self.assertEqual(task_summary.n_blocked, 0)
-                    else:
-                        self.assertEqual(
-                            task_summary.n_expected, 1, f"{label} had {task_summary.n_expected} tasks."
-                        )
-                        self.assertEqual(
-                            task_summary.n_successful,
-                            1,
-                            f"{label} had {task_summary.n_successful} successful tasks.",
-                        )
-                        self.assertEqual(
-                            task_summary.n_blocked, 0, f"{label} had {task_summary.n_blocked} blocked tasks."
-                        )
+                    self.assertEqual(task_summary.n_successful, 46)
+                    self.assertEqual(task_summary.n_blocked, 0)
+                    self.assertEqual(task_summary.failed_quanta, [])
 
         # Check on datasets
-        for dataset_type_summary in qg_1_sum.datasets.values():
-            # We shouldn't run into predicted only, unpublished or cursed.
-            # Unpublished suggests that the dataset exists but is not included
+        for dataset_type_name, dataset_type_summary in qg_1_sum.datasets.items():
+            # We shouldn't run into predicted only, shadowed or cursed.
+            # Shadowed suggests that the dataset exists but is not included
             # in the final collection; cursed suggests that the dataset is
-            # published but unsuccessful.
+            # visible but unsuccessful.
             self.assertEqual(dataset_type_summary.n_predicted_only, 0)
             self.assertEqual(dataset_type_summary.n_shadowed, 0)
             self.assertEqual(dataset_type_summary.n_cursed, 0)
             self.assertListEqual(dataset_type_summary.cursed_datasets, [])
             match dataset_type_summary.producer:
                 # Check that the failure was documented in expected ways:
-                case "_mock_analyzeObjectTableCore":
+                case label if label in [
+                    "_mock_transformForcedSourceTable",
+                    "_mock_drpAssociation",
+                    "_mock_drpDiaCalculation",
+                    "_mock_transformForcedSourceOnDiaObjectTable",
+                ]:
+                    self.assertEqual(dataset_type_summary.n_visible, 4)
+                    self.assertEqual(dataset_type_summary.n_expected, 4)
+                    self.assertEqual(dataset_type_summary.n_unsuccessful, 0)
+                    self.assertListEqual(dataset_type_summary.unsuccessful_datasets, [])
+                case "_mock_consolidateForcedSourceTable":
                     self.assertEqual(dataset_type_summary.n_visible, 0)
                     self.assertEqual(dataset_type_summary.n_expected, 1)
                     self.assertEqual(dataset_type_summary.n_unsuccessful, 1)
-                    self.assertListEqual(
-                        dataset_type_summary.unsuccessful_datasets,
-                        [{"skymap": "ci_mw", "tract": 0}],
-                    )
+                    if dataset_type_name == "_mock_forcedSourceTable_tract":
+                        self.assertListEqual(
+                            dataset_type_summary.unsuccessful_datasets,
+                            [{"skymap": "ci_mw", "tract": 0}],
+                        )
+                    else:
+                        self.assertListEqual(
+                            dataset_type_summary.unsuccessful_datasets,
+                            [{"instrument": "HSC", "skymap": "ci_mw", "tract": 0}],
+                        )
                 case label if label in [
-                    "_mock_makeMetricTableObjectTableCore",
-                    "_mock_objectTableCoreWholeSkyPlot",
+                    "_mock_consolidateAssocDiaSourceTable",
+                    "_mock_consolidateFullDiaObjectTable",
+                    "_mock_consolidateForcedSourceOnDiaObjectTable",
                 ]:
-                    self.assertEqual(dataset_type_summary.n_unsuccessful, 1)
-                # These are the non-failed tasks:
-                case _:
+                    self.assertEqual(dataset_type_summary.n_visible, 1)
+                    self.assertEqual(dataset_type_summary.n_expected, 1)
+                    self.assertEqual(dataset_type_summary.n_unsuccessful, 0)
+                    self.assertListEqual(dataset_type_summary.unsuccessful_datasets, [])
+                case label if label in [
+                    "_mock_forcedPhotCcdOnDiaObjects",
+                    "_mock_forcedPhotDiffOnDiaObjects",
+                    "_mock_writeForcedSourceOnDiaObjectTable",
+                ]:
+                    self.assertEqual(dataset_type_summary.n_visible, 46)
+                    self.assertEqual(dataset_type_summary.n_expected, 46)
                     self.assertEqual(dataset_type_summary.n_unsuccessful, 0)
                     self.assertListEqual(dataset_type_summary.unsuccessful_datasets, [])
-                    if (
-                        dataset_type_summary.producer == "_mock_analyzeMatchedPreVisitCore"
-                        or dataset_type_summary.producer == "_mock_analyzeMatchedVisitCore"
-                    ):
-                        self.assertEqual(dataset_type_summary.n_visible, 4)
-                        self.assertEqual(dataset_type_summary.n_expected, 4)
-                    elif dataset_type_summary.producer == "_mock_plotPropertyMapTract":
-                        self.assertEqual(dataset_type_summary.n_visible, 2)
-                        self.assertEqual(dataset_type_summary.n_expected, 2)
-                    elif dataset_type_summary.producer == "_mock_analyzeAmpOffsetMetadata":
-                        self.assertEqual(dataset_type_summary.n_visible, 60)
-                        self.assertEqual(dataset_type_summary.n_expected, 60)
-                    else:
-                        self.assertEqual(dataset_type_summary.n_visible, 1)
-                        self.assertEqual(dataset_type_summary.n_expected, 1)
 
         # Now examine the quantum provenance graph after the recovery attempt
         # has been made.
         # Make the quantum provenance graph for the first attempt
-        qg_2 = helper.get_quantum_graph("step8", "attempt2")
+        qg_2 = helper.get_quantum_graph("step5", "attempt2")
         # Before we get into that, let's see if we correctly label a successful
         # task whose data products do not make it into the output collection
         # given as shadowed.
         qpg_shadowed = QuantumProvenanceGraph()
-        qpg_shadowed.assemble_quantum_provenance_graph(helper.butler, [qg_1, qg_2], collections=["HSC/runs/RC2/step8-attempt1"], where="instrument='HSC'")
-        qpg_shadowed_sum = qpg_shadowed.to_summary(helper.butler)
+        qpg_shadowed.assemble_quantum_provenance_graph(
+            helper.butler, [qg_1, qg_2], collections=["HSC/runs/RC2/step5-attempt1"], where="instrument='HSC'"
+        )
+        qg_shadowed_sum = qpg_shadowed.to_summary(helper.butler)
 
-        for dataset_type_name, dataset_type_summary in qpg_shadowed_sum.datasets.items():
-            if dataset_type_summary.producer == "_mock_analyzeObjectTableCore":
-                if dataset_type_name == "_mock_analyzeObjectTableCore_log":
+        for dataset_type_name, dataset_type_summary in qg_shadowed_sum.datasets.items():
+            if dataset_type_summary.producer == "_mock_consolidateForcedSourceTable":
+                if dataset_type_name == "_mock_consolidateForcedSourceTable_log":
                     continue
                 else:
                     self.assertEqual(dataset_type_summary.n_visible, 0)
@@ -302,40 +310,65 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
         # Now for verifying the recovery properly -- the graph below is made
         # as intended.
         qpg2 = QuantumProvenanceGraph()
-        qpg2.assemble_quantum_provenance_graph(helper.butler, [qg_1, qg_2], collections=["HSC/runs/RC2/step8-attempt2", "HSC/runs/RC2/step8-attempt1"], where="instrument='HSC'")
+        qpg2.assemble_quantum_provenance_graph(
+            helper.butler,
+            [qg_1, qg_2],
+            collections=["HSC/runs/RC2/step5-attempt2", "HSC/runs/RC2/step5-attempt1"],
+            where="instrument='HSC'",
+        )
         qg_2_sum = qpg2.to_summary(helper.butler)
 
         for label, task_summary in qg_2_sum.tasks.items():
             self.assertEqual(task_summary.n_unknown, 0)
             self.assertEqual(task_summary.n_wonky, 0)
-            self.assertEqual(task_summary.n_blocked, 0)
             self.assertListEqual(task_summary.wonky_quanta, [])
             # There should be no failures, so we can say for all tasks:
             self.assertEqual(task_summary.n_successful, task_summary.n_expected)
             self.assertEqual(task_summary.n_failed, 0)
             self.assertListEqual(task_summary.failed_quanta, [])
             match label:
+                case label if label in [
+                    "_mock_transformForcedSourceTable",
+                    "_mock_drpAssociation",
+                    "_mock_drpDiaCalculation",
+                    "_mock_transformForcedSourceOnDiaObjectTable",
+                ]:
+                    self.assertEqual(task_summary.n_expected, 4)
+                    self.assertEqual(task_summary.n_failed, 0)
+                    self.assertEqual(task_summary.n_successful, 4)
+                    self.assertEqual(task_summary.n_blocked, 0)
+                    self.assertEqual(task_summary.failed_quanta, [])
+                    self.assertEqual(task_summary.recovered_quanta, [])
                 # Check that the failure was recovered:
+                case "_mock_consolidateForcedSourceTable":
+                    self.assertEqual(task_summary.n_expected, 1)
+                    self.assertEqual(task_summary.n_successful, 1)
+                    self.assertEqual(task_summary.n_blocked, 0)
+                    self.assertEqual(
+                        task_summary.recovered_quanta,
+                        [{"instrument": "HSC", "skymap": "ci_mw", "tract": 0}],
+                    )
                 case label if label in [
-                    "_mock_analyzeObjectTableCore",
-                    "_mock_makeMetricTableObjectTableCore",
-                    "_mock_objectTableCoreWholeSkyPlot",
+                    "_mock_consolidateAssocDiaSourceTable",
+                    "_mock_consolidateFullDiaObjectTable",
+                    "_mock_consolidateForcedSourceOnDiaObjectTable",
                 ]:
                     self.assertEqual(task_summary.n_expected, 1)
+                    self.assertEqual(task_summary.n_failed, 0)
                     self.assertEqual(task_summary.n_successful, 1)
                     self.assertEqual(task_summary.n_blocked, 0)
-                    if label == "_mock_analyzeObjectTableCore":
-                        self.assertEqual(
-                            task_summary.recovered_quanta,
-                            [{"skymap": "ci_mw", "tract": 0}],
-                        )
-                    if label in ["_mock_makeMetricTableObjectTableCore", "_mock_objectTableCoreWholeSkyPlot"]:
-                        self.assertEqual(
-                            task_summary.recovered_quanta,
-                            [{"skymap": "ci_mw"}],
-                        )
-                case _:
-                    self.assertListEqual(task_summary.recovered_quanta, [])
+                    self.assertEqual(task_summary.failed_quanta, [])
+
+                case label if label in [
+                    "_mock_forcedPhotCcdOnDiaObjects",
+                    "_mock_forcedPhotDiffOnDiaObjects",
+                    "_mock_writeForcedSourceOnDiaObjectTable",
+                ]:
+                    self.assertEqual(task_summary.n_expected, 46)
+                    self.assertEqual(task_summary.n_failed, 0)
+                    self.assertEqual(task_summary.n_successful, 46)
+                    self.assertEqual(task_summary.n_blocked, 0)
+                    self.assertEqual(task_summary.failed_quanta, [])
 
         # Check on datasets
         for dataset_type_summary in qg_2_sum.datasets.values():
@@ -349,8 +382,8 @@ def check_step8_qpg(self, helper: OutputRepoTests) -> None:
             self.assertEqual(dataset_type_summary.n_shadowed, 0)
             self.assertEqual(dataset_type_summary.n_visible, dataset_type_summary.n_expected)
 
-    def test_step8_quantum_provenance_graph_qbb(self) -> None:
-        self.check_step8_qpg(self.qbb)
+    def test_step5_quantum_provenance_graph_qbb(self) -> None:
+        self.check_step5_qpg(self.qbb)
 
     def test_fgcm_refcats(self) -> None:
         """Test that FGCM does not get refcats that don't overlap any of its