diff --git a/tests/test_prod_outputs.py b/tests/test_prod_outputs.py
index ab9fa3f..1de14dc 100644
--- a/tests/test_prod_outputs.py
+++ b/tests/test_prod_outputs.py
@@ -243,17 +243,16 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
                     self.assertIn(
                         "Exception ValueError: Simulated failure: task=_mock_calibrate", message
                     )
+            case "_mock_writePreSourceTable" | "_mock_transformPreSourceTable":
+                self.assertEqual(task_summary.n_successful, 30)
+                self.assertEqual(task_summary.n_blocked, 6)
+                self.assertEqual(task_summary.n_failed, 0)
+                self.assertListEqual(task_summary.failed_quanta, [])
             case _:
-                if label == "_mock_writePreSourceTable" or label == "_mock_transformPreSourceTable":
-                    self.assertEqual(task_summary.n_successful, 30)
-                    self.assertEqual(task_summary.n_blocked, 6)
-                    self.assertEqual(task_summary.n_failed, 0)
-                    self.assertListEqual(task_summary.failed_quanta, [])
-                else:
-                    self.assertEqual(task_summary.n_successful, 36)
-                    self.assertEqual(task_summary.n_blocked, 0)
-                    self.assertEqual(task_summary.n_failed, 0)
-                    self.assertListEqual(task_summary.failed_quanta, [])
+                self.assertEqual(task_summary.n_successful, 36)
+                self.assertEqual(task_summary.n_blocked, 0)
+                self.assertEqual(task_summary.n_failed, 0)
+                self.assertListEqual(task_summary.failed_quanta, [])
 
     # Test datasets for the first QPG.
     for dataset_type_name, dataset_summary in qg_1_sum.datasets.items():
@@ -266,7 +265,7 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
                 dataset_summary.unsuccessful_datasets,
                 f"Expected failures were not stored as unsuccessful datasets for {dataset_type_name}.",
             )
-            # Check that the published datasets = expected - (unsuccessful
+            # Check that the visible datasets = expected - (unsuccessful
             # + predicted_only)
             self.assertEqual(
                 dataset_summary.n_visible,
@@ -285,10 +284,11 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
                 "HSC-I",
             )
             # Check that there are the expected amount of failures
-            # and that they are not published
+            # and that they are not visible
            self.assertEqual(len(dataset_summary.unsuccessful_datasets), 6)
             self.assertEqual(dataset_summary.n_expected, 36)
             self.assertEqual(dataset_summary.n_visible, 30)
+            self.assertEqual(dataset_summary.n_predicted_only, 0)
 
         # Check that all the counts add up for every task
         self.assertEqual(
@@ -310,6 +310,10 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
     # Make an overall QPG and add the recovery attempt to the QPG
     qpg = QuantumProvenanceGraph()
     qg_2 = helper.get_quantum_graph("step1", "i-attempt2")
+    # Quantum graphs are passed in order of execution; collections are
+    # passed in reverse order because the query in
+    # `QuantumProvenanceGraph.__resolve_duplicates` requires collections
+    # be passed with the most recent first.
     qpg.assemble_quantum_provenance_graph(
         helper.butler,
         [qg_1, qg_2],
@@ -350,7 +354,7 @@ def check_step1_qpg(self, helper: OutputRepoTests) -> None:
     # Check that we have the expected datasets
     for dataset_summary in qg_sum.datasets.values():
         # Check counts: we should have recovered everything, so
-        # published should equal expected for each dataset.
+        # visible should equal expected for each dataset.
         self.assertEqual(
             dataset_summary.n_expected,
             dataset_summary.n_visible,
diff --git a/tests/test_rc2_outputs.py b/tests/test_rc2_outputs.py
index 2115ed4..4133191 100644
--- a/tests/test_rc2_outputs.py
+++ b/tests/test_rc2_outputs.py
@@ -225,6 +225,12 @@ def check_step5_qpg(self, helper: OutputRepoTests) -> None:
                 self.assertEqual(task_summary.n_successful, 46)
                 self.assertEqual(task_summary.n_blocked, 0)
                 self.assertEqual(task_summary.failed_quanta, [])
+            case _:
+                raise RuntimeError(
+                    "Task summary contains unexpected quanta. It is "
+                    "likely this test must be updated to reflect "
+                    "the mocks."
+                )
     # Check on datasets
     for dataset_type_name, dataset_type_summary in qg_1_sum.datasets.items():
         # We shouldn't run into predicted only, shadowed or cursed.
@@ -282,13 +288,12 @@ def check_step5_qpg(self, helper: OutputRepoTests) -> None:
 
     # Now examine the quantum provenance graph after the recovery attempt
     # has been made.
-    # Make the quantum provenance graph for the first attempt
+    # Get a graph for the second attempt.
     qg_2 = helper.get_quantum_graph("step5", "attempt2")
 
-    # Before we get into that, let's see if we correctly label a successful
-    # task whose data products do not make it into the output collection
-    # given as shadowed.
-
+    # Check that a successful task whose data products do not make it
+    # into the output collection is handled correctly, with those data
+    # products labeled as shadowed.
     qpg_shadowed = QuantumProvenanceGraph()
     qpg_shadowed.assemble_quantum_provenance_graph(
         helper.butler, [qg_1, qg_2], collections=["HSC/runs/RC2/step5-attempt1"], where="instrument='HSC'"
@@ -307,9 +312,13 @@ def check_step5_qpg(self, helper: OutputRepoTests) -> None:
         self.assertEqual(dataset_type_summary.n_predicted_only, 0)
         self.assertEqual(dataset_type_summary.n_unsuccessful, 0)
 
-    # Now for verifying the recovery properly -- the graph below is made
-    # as intended.
+    # Assemble the quantum provenance graph across both attempts to check
+    # that the recovery was handled correctly.
     qpg2 = QuantumProvenanceGraph()
+    # Quantum graphs are passed in order of execution; collections are
+    # passed in reverse order because the query in
+    # `QuantumProvenanceGraph.__resolve_duplicates` requires collections
+    # be passed with the most recent first.
     qpg2.assemble_quantum_provenance_graph(
         helper.butler,
         [qg_1, qg_2],
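
For reference, the graph/collection ordering convention described in the new comments amounts to the usage pattern sketched below. This is a minimal illustration assembled from the calls visible in the diff; the import path, the arguments used to fetch the first attempt's graph, and the "attempt2" collection name are assumptions rather than values taken from the tests, and `helper` stands in for the OutputRepoTests fixture.

    # Assumed import path; the diff does not show where the class comes from.
    from lsst.pipe.base.quantum_provenance_graph import QuantumProvenanceGraph

    # Graphs for two attempts at the same step; the recovery attempt re-runs
    # the quanta that failed in attempt 1. "attempt1" here is an assumption.
    qg_1 = helper.get_quantum_graph("step5", "attempt1")
    qg_2 = helper.get_quantum_graph("step5", "attempt2")

    qpg = QuantumProvenanceGraph()
    qpg.assemble_quantum_provenance_graph(
        helper.butler,
        # Quantum graphs are passed in execution order: first attempt, then
        # the recovery attempt.
        [qg_1, qg_2],
        # Collections are passed most-recent-first, as required by the query
        # in QuantumProvenanceGraph.__resolve_duplicates. The attempt-2
        # collection name is illustrative.
        collections=["HSC/runs/RC2/step5-attempt2", "HSC/runs/RC2/step5-attempt1"],
        where="instrument='HSC'",
    )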