Use Pydantic models for tests
eigerx committed Aug 3, 2024 · 1 parent ff3811f · commit e2ce49c
Showing 2 changed files with 61 additions and 79 deletions.
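The gist of the change, sketched below outside the diff: the tests now validate and compare whole Pydantic model instances instead of dict dumps. The class and field names in this sketch are illustrative stand-ins, not the real Summary/TaskSummary models from lsst.pipe.base.quantum_provenance_graph, and it assumes Pydantic v2, where model_validate accepts dicts as well as existing model instances.

import pydantic


class ToyTaskSummary(pydantic.BaseModel):
    # Stand-in for TaskSummary; the real model has many more fields.
    n_expected: int = 0
    n_successful: int = 0
    failed_quanta: list[str] = []


summary = ToyTaskSummary(n_expected=1)

# model_validate works on dicts and on already-built instances (Pydantic v2).
ToyTaskSummary.model_validate(summary)
ToyTaskSummary.model_validate({"n_expected": 1})

# Model equality compares field values, so a single assertion covers every
# field instead of spelling the expectation out as a plain dict.
assert summary == ToyTaskSummary(n_expected=1, n_successful=0, failed_quanta=[])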
2 changes: 1 addition & 1 deletion python/lsst/pipe/base/quantum_provenance_graph.py
@@ -417,7 +417,7 @@ def from_info(cls, info: DatasetInfo, producer_info: QuantumInfo) -> CursedDatas
class DatasetTypeSummary(pydantic.BaseModel):
"""A summary of the status of all datasets of a particular type."""

producer: str
producer: str = ""
"""The name of the task which produced this dataset.
"""

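A guess at the motivation for the one-line library change above (the diff itself does not state it): giving producer a default of "" lets a DatasetTypeSummary be constructed before its producing task is known and filled in later. A minimal sketch with a stand-in class, not the real model:

import pydantic


class ToyDatasetTypeSummary(pydantic.BaseModel):
    # Stand-in for DatasetTypeSummary; only the relevant fields are shown.
    producer: str = ""  # default added by this commit
    n_expected: int = 0


s = ToyDatasetTypeSummary()  # valid even though no producer is set yet
s.producer = "task0"         # assigned once the producing task is known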
138 changes: 60 additions & 78 deletions tests/test_quantum_provenance_graph.py
@@ -31,7 +31,12 @@

import unittest

from lsst.pipe.base.quantum_provenance_graph import DatasetTypeSummary, QuantumProvenanceGraph, TaskSummary
from lsst.pipe.base.quantum_provenance_graph import (
DatasetTypeSummary,
QuantumProvenanceGraph,
Summary,
TaskSummary,
)
from lsst.pipe.base.tests import simpleQGraph
from lsst.utils.tests import temporaryDirectory

@@ -42,7 +47,8 @@ class QuantumProvenanceGraphTestCase(unittest.TestCase):
Verify that the `QuantumProvenanceGraph` is able to extract correct
information from `simpleQgraph`.
More tests are in lsst/ci_middleware/tests/test_prod_outputs.py
More tests are in lsst/ci_middleware/tests/test_prod_outputs.py and
lsst/ci_middleware/tests/test_rc2_outputs.py
"""

def test_qpg_reports(self) -> None:
Expand All @@ -55,53 +61,30 @@ def test_qpg_reports(self) -> None:
qpg = QuantumProvenanceGraph()
qpg.add_new_graph(butler, qgraph)
qpg.resolve_duplicates(butler)
d = qpg.to_summary(butler)
self.assertIsNotNone(d)
with open("testmodel.json", "w") as buffer:
buffer.write(d.model_dump_json(indent=2))
summary_dict = d.model_dump()
for task in d.tasks:
self.assertIsInstance(d.tasks[task], TaskSummary)
summary = qpg.to_summary(butler)
Summary.model_validate(summary)

TaskSummary.model_validate(summary.tasks)
for task_summary in summary.tasks.values():
# We know that we have one expected task that was not run.
# As such, the following dictionary should describe all of
# the mock tasks.
self.assertDictEqual(
summary_dict["tasks"][task],
{
"n_successful": 0,
"n_blocked": 0,
"n_not_attempted": 1,
"n_expected": 1,
"failed_quanta": [],
"recovered_quanta": [],
"wonky_quanta": [],
"n_wonky": 0,
"n_failed": 0,
},
)
for dataset in d.datasets:
self.assertIsInstance(d.datasets[dataset], DatasetTypeSummary)
self.assertListEqual(
summary_dict["datasets"][dataset]["unsuccessful_datasets"],
[{"instrument": "INSTR", "detector": 0}],
self.assertEqual(
task_summary,
TaskSummary(
n_successful=0,
n_blocked=0,
n_not_attempted=1,
n_expected=1,
failed_quanta=[],
recovered_quanta=[],
wonky_quanta=[],
n_wonky=0,
n_failed=0,
),
)
# Check dataset counts (can't be done all in one because
# datasets have different producers), but all the counts for
# each task should be the same.
self.assertEqual(summary_dict["datasets"][dataset]["n_published"], 0)
self.assertEqual(summary_dict["datasets"][dataset]["n_unpublished"], 0)
self.assertEqual(summary_dict["datasets"][dataset]["n_published"], 0)
self.assertEqual(summary_dict["datasets"][dataset]["n_predicted_only"], 0)
self.assertEqual(summary_dict["datasets"][dataset]["n_expected"], 1)
self.assertEqual(summary_dict["datasets"][dataset]["n_published"], 0)
self.assertEqual(summary_dict["datasets"][dataset]["n_cursed"], 0)
self.assertEqual(summary_dict["datasets"][dataset]["n_published"], 0)
self.assertEqual(summary_dict["datasets"][dataset]["n_unsuccessful"], 1)
# Make sure the cursed dataset is an empty list
self.assertIsInstance(summary_dict["datasets"][dataset]["cursed_datasets"], list)
self.assertFalse(summary_dict["datasets"][dataset]["cursed_datasets"])
# Make sure we have the right datasets based on the mock we have
for task in [
DatasetTypeSummary.model_validate(summary.datasets)
expected_mock_datasets = [
"add_dataset1",
"add2_dataset1",
"task0_metadata",
@@ -122,36 +105,35 @@ def test_qpg_reports(self) -> None:
"add2_dataset5",
"task4_metadata",
"task4_log",
]:
self.assertIn(task, list(summary_dict["datasets"].keys()))
# Make sure the expected datasets were produced by the expected tasks
for dataset in ["add_dataset1", "add2_dataset1", "task0_metadata", "task0_log"]:
self.assertEqual(summary_dict["datasets"][dataset]["producer"], "task0")
for dataset in [
"add_dataset2",
"add2_dataset2",
"task1_metadata",
"task1_log",
]:
self.assertEqual(summary_dict["datasets"][dataset]["producer"], "task1")
for dataset in [
"add_dataset3",
"add2_dataset3",
"task2_metadata",
"task2_log",
]:
self.assertEqual(summary_dict["datasets"][dataset]["producer"], "task2")
for dataset in [
"add_dataset4",
"add2_dataset4",
"task3_metadata",
"task3_log",
]:
self.assertEqual(summary_dict["datasets"][dataset]["producer"], "task3")
for dataset in [
"add_dataset5",
"add2_dataset5",
"task4_metadata",
"task4_log",
]:
self.assertEqual(summary_dict["datasets"][dataset]["producer"], "task4")
]
for dataset_type_name, dataset_type_summary in summary.datasets.items():
self.assertListEqual(
dataset_type_summary.unsuccessful_datasets,
[{"instrument": "INSTR", "detector": 0}],
)
# Check dataset counts (can't be done all in one because
# datasets have different producers), but all the counts for
# each task should be the same.
self.assertEqual(dataset_type_summary.n_published, 0)
self.assertEqual(dataset_type_summary.n_unpublished, 0)
self.assertEqual(dataset_type_summary.n_predicted_only, 0)
self.assertEqual(dataset_type_summary.n_expected, 1)
self.assertEqual(dataset_type_summary.n_cursed, 0)
self.assertEqual(dataset_type_summary.n_unsuccessful, 1)
# Make sure the cursed dataset is an empty list
self.assertListEqual(dataset_type_summary.cursed_datasets, [])
# Make sure we have the right datasets based on our mock
self.assertIn(dataset_type_name, expected_mock_datasets)
# Make sure the expected datasets were produced by the expected
# tasks
match dataset_type_name:
case name if name in ["add_dataset1", "add2_dataset1", "task0_metadata", "task0_log"]:
self.assertEqual(dataset_type_summary.producer, "task0")

case name if name in ["add_dataset2", "add2_dataset2", "task1_metadata", "task1_log"]:
self.assertEqual(dataset_type_summary.producer, "task1")

case name if name in ["add_dataset3", "add2_dataset3", "task2_metadata", "task2_log"]:
self.assertEqual(dataset_type_summary.producer, "task2")

case name if name in ["add_dataset4", "add2_dataset4", "task3_metadata", "task3_log"]:
self.assertEqual(dataset_type_summary.producer, "task3")

case name if name in ["add_dataset5", "add2_dataset5", "task4_metadata", "task4_log"]:
self.assertEqual(dataset_type_summary.producer, "task4")
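The tail of the new test selects the expected producer with structural pattern matching guards (case name if name in [...]). A tiny self-contained illustration of that construct, using a made-up mapping rather than the LSST one:

def producer_for(dataset_type_name: str) -> str:
    # Bind the subject to `name` with a capture pattern, then let the `if`
    # guard decide whether the branch applies (requires Python 3.10+).
    match dataset_type_name:
        case name if name in ["add_dataset1", "task0_log"]:
            return "task0"
        case name if name in ["add_dataset2", "task1_log"]:
            return "task1"
        case _:
            return "unknown"


assert producer_for("task0_log") == "task0"
assert producer_for("weird_dataset") == "unknown"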
