From 7c5e2c1b42e3e1f4a0ec0a48f991e417c4abfaeb Mon Sep 17 00:00:00 2001
From: Tim Jenness
Date: Fri, 11 Aug 2023 15:21:07 -0700
Subject: [PATCH] Use default pydantic model for testing

Previously we changed the pydantic models used in testing to be the
compatibility models that understand v1 and v2. This is fine but means
that any external users using pydantic models as butler datasets break
if they are still using v1.

Change the test model to use the native pydantic base class, and modify
the YAML formatter to support both APIs. The JSON formatter already
supported both.
---
 python/lsst/daf/butler/formatters/yaml.py       | 21 ++++++++++++++-----
 .../daf/butler/tests/_examplePythonTypes.py     | 11 +++++++---
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/python/lsst/daf/butler/formatters/yaml.py b/python/lsst/daf/butler/formatters/yaml.py
index 4e03250fb6..051fce5d8e 100644
--- a/python/lsst/daf/butler/formatters/yaml.py
+++ b/python/lsst/daf/butler/formatters/yaml.py
@@ -152,15 +152,26 @@ def _toBytes(self, inMemoryDataset: Any) -> bytes:
         This will fail for data structures that have complex python
         classes without a registered YAML representer.
         """
+        converted = False
         if hasattr(inMemoryDataset, "model_dump") and hasattr(inMemoryDataset, "model_dump_json"):
-            # Pydantic-like model if both dump() and json() exist.
+            # Pydantic v2-like model if both model_dump() and
+            # model_dump_json() exist.
             with contextlib.suppress(Exception):
                 inMemoryDataset = inMemoryDataset.model_dump()
+                converted = True
+
+        if not converted and hasattr(inMemoryDataset, "dict") and hasattr(inMemoryDataset, "json"):
+            # Pydantic v1-like model if both dict() and json() exist.
+            with contextlib.suppress(Exception):
+                inMemoryDataset = inMemoryDataset.dict()
+                converted = True
+
+        if not converted:
+            if dataclasses.is_dataclass(inMemoryDataset):
+                inMemoryDataset = dataclasses.asdict(inMemoryDataset)
+            elif hasattr(inMemoryDataset, "_asdict"):
+                inMemoryDataset = inMemoryDataset._asdict()

-        if dataclasses.is_dataclass(inMemoryDataset):
-            inMemoryDataset = dataclasses.asdict(inMemoryDataset)
-        elif hasattr(inMemoryDataset, "_asdict"):
-            inMemoryDataset = inMemoryDataset._asdict()
         unsafe_dump = self.writeParameters.get("unsafe_dump", False)
         if unsafe_dump:
             serialized = yaml.dump(inMemoryDataset)
diff --git a/python/lsst/daf/butler/tests/_examplePythonTypes.py b/python/lsst/daf/butler/tests/_examplePythonTypes.py
index 3c64299415..3a4daa51e2 100644
--- a/python/lsst/daf/butler/tests/_examplePythonTypes.py
+++ b/python/lsst/daf/butler/tests/_examplePythonTypes.py
@@ -44,7 +44,7 @@
 from typing import TYPE_CHECKING, Any

 from lsst.daf.butler import StorageClass, StorageClassDelegate
-from lsst.daf.butler._compat import _BaseModelCompat
+from pydantic import BaseModel

 if TYPE_CHECKING:
     from lsst.daf.butler import Butler, Datastore, FormatterFactory
@@ -264,7 +264,7 @@ def makeFromDict(cls, exportDict: dict[str, list | dict | None]) -> MetricsExamp
         return cls(exportDict["summary"], exportDict["output"], data)


-class MetricsExampleModel(_BaseModelCompat):
+class MetricsExampleModel(BaseModel):
     """A variant of `MetricsExample` based on model."""

     summary: dict[str, Any] | None = None
@@ -274,7 +274,12 @@ class MetricsExampleModel(_BaseModelCompat):
     @classmethod
     def from_metrics(cls, metrics: MetricsExample) -> MetricsExampleModel:
         """Create a model based on an example."""
-        return cls.model_validate(metrics.exportAsDict())
+        d = metrics.exportAsDict()
+        # Assume pydantic v2 but fall back to v1.
+        try:
+            return cls.model_validate(d)
+        except AttributeError:
+            return cls.parse_obj(d)


 @dataclasses.dataclass