diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 7bb30f342f..942a726d2f 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -27,16 +27,18 @@ jobs:
channels: conda-forge,defaults
channel-priority: strict
show-channel-urls: true
+ miniforge-variant: Mambaforge
+ use-mamba: true
- name: Update pip/wheel infrastructure
shell: bash -l {0}
run: |
- conda install -y -q pip wheel
+ mamba install -y -q pip wheel
- name: Install sqlite
shell: bash -l {0}
run: |
- conda install -y -q sqlite
+ mamba install -y -q sqlite
# Postgres-14 is already installed from official postgres repo, but we
# also need pgsphere which is not installed. The repo is not in the list,
@@ -52,13 +54,13 @@ jobs:
- name: Install postgresql Python packages
shell: bash -l {0}
run: |
- conda install -y -q psycopg2
+ mamba install -y -q psycopg2
pip install testing.postgresql
- name: Install cryptography package for moto
shell: bash -l {0}
run: |
- conda install -y -q cryptography
+ mamba install -y -q cryptography
- name: Install dependencies
shell: bash -l {0}
@@ -69,13 +71,13 @@ jobs:
- name: Install pytest packages
shell: bash -l {0}
run: |
- conda install -y -q \
+ mamba install -y -q \
pytest pytest-xdist pytest-openfiles pytest-cov
- name: List installed packages
shell: bash -l {0}
run: |
- conda list
+ mamba list
pip list -v
- name: Build and install
diff --git a/.github/workflows/build_docs.yaml b/.github/workflows/build_docs.yaml
index a75bc73784..1e0386e9e5 100644
--- a/.github/workflows/build_docs.yaml
+++ b/.github/workflows/build_docs.yaml
@@ -18,7 +18,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
- python-version: '3.10'
+ python-version: '3.11'
cache: "pip"
cache-dependency-path: "setup.cfg"
diff --git a/.github/workflows/rebase_checker.yaml b/.github/workflows/rebase_checker.yaml
index 62aeca77e5..65516d9207 100644
--- a/.github/workflows/rebase_checker.yaml
+++ b/.github/workflows/rebase_checker.yaml
@@ -1,4 +1,3 @@
----
name: Check that 'main' is not merged into the development branch
on: pull_request
diff --git a/doc/changes/DM-40002.feature.rst b/doc/changes/DM-40002.feature.rst
new file mode 100644
index 0000000000..16dd14726c
--- /dev/null
+++ b/doc/changes/DM-40002.feature.rst
@@ -0,0 +1 @@
+Modified to work natively with Pydantic v1 and v2.
diff --git a/mypy.ini b/mypy.ini
index 0b3417c1c3..caec25da68 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -71,7 +71,7 @@ disallow_untyped_defs = True
disallow_incomplete_defs = True
strict_equality = True
warn_unreachable = True
-warn_unused_ignores = True
+warn_unused_ignores = False
# ...except the modules and subpackages below (can't find a way to do line
# breaks in the lists of modules).
diff --git a/python/lsst/daf/butler/_compat.py b/python/lsst/daf/butler/_compat.py
new file mode 100644
index 0000000000..d17f246fa6
--- /dev/null
+++ b/python/lsst/daf/butler/_compat.py
@@ -0,0 +1,198 @@
+# This file is part of daf_butler.
+#
+# Developed for the LSST Data Management System.
+# This product includes software developed by the LSST Project
+# (https://www.lsst.org).
+# See the COPYRIGHT file at the top-level directory of this distribution
+# for details of code ownership.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""Code to support backwards compatibility."""
+
+__all__ = ["PYDANTIC_V2", "_BaseModelCompat"]
+
+import sys
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any
+
+from pydantic import BaseModel
+from pydantic.fields import FieldInfo
+from pydantic.version import VERSION as PYDANTIC_VERSION
+
+if sys.version_info >= (3, 11, 0):
+ from typing import Self
+else:
+ from typing import TypeVar
+
+ Self = TypeVar("Self", bound="_BaseModelCompat") # type: ignore
+
+
+PYDANTIC_V2 = PYDANTIC_VERSION.startswith("2.")
+
+
+if PYDANTIC_V2:
+
+ class _BaseModelCompat(BaseModel):
+ """Methods from pydantic v1 that we want to emulate in v2.
+
+ Some of these methods are provided by v2 but issue deprecation
+ warnings. We need to decide whether we are also okay with deprecating
+ them or want to support them without the deprecation message.
+ """
+
+ def json(
+ self,
+ *,
+ include: set[int] | set[str] | dict[int, Any] | dict[str, Any] | None = None, # type: ignore
+ exclude: set[int] | set[str] | dict[int, Any] | dict[str, Any] | None = None, # type: ignore
+ by_alias: bool = False,
+ skip_defaults: bool | None = None,
+ exclude_unset: bool = False,
+ exclude_defaults: bool = False,
+ exclude_none: bool = False,
+ encoder: Callable[[Any], Any] | None = None,
+ models_as_dict: bool = True,
+ **dumps_kwargs: Any,
+ ) -> str:
+ if dumps_kwargs:
+ raise TypeError("dumps_kwargs no longer supported.")
+ if encoder is not None:
+ raise TypeError("json encoder is no longer supported.")
+ # Can catch warnings and call BaseModel.json() directly.
+ return self.model_dump_json(
+ include=include,
+ exclude=exclude,
+ by_alias=by_alias,
+ exclude_defaults=exclude_defaults,
+ exclude_none=exclude_none,
+ exclude_unset=exclude_unset,
+ )
+
+ @classmethod
+ def parse_obj(cls, obj: Any) -> Self:
+ # Catch warnings and call BaseModel.parse_obj directly?
+ return cls.model_validate(obj)
+
+ if TYPE_CHECKING and not PYDANTIC_V2:
+ # mypy sees the first definition of a class and ignores any
+ # redefinition. This means that if mypy is run with pydantic v1
+ # it will not see the classes defined in the else block below.
+
+ @classmethod
+ def model_construct(cls, _fields_set: set[str] | None = None, **values: Any) -> Self:
+ return cls()
+
+ @classmethod
+ def model_validate(
+ cls,
+ obj: Any,
+ *,
+ strict: bool | None = None,
+ from_attributes: bool | None = None,
+ context: dict[str, Any] | None = None,
+ ) -> Self:
+ return cls()
+
+ def model_dump_json(
+ self,
+ *,
+ indent: int | None = None,
+ include: set[int] | set[str] | dict[int, Any] | dict[str, Any] | None = None,
+ exclude: set[int] | set[str] | dict[int, Any] | dict[str, Any] | None = None,
+ by_alias: bool = False,
+ exclude_unset: bool = False,
+ exclude_defaults: bool = False,
+ exclude_none: bool = False,
+ round_trip: bool = False,
+ warnings: bool = True,
+ ) -> str:
+ return ""
+
+ @property
+ def model_fields(self) -> dict[str, FieldInfo]: # type: ignore
+ return {}
+
+ @classmethod
+ def model_rebuild(
+ cls,
+ *,
+ force: bool = False,
+ raise_errors: bool = True,
+ _parent_namespace_depth: int = 2,
+ _types_namespace: dict[str, Any] | None = None,
+ ) -> bool | None:
+ return None
+
+else:
+ from astropy.utils.decorators import classproperty
+
+ class _BaseModelCompat(BaseModel): # type:ignore[no-redef]
+ """Methods from pydantic v2 that can be used in pydantic v1."""
+
+ @classmethod
+ def model_validate(
+ cls,
+ obj: Any,
+ *,
+ strict: bool | None = None,
+ from_attributes: bool | None = None,
+ context: dict[str, Any] | None = None,
+ ) -> Self:
+ return cls.parse_obj(obj)
+
+ def model_dump_json(
+ self,
+ *,
+ indent: int | None = None,
+ include: set[int] | set[str] | dict[int, Any] | dict[str, Any] | None = None,
+ exclude: set[int] | set[str] | dict[int, Any] | dict[str, Any] | None = None,
+ by_alias: bool = False,
+ exclude_unset: bool = False,
+ exclude_defaults: bool = False,
+ exclude_none: bool = False,
+ round_trip: bool = False,
+ warnings: bool = True,
+ ) -> str:
+ return self.json(
+ include=include, # type: ignore
+ exclude=exclude, # type: ignore
+ by_alias=by_alias,
+ exclude_unset=exclude_unset,
+ exclude_defaults=exclude_defaults,
+ exclude_none=exclude_none,
+ )
+
+ @classmethod # type: ignore
+ def model_construct(cls, _fields_set: set[str] | None = None, **values: Any) -> Self:
+ # BaseModel.construct() is very close to what we previously
+ # implemented manually in each direct() method but does have one
+ # extra loop in it to fill in defaults and handle aliases.
+ return cls.construct(_fields_set=_fields_set, **values)
+
+ @classmethod
+ @classproperty
+ def model_fields(cls) -> dict[str, FieldInfo]: # type: ignore
+ return cls.__fields__ # type: ignore
+
+ @classmethod
+ def model_rebuild(
+ cls,
+ *,
+ force: bool = False,
+ raise_errors: bool = True,
+ _parent_namespace_depth: int = 2,
+ _types_namespace: dict[str, Any] | None = None,
+ ) -> bool | None:
+ return cls.update_forward_refs()
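
For context, the models touched elsewhere in this change inherit from _BaseModelCompat instead of BaseModel so that calling code can use the pydantic v2 method names regardless of which major version is installed. A minimal sketch of that usage, with a hypothetical model defined only for illustration:

from lsst.daf.butler._compat import _BaseModelCompat


class ExampleModel(_BaseModelCompat):
    # Hypothetical model; not part of this changeset.
    name: str
    value: int = 0


# These v2-style calls also work under pydantic v1, where the shim
# forwards them to parse_obj(), json() and construct() respectively.
model = ExampleModel.model_validate({"name": "a", "value": 1})
as_json = model.model_dump_json()
trusted = ExampleModel.model_construct(name="b", value=2)
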
diff --git a/python/lsst/daf/butler/_quantum_backed.py b/python/lsst/daf/butler/_quantum_backed.py
index 62c33eaa56..a7f143d2a3 100644
--- a/python/lsst/daf/butler/_quantum_backed.py
+++ b/python/lsst/daf/butler/_quantum_backed.py
@@ -31,13 +31,9 @@
from typing import TYPE_CHECKING, Any
from deprecated.sphinx import deprecated
+from lsst.daf.butler._compat import _BaseModelCompat
from lsst.resources import ResourcePathExpression
-try:
- from pydantic.v1 import BaseModel
-except ModuleNotFoundError:
- from pydantic import BaseModel # type: ignore
-
from ._butlerConfig import ButlerConfig
from ._deferredDatasetHandle import DeferredDatasetHandle
from ._limited_butler import LimitedButler
@@ -597,7 +593,7 @@ def extract_provenance_data(self) -> QuantumProvenanceData:
)
-class QuantumProvenanceData(BaseModel):
+class QuantumProvenanceData(_BaseModelCompat):
"""A serializable struct for per-quantum provenance information and
datastore records.
@@ -749,19 +745,16 @@ def _to_uuid_set(uuids: Iterable[str | uuid.UUID]) -> set[uuid.UUID]:
"""
return {uuid.UUID(id) if isinstance(id, str) else id for id in uuids}
- data = QuantumProvenanceData.__new__(cls)
- setter = object.__setattr__
- setter(data, "predicted_inputs", _to_uuid_set(predicted_inputs))
- setter(data, "available_inputs", _to_uuid_set(available_inputs))
- setter(data, "actual_inputs", _to_uuid_set(actual_inputs))
- setter(data, "predicted_outputs", _to_uuid_set(predicted_outputs))
- setter(data, "actual_outputs", _to_uuid_set(actual_outputs))
- setter(
- data,
- "datastore_records",
- {
+ data = cls.model_construct(
+ predicted_inputs=_to_uuid_set(predicted_inputs),
+ available_inputs=_to_uuid_set(available_inputs),
+ actual_inputs=_to_uuid_set(actual_inputs),
+ predicted_outputs=_to_uuid_set(predicted_outputs),
+ actual_outputs=_to_uuid_set(actual_outputs),
+ datastore_records={
key: SerializedDatastoreRecordData.direct(**records)
for key, records in datastore_records.items()
},
)
+
return data
diff --git a/python/lsst/daf/butler/core/datasets/ref.py b/python/lsst/daf/butler/core/datasets/ref.py
index d036c8857f..49af583d5b 100644
--- a/python/lsst/daf/butler/core/datasets/ref.py
+++ b/python/lsst/daf/butler/core/datasets/ref.py
@@ -33,14 +33,11 @@
import sys
import uuid
from collections.abc import Iterable
-from typing import TYPE_CHECKING, Any, ClassVar, Protocol, runtime_checkable
+from typing import TYPE_CHECKING, Any, ClassVar, Protocol, TypeAlias, runtime_checkable
+from lsst.daf.butler._compat import _BaseModelCompat
from lsst.utils.classes import immutable
-
-try:
- from pydantic.v1 import BaseModel, StrictStr, validator
-except ModuleNotFoundError:
- from pydantic import BaseModel, StrictStr, validator # type: ignore
+from pydantic import StrictStr, validator
from ..configSupport import LookupKey
from ..dimensions import DataCoordinate, DimensionGraph, DimensionUniverse, SerializedDataCoordinate
@@ -173,7 +170,7 @@ def makeDatasetId(
_serializedDatasetRefFieldsSet = {"id", "datasetType", "dataId", "run", "component"}
-class SerializedDatasetRef(BaseModel):
+class SerializedDatasetRef(_BaseModelCompat):
"""Simplified model of a `DatasetRef` suitable for serialization."""
id: uuid.UUID
@@ -224,22 +221,24 @@ def direct(
This method should only be called when the inputs are trusted.
"""
- node = SerializedDatasetRef.__new__(cls)
- setter = object.__setattr__
- setter(node, "id", uuid.UUID(id))
- setter(
- node,
- "datasetType",
- datasetType if datasetType is None else SerializedDatasetType.direct(**datasetType),
+ serialized_datasetType = (
+ SerializedDatasetType.direct(**datasetType) if datasetType is not None else None
+ )
+ serialized_dataId = SerializedDataCoordinate.direct(**dataId) if dataId is not None else None
+
+ node = cls.model_construct(
+ _fields_set=_serializedDatasetRefFieldsSet,
+ id=uuid.UUID(id),
+ datasetType=serialized_datasetType,
+ dataId=serialized_dataId,
+ run=sys.intern(run),
+ component=component,
)
- setter(node, "dataId", dataId if dataId is None else SerializedDataCoordinate.direct(**dataId))
- setter(node, "run", sys.intern(run))
- setter(node, "component", component)
- setter(node, "__fields_set__", _serializedDatasetRefFieldsSet)
+
return node
-DatasetId = uuid.UUID
+DatasetId: TypeAlias = uuid.UUID
"""A type-annotation alias for dataset ID providing typing flexibility.
"""
diff --git a/python/lsst/daf/butler/core/datasets/type.py b/python/lsst/daf/butler/core/datasets/type.py
index f8b5293e6d..080df18c8b 100644
--- a/python/lsst/daf/butler/core/datasets/type.py
+++ b/python/lsst/daf/butler/core/datasets/type.py
@@ -29,10 +29,8 @@
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, ClassVar
-try:
- from pydantic.v1 import BaseModel, StrictBool, StrictStr
-except ModuleNotFoundError:
- from pydantic import BaseModel, StrictBool, StrictStr # type: ignore
+from lsst.daf.butler._compat import _BaseModelCompat
+from pydantic import StrictBool, StrictStr
from ..configSupport import LookupKey
from ..dimensions import DimensionGraph, SerializedDimensionGraph
@@ -51,7 +49,7 @@ def _safeMakeMappingProxyType(data: Mapping | None) -> Mapping:
return MappingProxyType(data)
-class SerializedDatasetType(BaseModel):
+class SerializedDatasetType(_BaseModelCompat):
"""Simplified model of a `DatasetType` suitable for serialization."""
name: StrictStr
@@ -82,22 +80,19 @@ def direct(
key = (name, storageClass or "")
if cache is not None and (type_ := cache.get(key, None)) is not None:
return type_
- node = SerializedDatasetType.__new__(cls)
- setter = object.__setattr__
- setter(node, "name", name)
- setter(node, "storageClass", storageClass)
- setter(
- node,
- "dimensions",
- dimensions if dimensions is None else SerializedDimensionGraph.direct(**dimensions),
+
+ serialized_dimensions = (
+ SerializedDimensionGraph.direct(**dimensions) if dimensions is not None else None
)
- setter(node, "parentStorageClass", parentStorageClass)
- setter(node, "isCalibration", isCalibration)
- setter(
- node,
- "__fields_set__",
- {"name", "storageClass", "dimensions", "parentStorageClass", "isCalibration"},
+
+ node = cls.model_construct(
+ name=name,
+ storageClass=storageClass,
+ dimensions=serialized_dimensions,
+ parentStorageClass=parentStorageClass,
+ isCalibration=isCalibration,
)
+
if cache is not None:
cache[key] = node
return node
diff --git a/python/lsst/daf/butler/core/datastoreCacheManager.py b/python/lsst/daf/butler/core/datastoreCacheManager.py
index 37b67bf695..e8d03e6add 100644
--- a/python/lsst/daf/butler/core/datastoreCacheManager.py
+++ b/python/lsst/daf/butler/core/datastoreCacheManager.py
@@ -45,12 +45,9 @@
from random import Random
from typing import TYPE_CHECKING
+from lsst.daf.butler._compat import _BaseModelCompat
from lsst.resources import ResourcePath
-
-try:
- from pydantic.v1 import BaseModel, PrivateAttr
-except ModuleNotFoundError:
- from pydantic import BaseModel, PrivateAttr # type: ignore
+from pydantic import PrivateAttr
from .config import ConfigSubset
from .configSupport import processLookupConfigs
@@ -124,7 +121,7 @@ def _parse_cache_name(cached_location: str) -> tuple[uuid.UUID, str | None, str
return id_, component, ext
-class CacheEntry(BaseModel):
+class CacheEntry(_BaseModelCompat):
"""Represent an entry in the cache."""
name: str
@@ -172,7 +169,7 @@ class _MarkerEntry(CacheEntry):
pass
-class CacheRegistry(BaseModel):
+class CacheRegistry(_BaseModelCompat):
"""Collection of cache entries."""
_size: int = PrivateAttr(0)
diff --git a/python/lsst/daf/butler/core/datastoreRecordData.py b/python/lsst/daf/butler/core/datastoreRecordData.py
index 744af46006..93ae3667b2 100644
--- a/python/lsst/daf/butler/core/datastoreRecordData.py
+++ b/python/lsst/daf/butler/core/datastoreRecordData.py
@@ -28,16 +28,12 @@
import dataclasses
import uuid
from collections.abc import Mapping
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, TypeAlias
+from lsst.daf.butler._compat import PYDANTIC_V2, _BaseModelCompat
from lsst.utils import doImportType
from lsst.utils.introspection import get_full_type_name
-try:
- from pydantic.v1 import BaseModel
-except ModuleNotFoundError:
- from pydantic import BaseModel # type: ignore
-
from .datasets import DatasetId
from .dimensions import DimensionUniverse
from .persistenceContext import PersistenceContextVars
@@ -46,10 +42,16 @@
if TYPE_CHECKING:
from ..registry import Registry
-_Record = dict[str, Any]
+# Pydantic 2 requires that we be explicit about the types used in datastore
+# records; without this, UUID values cannot be handled. Pydantic v1 wants
+# the opposite and does not work unless we use Any.
+if PYDANTIC_V2:
+ _Record: TypeAlias = dict[str, int | str | uuid.UUID | None]
+else:
+ _Record: TypeAlias = dict[str, Any] # type: ignore
-class SerializedDatastoreRecordData(BaseModel):
+class SerializedDatastoreRecordData(_BaseModelCompat):
"""Representation of a `DatastoreRecordData` suitable for serialization."""
dataset_ids: list[uuid.UUID]
@@ -75,10 +77,6 @@ def direct(
This method should only be called when the inputs are trusted.
"""
- data = SerializedDatastoreRecordData.__new__(cls)
- setter = object.__setattr__
- # JSON makes strings out of UUIDs, need to convert them back
- setter(data, "dataset_ids", [uuid.UUID(id) if isinstance(id, str) else id for id in dataset_ids])
# See also comments in record_ids_to_uuid()
for table_data in records.values():
for table_records in table_data.values():
@@ -87,7 +85,14 @@ def direct(
# columns that are UUIDs we'd need more generic approach.
if (id := record.get("dataset_id")) is not None:
record["dataset_id"] = uuid.UUID(id) if isinstance(id, str) else id
- setter(data, "records", records)
+
+ data = cls.model_construct(
+ _fields_set={"dataset_ids", "records"},
+ # JSON makes strings out of UUIDs, need to convert them back
+ dataset_ids=[uuid.UUID(id) if isinstance(id, str) else id for id in dataset_ids],
+ records=records,
+ )
+
return data
diff --git a/python/lsst/daf/butler/core/dimensions/_coordinate.py b/python/lsst/daf/butler/core/dimensions/_coordinate.py
index 175e52a855..e914dfe085 100644
--- a/python/lsst/daf/butler/core/dimensions/_coordinate.py
+++ b/python/lsst/daf/butler/core/dimensions/_coordinate.py
@@ -34,13 +34,9 @@
from typing import TYPE_CHECKING, Any, ClassVar, Literal, overload
from deprecated.sphinx import deprecated
+from lsst.daf.butler._compat import _BaseModelCompat
from lsst.sphgeom import IntersectionRegion, Region
-try:
- from pydantic.v1 import BaseModel
-except ModuleNotFoundError:
- from pydantic import BaseModel # type: ignore
-
from ..json import from_json_pydantic, to_json_pydantic
from ..named import NamedKeyDict, NamedKeyMapping, NamedValueAbstractSet, NameLookupMapping
from ..persistenceContext import PersistenceContextVars
@@ -65,14 +61,16 @@
"""
-class SerializedDataCoordinate(BaseModel):
+class SerializedDataCoordinate(_BaseModelCompat):
"""Simplified model for serializing a `DataCoordinate`."""
dataId: dict[str, DataIdValue]
records: dict[str, SerializedDimensionRecord] | None = None
@classmethod
- def direct(cls, *, dataId: dict[str, DataIdValue], records: dict[str, dict]) -> SerializedDataCoordinate:
+ def direct(
+ cls, *, dataId: dict[str, DataIdValue], records: dict[str, dict] | None
+ ) -> SerializedDataCoordinate:
"""Construct a `SerializedDataCoordinate` directly without validators.
This differs from the pydantic "construct" method in that the arguments
@@ -85,17 +83,14 @@ def direct(cls, *, dataId: dict[str, DataIdValue], records: dict[str, dict]) ->
cache = PersistenceContextVars.serializedDataCoordinateMapping.get()
if cache is not None and (result := cache.get(key)) is not None:
return result
- node = SerializedDataCoordinate.__new__(cls)
- setter = object.__setattr__
- setter(node, "dataId", dataId)
- setter(
- node,
- "records",
- records
- if records is None
- else {k: SerializedDimensionRecord.direct(**v) for k, v in records.items()},
- )
- setter(node, "__fields_set__", {"dataId", "records"})
+
+ if records is None:
+ serialized_records = None
+ else:
+ serialized_records = {k: SerializedDimensionRecord.direct(**v) for k, v in records.items()}
+
+ node = cls.model_construct(dataId=dataId, records=serialized_records)
+
if cache is not None:
cache[key] = node
return node
diff --git a/python/lsst/daf/butler/core/dimensions/_graph.py b/python/lsst/daf/butler/core/dimensions/_graph.py
index c5233aba33..e00b4e1a2a 100644
--- a/python/lsst/daf/butler/core/dimensions/_graph.py
+++ b/python/lsst/daf/butler/core/dimensions/_graph.py
@@ -28,13 +28,9 @@
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, ClassVar
+from lsst.daf.butler._compat import _BaseModelCompat
from lsst.utils.classes import cached_getter, immutable
-try:
- from pydantic.v1 import BaseModel
-except ModuleNotFoundError:
- from pydantic import BaseModel # type: ignore
-
from .._topology import TopologicalFamily, TopologicalSpace
from ..json import from_json_pydantic, to_json_pydantic
from ..named import NamedValueAbstractSet, NamedValueSet
@@ -46,7 +42,7 @@
from ._universe import DimensionUniverse
-class SerializedDimensionGraph(BaseModel):
+class SerializedDimensionGraph(_BaseModelCompat):
"""Simplified model of a `DimensionGraph` suitable for serialization."""
names: list[str]
@@ -61,10 +57,7 @@ def direct(cls, *, names: list[str]) -> SerializedDimensionGraph:
This method should only be called when the inputs are trusted.
"""
- node = SerializedDimensionGraph.__new__(cls)
- object.__setattr__(node, "names", names)
- object.__setattr__(node, "__fields_set__", {"names"})
- return node
+ return cls.model_construct(names=names)
@immutable
diff --git a/python/lsst/daf/butler/core/dimensions/_records.py b/python/lsst/daf/butler/core/dimensions/_records.py
index b8fdbde73b..882ef5938d 100644
--- a/python/lsst/daf/butler/core/dimensions/_records.py
+++ b/python/lsst/daf/butler/core/dimensions/_records.py
@@ -26,20 +26,9 @@
from typing import TYPE_CHECKING, Any, ClassVar, Optional, Tuple
import lsst.sphgeom
+from lsst.daf.butler._compat import PYDANTIC_V2, _BaseModelCompat
from lsst.utils.classes import immutable
-
-try:
- from pydantic.v1 import BaseModel, Field, StrictBool, StrictFloat, StrictInt, StrictStr, create_model
-except ModuleNotFoundError:
- from pydantic import ( # type: ignore
- BaseModel,
- Field,
- StrictBool,
- StrictFloat,
- StrictInt,
- StrictStr,
- create_model,
- )
+from pydantic import Field, StrictBool, StrictFloat, StrictInt, StrictStr, create_model
from ..json import from_json_pydantic, to_json_pydantic
from ..persistenceContext import PersistenceContextVars
@@ -78,7 +67,7 @@ def _subclassDimensionRecord(definition: DimensionElement) -> type[DimensionReco
return type(definition.name + ".RecordClass", (DimensionRecord,), d)
-class SpecificSerializedDimensionRecord(BaseModel, extra="forbid"):
+class SpecificSerializedDimensionRecord(_BaseModelCompat, extra="forbid"):
"""Base model for a specific serialized record content."""
@@ -128,7 +117,7 @@ def _createSimpleRecordSubclass(definition: DimensionElement) -> type[SpecificSe
return model
-class SerializedDimensionRecord(BaseModel):
+class SerializedDimensionRecord(_BaseModelCompat):
"""Simplified model for serializing a `DimensionRecord`."""
definition: str = Field(
@@ -147,22 +136,24 @@ class SerializedDimensionRecord(BaseModel):
},
)
- class Config:
- """Local configuration overrides for model."""
-
- schema_extra = {
- "example": {
- "definition": "detector",
- "record": {
- "instrument": "HSC",
- "id": 72,
- "full_name": "0_01",
- "name_in_raft": "01",
- "raft": "0",
- "purpose": "SCIENCE",
- },
+ if not PYDANTIC_V2:
+
+ class Config:
+ """Local configuration overrides for model."""
+
+ schema_extra = {
+ "example": {
+ "definition": "detector",
+ "record": {
+ "instrument": "HSC",
+ "id": 72,
+ "full_name": "0_01",
+ "name_in_raft": "01",
+ "raft": "0",
+ "purpose": "SCIENCE",
+ },
+ }
}
- }
@classmethod
def direct(
@@ -189,16 +180,14 @@ def direct(
cache = PersistenceContextVars.serializedDimensionRecordMapping.get()
if cache is not None and (result := cache.get(key)) is not None:
return result
- node = SerializedDimensionRecord.__new__(cls)
- setter = object.__setattr__
- setter(node, "definition", definition)
+
# This method requires tuples as values of the mapping, but JSON
# readers will read things in as lists. Be kind and transparently
# transform to tuples
- setter(
- node, "record", {k: v if type(v) != list else tuple(v) for k, v in record.items()} # type: ignore
- )
- setter(node, "__fields_set__", {"definition", "record"})
+ serialized_record = {k: v if type(v) != list else tuple(v) for k, v in record.items()} # type: ignore
+
+ node = cls.model_construct(definition=definition, record=serialized_record) # type: ignore
+
if cache is not None:
cache[key] = node
return node
diff --git a/python/lsst/daf/butler/core/logging.py b/python/lsst/daf/butler/core/logging.py
index bd2ed137a5..c3da60c840 100644
--- a/python/lsst/daf/butler/core/logging.py
+++ b/python/lsst/daf/butler/core/logging.py
@@ -29,13 +29,10 @@
from logging import Formatter, LogRecord, StreamHandler
from typing import IO, Any, ClassVar, Union, overload
+from lsst.daf.butler._compat import PYDANTIC_V2, _BaseModelCompat
from lsst.utils.introspection import get_full_type_name
from lsst.utils.iteration import isplit
-
-try:
- from pydantic.v1 import BaseModel, PrivateAttr
-except ModuleNotFoundError:
- from pydantic import BaseModel, PrivateAttr # type: ignore
+from pydantic import PrivateAttr
_LONG_LOG_FORMAT = "{levelname} {asctime} {name} {filename}:{lineno} - {message}"
"""Default format for log records."""
@@ -160,7 +157,7 @@ def restore_log_record_factory(cls) -> None:
logging.setLogRecordFactory(cls._old_factory)
-class ButlerLogRecord(BaseModel):
+class ButlerLogRecord(_BaseModelCompat):
"""A model representing a `logging.LogRecord`.
A `~logging.LogRecord` always uses the current time in its record
@@ -271,12 +268,27 @@ def __str__(self) -> str:
Record = LogRecord | ButlerLogRecord
+if PYDANTIC_V2:
+ from pydantic import RootModel # type: ignore
+
+ class _ButlerLogRecords(RootModel):
+ root: list[ButlerLogRecord]
+
+else:
+
+ class _ButlerLogRecords(_BaseModelCompat): # type:ignore[no-redef]
+ __root__: list[ButlerLogRecord]
+
+ @property
+ def root(self) -> list[ButlerLogRecord]:
+ return self.__root__
+
+
# Do not inherit from MutableSequence since mypy insists on the values
# being Any even though we wish to constrain them to Record.
-class ButlerLogRecords(BaseModel):
+class ButlerLogRecords(_ButlerLogRecords):
"""Class representing a collection of `ButlerLogRecord`."""
- __root__: list[ButlerLogRecord]
_log_format: str | None = PrivateAttr(None)
@classmethod
@@ -288,7 +300,10 @@ def from_records(cls, records: Iterable[ButlerLogRecord]) -> "ButlerLogRecords":
records : iterable of `ButlerLogRecord`
The records to seed this class with.
"""
- return cls(__root__=list(records))
+ if PYDANTIC_V2:
+ return cls(list(records)) # type: ignore
+ else:
+ return cls(__root__=list(records)) # type: ignore
@classmethod
def from_file(cls, filename: str) -> "ButlerLogRecords":
@@ -461,16 +476,16 @@ def set_log_format(self, format: str | None) -> str | None:
return previous
def __len__(self) -> int:
- return len(self.__root__)
+ return len(self.root)
# The signature does not match the one in BaseModel but that is okay
# if __root__ is being used.
# See https://pydantic-docs.helpmanual.io/usage/models/#custom-root-types
def __iter__(self) -> Iterator[ButlerLogRecord]: # type: ignore
- return iter(self.__root__)
+ return iter(self.root)
def __setitem__(self, index: int, value: Record) -> None:
- self.__root__[index] = self._validate_record(value)
+ self.root[index] = self._validate_record(value)
@overload
def __getitem__(self, index: int) -> ButlerLogRecord:
@@ -483,21 +498,24 @@ def __getitem__(self, index: slice) -> "ButlerLogRecords":
def __getitem__(self, index: slice | int) -> "Union[ButlerLogRecords, ButlerLogRecord]":
# Handles slices and returns a new collection in that
# case.
- item = self.__root__[index]
+ item = self.root[index]
if isinstance(item, list):
- return type(self)(__root__=item)
+ if PYDANTIC_V2:
+ return type(self)(item) # type: ignore
+ else:
+ return type(self)(__root__=item) # type: ignore
else:
return item
def __reversed__(self) -> Iterator[ButlerLogRecord]:
- return self.__root__.__reversed__()
+ return self.root.__reversed__()
def __delitem__(self, index: slice | int) -> None:
- del self.__root__[index]
+ del self.root[index]
def __str__(self) -> str:
# Ensure that every record uses the same format string.
- return "\n".join(record.format(self.log_format) for record in self.__root__)
+ return "\n".join(record.format(self.log_format) for record in self.root)
def _validate_record(self, record: Record) -> ButlerLogRecord:
if isinstance(record, ButlerLogRecord):
@@ -509,23 +527,23 @@ def _validate_record(self, record: Record) -> ButlerLogRecord:
return record
def insert(self, index: int, value: Record) -> None:
- self.__root__.insert(index, self._validate_record(value))
+ self.root.insert(index, self._validate_record(value))
def append(self, value: Record) -> None:
value = self._validate_record(value)
- self.__root__.append(value)
+ self.root.append(value)
def clear(self) -> None:
- self.__root__.clear()
+ self.root.clear()
def extend(self, records: Iterable[Record]) -> None:
- self.__root__.extend(self._validate_record(record) for record in records)
+ self.root.extend(self._validate_record(record) for record in records)
def pop(self, index: int = -1) -> ButlerLogRecord:
- return self.__root__.pop(index)
+ return self.root.pop(index)
def reverse(self) -> None:
- self.__root__.reverse()
+ self.root.reverse()
class ButlerLogRecordHandler(StreamHandler):
@@ -533,7 +551,10 @@ class ButlerLogRecordHandler(StreamHandler):
def __init__(self) -> None:
super().__init__()
- self.records = ButlerLogRecords(__root__=[])
+ if PYDANTIC_V2:
+ self.records = ButlerLogRecords([]) # type: ignore
+ else:
+ self.records = ButlerLogRecords(__root__=[]) # type: ignore
def emit(self, record: LogRecord) -> None:
self.records.append(record)
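
The branching above reflects the different spelling of root models in the two pydantic versions: v1 uses a custom root type declared as __root__ and constructed with a keyword argument, while v2 uses RootModel with a root attribute and positional construction. A minimal sketch of the two forms, independent of this module:

from pydantic.version import VERSION as PYDANTIC_VERSION

if PYDANTIC_VERSION.startswith("2."):
    from pydantic import RootModel

    class NameList(RootModel):
        # pydantic v2 root model
        root: list[str]

    names = NameList(["a", "b"])
    items = names.root
else:
    from pydantic import BaseModel

    class NameList(BaseModel):  # type: ignore[no-redef]
        # pydantic v1 custom root type
        __root__: list[str]

    names = NameList(__root__=["a", "b"])
    items = names.__root__
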
diff --git a/python/lsst/daf/butler/core/quantum.py b/python/lsst/daf/butler/core/quantum.py
index a488cdf448..21344f7ddd 100644
--- a/python/lsst/daf/butler/core/quantum.py
+++ b/python/lsst/daf/butler/core/quantum.py
@@ -28,14 +28,10 @@
from collections.abc import Iterable, Mapping, MutableMapping, Sequence
from typing import Any
+from lsst.daf.butler._compat import _BaseModelCompat
from lsst.utils import doImportType
from lsst.utils.introspection import find_outside_stacklevel
-try:
- from pydantic.v1 import BaseModel
-except ModuleNotFoundError:
- from pydantic import BaseModel # type: ignore
-
from .datasets import DatasetRef, DatasetType, SerializedDatasetRef, SerializedDatasetType
from .datastoreRecordData import DatastoreRecordData, SerializedDatastoreRecordData
from .dimensions import (
@@ -73,7 +69,7 @@ def _reconstructDatasetRef(
return rebuiltDatasetRef
-class SerializedQuantum(BaseModel):
+class SerializedQuantum(_BaseModelCompat):
"""Simplified model of a `Quantum` suitable for serialization."""
taskName: str | None = None
@@ -106,60 +102,41 @@ def direct(
This method should only be called when the inputs are trusted.
"""
- node = SerializedQuantum.__new__(cls)
- setter = object.__setattr__
- setter(node, "taskName", sys.intern(taskName or ""))
- setter(node, "dataId", dataId if dataId is None else SerializedDataCoordinate.direct(**dataId))
-
- setter(
- node,
- "datasetTypeMapping",
- {k: SerializedDatasetType.direct(**v) for k, v in datasetTypeMapping.items()},
- )
-
- setter(
- node,
- "initInputs",
- {k: (SerializedDatasetRef.direct(**v), refs) for k, (v, refs) in initInputs.items()},
- )
- setter(
- node,
- "inputs",
- {k: [(SerializedDatasetRef.direct(**ref), id) for ref, id in v] for k, v in inputs.items()},
- )
- setter(
- node,
- "outputs",
- {k: [(SerializedDatasetRef.direct(**ref), id) for ref, id in v] for k, v in outputs.items()},
- )
- setter(
- node,
- "dimensionRecords",
- dimensionRecords
- if dimensionRecords is None
- else {int(k): SerializedDimensionRecord.direct(**v) for k, v in dimensionRecords.items()},
+ serialized_dataId = SerializedDataCoordinate.direct(**dataId) if dataId is not None else None
+ serialized_datasetTypeMapping = {
+ k: SerializedDatasetType.direct(**v) for k, v in datasetTypeMapping.items()
+ }
+ serialized_initInputs = {
+ k: (SerializedDatasetRef.direct(**v), refs) for k, (v, refs) in initInputs.items()
+ }
+ serialized_inputs = {
+ k: [(SerializedDatasetRef.direct(**ref), id) for ref, id in v] for k, v in inputs.items()
+ }
+ serialized_outputs = {
+ k: [(SerializedDatasetRef.direct(**ref), id) for ref, id in v] for k, v in outputs.items()
+ }
+ serialized_records = (
+ {int(k): SerializedDimensionRecord.direct(**v) for k, v in dimensionRecords.items()}
+ if dimensionRecords is not None
+ else None
)
- setter(
- node,
- "datastoreRecords",
- datastoreRecords
- if datastoreRecords is None
- else {k: SerializedDatastoreRecordData.direct(**v) for k, v in datastoreRecords.items()},
+ serialized_datastore_records = (
+ {k: SerializedDatastoreRecordData.direct(**v) for k, v in datastoreRecords.items()}
+ if datastoreRecords is not None
+ else None
)
- setter(
- node,
- "__fields_set__",
- {
- "taskName",
- "dataId",
- "datasetTypeMapping",
- "initInputs",
- "inputs",
- "outputs",
- "dimensionRecords",
- "datastore_records",
- },
+
+ node = cls.model_construct(
+ taskName=sys.intern(taskName or ""),
+ dataId=serialized_dataId,
+ datasetTypeMapping=serialized_datasetTypeMapping,
+ initInputs=serialized_initInputs,
+ inputs=serialized_inputs,
+ outputs=serialized_outputs,
+ dimensionRecords=serialized_records,
+ datastoreRecords=serialized_datastore_records,
)
+
return node
diff --git a/python/lsst/daf/butler/core/serverModels.py b/python/lsst/daf/butler/core/serverModels.py
index f8f2b56397..113fe20d75 100644
--- a/python/lsst/daf/butler/core/serverModels.py
+++ b/python/lsst/daf/butler/core/serverModels.py
@@ -33,12 +33,9 @@
from collections.abc import Mapping
from typing import Any, ClassVar
+from lsst.daf.butler._compat import _BaseModelCompat
from lsst.utils.iteration import ensure_iterable
-
-try:
- from pydantic.v1 import BaseModel, Field, validator
-except ModuleNotFoundError:
- from pydantic import BaseModel, Field, validator # type: ignore
+from pydantic import Field, validator
from .dimensions import DataIdValue, SerializedDataCoordinate
from .utils import globToRegex
@@ -53,7 +50,7 @@
SimpleDataId = Mapping[str, DataIdValue]
-class ExpressionQueryParameter(BaseModel):
+class ExpressionQueryParameter(_BaseModelCompat):
"""Represents a specification for an expression query.
Generally used for collection or dataset type expressions. This
@@ -198,7 +195,7 @@ class DatasetsQueryParameter(ExpressionQueryParameter):
)
-class QueryBaseModel(BaseModel):
+class QueryBaseModel(_BaseModelCompat):
"""Base model for all query models."""
@validator("keyword_args", check_fields=False)
diff --git a/python/lsst/daf/butler/registry/obscore/_config.py b/python/lsst/daf/butler/registry/obscore/_config.py
index 2c2e21e584..7f84a2f5b8 100644
--- a/python/lsst/daf/butler/registry/obscore/_config.py
+++ b/python/lsst/daf/butler/registry/obscore/_config.py
@@ -35,10 +35,8 @@
from collections.abc import Mapping
from typing import Any
-try:
- from pydantic.v1 import BaseModel, StrictBool, StrictFloat, StrictInt, StrictStr, validator
-except ModuleNotFoundError:
- from pydantic import BaseModel, StrictBool, StrictFloat, StrictInt, StrictStr, validator # type: ignore
+from lsst.daf.butler._compat import _BaseModelCompat
+from pydantic import StrictBool, StrictFloat, StrictInt, StrictStr, validator
class ExtraColumnType(str, enum.Enum):
@@ -50,7 +48,7 @@ class ExtraColumnType(str, enum.Enum):
string = "string"
-class ExtraColumnConfig(BaseModel):
+class ExtraColumnConfig(_BaseModelCompat):
"""Configuration class describing specification of additional column in
obscore table.
"""
@@ -68,7 +66,7 @@ class ExtraColumnConfig(BaseModel):
"""Documentation string for this column."""
-class DatasetTypeConfig(BaseModel):
+class DatasetTypeConfig(_BaseModelCompat):
"""Configuration describing dataset type-related options."""
dataproduct_type: str
@@ -107,7 +105,7 @@ class DatasetTypeConfig(BaseModel):
values, or ExtraColumnConfig mappings."""
-class SpatialPluginConfig(BaseModel):
+class SpatialPluginConfig(_BaseModelCompat):
"""Configuration class for a spatial plugin."""
cls: str
@@ -117,7 +115,7 @@ class SpatialPluginConfig(BaseModel):
"""Configuration object passed to plugin ``initialize()`` method."""
-class ObsCoreConfig(BaseModel):
+class ObsCoreConfig(_BaseModelCompat):
"""Configuration which controls conversion of Registry datasets into
obscore records.
diff --git a/python/lsst/daf/butler/registry/tests/_registry.py b/python/lsst/daf/butler/registry/tests/_registry.py
index 5ba82f76ad..ecad9ff673 100644
--- a/python/lsst/daf/butler/registry/tests/_registry.py
+++ b/python/lsst/daf/butler/registry/tests/_registry.py
@@ -1637,7 +1637,7 @@ def testStorageClassPropagation(self):
registry = self.makeRegistry()
self.loadData(registry, "base.yaml")
dataset_type_in_registry = DatasetType(
- "tbl", dimensions=["instrument"], storageClass="DataFrame", universe=registry.dimensions
+ "tbl", dimensions=["instrument"], storageClass="Packages", universe=registry.dimensions
)
registry.registerDatasetType(dataset_type_in_registry)
run = "run1"
@@ -1647,7 +1647,7 @@ def testStorageClassPropagation(self):
)
self.assertEqual(inserted_ref.datasetType, dataset_type_in_registry)
query_dataset_type = DatasetType(
- "tbl", dimensions=["instrument"], storageClass="ArrowAstropy", universe=registry.dimensions
+ "tbl", dimensions=["instrument"], storageClass="StructuredDataDict", universe=registry.dimensions
)
self.assertNotEqual(dataset_type_in_registry, query_dataset_type)
query_datasets_result = registry.queryDatasets(query_dataset_type, collections=[run])
diff --git a/python/lsst/daf/butler/registry/wildcards.py b/python/lsst/daf/butler/registry/wildcards.py
index 8affcc2b7a..2666926eb9 100644
--- a/python/lsst/daf/butler/registry/wildcards.py
+++ b/python/lsst/daf/butler/registry/wildcards.py
@@ -34,13 +34,9 @@
from typing import Any
from deprecated.sphinx import deprecated
+from lsst.daf.butler._compat import PYDANTIC_V2
from lsst.utils.iteration import ensure_iterable
-try:
- from pydantic.v1 import BaseModel
-except ModuleNotFoundError:
- from pydantic import BaseModel # type: ignore
-
from ..core import DatasetType
from ..core.utils import globToRegex
from ._exceptions import CollectionExpressionError, DatasetTypeExpressionError
@@ -262,12 +258,29 @@ def process(element: Any, alreadyCoerced: bool = False) -> EllipsisType | None:
"""
+if PYDANTIC_V2:
+ from pydantic import RootModel # type: ignore
+
+ class _CollectionSearch(RootModel):
+ root: tuple[str, ...]
+
+else:
+ from pydantic import BaseModel
+
+ class _CollectionSearch(BaseModel, Sequence[str]): # type: ignore
+ __root__: tuple[str, ...]
+
+ @property
+ def root(self) -> tuple[str, ...]:
+ return self.__root__
+
+
@deprecated(
reason="Tuples of string collection names are now preferred. Will be removed after v26.",
version="v25.0",
category=FutureWarning,
)
-class CollectionSearch(BaseModel, Sequence[str]):
+class CollectionSearch(_CollectionSearch):
"""An ordered search path of collections.
The `fromExpression` method should almost always be used to construct
@@ -297,8 +310,6 @@ class CollectionSearch(BaseModel, Sequence[str]):
how different the original expressions appear.
"""
- __root__: tuple[str, ...]
-
@classmethod
def fromExpression(cls, expression: Any) -> CollectionSearch:
"""Process a general expression to construct a `CollectionSearch`
@@ -342,31 +353,35 @@ def fromExpression(cls, expression: Any) -> CollectionSearch:
for name in wildcard.strings:
if name not in deduplicated:
deduplicated.append(name)
- return cls(__root__=tuple(deduplicated))
+ if PYDANTIC_V2:
+ model = cls(tuple(deduplicated)) # type: ignore
+ else:
+ model = cls(__root__=tuple(deduplicated)) # type: ignore
+ return model
def explicitNames(self) -> Iterator[str]:
"""Iterate over collection names that were specified explicitly."""
- yield from self.__root__
+ yield from self.root
def __iter__(self) -> Iterator[str]: # type: ignore
- yield from self.__root__
+ yield from self.root
def __len__(self) -> int:
- return len(self.__root__)
+ return len(self.root)
def __getitem__(self, index: Any) -> str:
- return self.__root__[index]
+ return self.root[index]
def __eq__(self, other: Any) -> bool:
if isinstance(other, CollectionSearch):
- return self.__root__ == other.__root__
+ return self.root == other.root
return False
def __str__(self) -> str:
return "[{}]".format(", ".join(self))
def __repr__(self) -> str:
- return f"CollectionSearch({self.__root__!r})"
+ return f"CollectionSearch({self.root!r})"
@dataclasses.dataclass(frozen=True)
diff --git a/python/lsst/daf/butler/server.py b/python/lsst/daf/butler/server.py
index e6996ac134..b33ab04012 100644
--- a/python/lsst/daf/butler/server.py
+++ b/python/lsst/daf/butler/server.py
@@ -36,7 +36,6 @@
DataCoordinate,
DatasetId,
DatasetRef,
- DimensionConfig,
SerializedDataCoordinate,
SerializedDatasetRef,
SerializedDatasetType,
@@ -133,13 +132,13 @@ def read_server_config() -> Mapping:
db:
"""
config = Config.fromString(config_str, format="yaml")
- return config
+ return config.toDict()
@app.get("/butler/v1/universe", response_model=dict[str, Any])
-def get_dimension_universe(butler: Butler = Depends(butler_readonly_dependency)) -> DimensionConfig:
+def get_dimension_universe(butler: Butler = Depends(butler_readonly_dependency)) -> dict[str, Any]:
"""Allow remote client to get dimensions definition."""
- return butler.dimensions.dimensionConfig
+ return butler.dimensions.dimensionConfig.toDict()
@app.get("/butler/v1/uri/{id}", response_model=str)
diff --git a/python/lsst/daf/butler/tests/_examplePythonTypes.py b/python/lsst/daf/butler/tests/_examplePythonTypes.py
index c7599dac6d..96a9bd01f1 100644
--- a/python/lsst/daf/butler/tests/_examplePythonTypes.py
+++ b/python/lsst/daf/butler/tests/_examplePythonTypes.py
@@ -44,11 +44,7 @@
from typing import TYPE_CHECKING, Any
from lsst.daf.butler import StorageClass, StorageClassDelegate
-
-try:
- from pydantic.v1 import BaseModel
-except ModuleNotFoundError:
- from pydantic import BaseModel # type: ignore
+from lsst.daf.butler._compat import _BaseModelCompat
if TYPE_CHECKING:
from lsst.daf.butler import Butler, Datastore, FormatterFactory
@@ -268,7 +264,7 @@ def makeFromDict(cls, exportDict: dict[str, list | dict | None]) -> MetricsExamp
return cls(exportDict["summary"], exportDict["output"], data)
-class MetricsExampleModel(BaseModel):
+class MetricsExampleModel(_BaseModelCompat):
"""A variant of `MetricsExample` based on model."""
summary: dict[str, Any] | None = None
diff --git a/python/lsst/daf/butler/tests/dict_convertible_model.py b/python/lsst/daf/butler/tests/dict_convertible_model.py
index ca8b206420..fb67fa4052 100644
--- a/python/lsst/daf/butler/tests/dict_convertible_model.py
+++ b/python/lsst/daf/butler/tests/dict_convertible_model.py
@@ -25,13 +25,11 @@
from collections.abc import Mapping
-try:
- from pydantic.v1 import BaseModel, Field
-except ModuleNotFoundError:
- from pydantic import BaseModel, Field # type: ignore
+from lsst.daf.butler._compat import _BaseModelCompat
+from pydantic import Field
-class DictConvertibleModel(BaseModel):
+class DictConvertibleModel(_BaseModelCompat):
"""A pydantic model to/from dict conversion in which the dict
representation is intentionally different from pydantic's own dict
conversions.
diff --git a/requirements.txt b/requirements.txt
index 240f1108b2..011a295ad6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,4 +20,4 @@ matplotlib >= 3.0.3
pyarrow >= 0.16
responses >= 0.12.0
urllib3 >= 1.25.10
-fastapi < 0.100
+fastapi
diff --git a/tests/test_butler.py b/tests/test_butler.py
index 307cad7573..433c692a63 100644
--- a/tests/test_butler.py
+++ b/tests/test_butler.py
@@ -42,13 +42,14 @@
try:
import boto3
import botocore
+ from lsst.resources.s3utils import setAwsEnvCredentials, unsetAwsEnvCredentials
from moto import mock_s3 # type: ignore[import]
except ImportError:
boto3 = None
- def mock_s3(cls): # type: ignore[no-untyped-def]
+ def mock_s3(*args: Any, **kwargs: Any) -> Any: # type: ignore[no-untyped-def]
"""No-op decorator in case moto mock_s3 can not be imported."""
- return cls
+ return None
try:
@@ -93,7 +94,6 @@ def mock_s3(cls): # type: ignore[no-untyped-def]
from lsst.daf.butler.tests import MetricsExample, MultiDetectorFormatter
from lsst.daf.butler.tests.utils import TestCaseMixin, makeTestTempDir, removeTestTempDir, safeTestTempDir
from lsst.resources import ResourcePath
-from lsst.resources.s3utils import setAwsEnvCredentials, unsetAwsEnvCredentials
from lsst.utils import doImportType
from lsst.utils.introspection import get_full_type_name
diff --git a/tests/test_obscore.py b/tests/test_obscore.py
index bf213496f3..e32866d345 100644
--- a/tests/test_obscore.py
+++ b/tests/test_obscore.py
@@ -262,7 +262,7 @@ def test_config_errors(self):
def test_schema(self):
"""Check how obscore schema is constructed"""
- config = ObsCoreConfig(obs_collection="", dataset_types=[], facility_name="FACILITY")
+ config = ObsCoreConfig(obs_collection="", dataset_types={}, facility_name="FACILITY")
schema = ObsCoreSchema(config, [])
table_spec = schema.table_spec
self.assertEqual(list(table_spec.fields.names), [col.name for col in _STATIC_COLUMNS])
@@ -271,7 +271,7 @@ def test_schema(self):
config = ObsCoreConfig(
obs_collection="",
extra_columns={"c1": 1, "c2": "string", "c3": {"template": "{calib_level}", "type": "float"}},
- dataset_types=[],
+ dataset_types={},
facility_name="FACILITY",
)
schema = ObsCoreSchema(config, [])
diff --git a/tests/test_parquet.py b/tests/test_parquet.py
index 6fa77d9390..2ba8a10b29 100644
--- a/tests/test_parquet.py
+++ b/tests/test_parquet.py
@@ -43,7 +43,7 @@
try:
import pandas as pd
except ImportError:
- np = None
+ pd = None
from lsst.daf.butler import (
Butler,
@@ -54,31 +54,52 @@
StorageClassConfig,
StorageClassFactory,
)
-from lsst.daf.butler.delegates.arrowastropy import ArrowAstropyDelegate
-from lsst.daf.butler.delegates.arrownumpy import ArrowNumpyDelegate
-from lsst.daf.butler.delegates.arrowtable import ArrowTableDelegate
-from lsst.daf.butler.delegates.dataframe import DataFrameDelegate
-from lsst.daf.butler.formatters.parquet import (
- ArrowAstropySchema,
- ArrowNumpySchema,
- DataFrameSchema,
- ParquetFormatter,
- _append_numpy_multidim_metadata,
- _astropy_to_numpy_dict,
- _numpy_dict_to_numpy,
- _numpy_dtype_to_arrow_types,
- _numpy_style_arrays_to_arrow_arrays,
- _numpy_to_numpy_dict,
- arrow_to_astropy,
- arrow_to_numpy,
- arrow_to_numpy_dict,
- arrow_to_pandas,
- astropy_to_arrow,
- compute_row_group_size,
- numpy_dict_to_arrow,
- numpy_to_arrow,
- pandas_to_arrow,
-)
+
+try:
+ from lsst.daf.butler.delegates.arrowastropy import ArrowAstropyDelegate
+except ImportError:
+ atable = None
+ pa = None
+try:
+ from lsst.daf.butler.delegates.arrownumpy import ArrowNumpyDelegate
+except ImportError:
+ np = None
+ pa = None
+try:
+ from lsst.daf.butler.delegates.arrowtable import ArrowTableDelegate
+except ImportError:
+ pa = None
+try:
+ from lsst.daf.butler.delegates.dataframe import DataFrameDelegate
+except ImportError:
+ pd = None
+try:
+ from lsst.daf.butler.formatters.parquet import (
+ ArrowAstropySchema,
+ ArrowNumpySchema,
+ DataFrameSchema,
+ ParquetFormatter,
+ _append_numpy_multidim_metadata,
+ _astropy_to_numpy_dict,
+ _numpy_dict_to_numpy,
+ _numpy_dtype_to_arrow_types,
+ _numpy_style_arrays_to_arrow_arrays,
+ _numpy_to_numpy_dict,
+ arrow_to_astropy,
+ arrow_to_numpy,
+ arrow_to_numpy_dict,
+ arrow_to_pandas,
+ astropy_to_arrow,
+ compute_row_group_size,
+ numpy_dict_to_arrow,
+ numpy_to_arrow,
+ pandas_to_arrow,
+ )
+except ImportError:
+ pa = None
+ pd = None
+ atable = None
+ np = None
from lsst.daf.butler.tests.utils import makeTestTempDir, removeTestTempDir
TESTDIR = os.path.abspath(os.path.dirname(__file__))
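
The import block above follows the guarded-import convention already used in this test module: each optional dependency is imported inside try/except ImportError, with the relevant module handle set to None so the affected tests can be skipped when it is missing. A short sketch of the pattern (test name hypothetical):

import unittest

try:
    import pyarrow as pa
except ImportError:
    pa = None


class ExampleParquetTestCase(unittest.TestCase):
    @unittest.skipUnless(pa is not None, "pyarrow is not available")
    def test_round_trip(self) -> None:
        # Only runs when pyarrow imported successfully.
        self.assertTrue(hasattr(pa, "Table"))
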
diff --git a/tests/test_postgresql.py b/tests/test_postgresql.py
index b1e5d573bc..c557dd225a 100644
--- a/tests/test_postgresql.py
+++ b/tests/test_postgresql.py
@@ -19,6 +19,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
+from __future__ import annotations
+
import gc
import itertools
import os
@@ -41,7 +43,11 @@
import sqlalchemy
from lsst.daf.butler import Timespan, ddl
from lsst.daf.butler.registry import _ButlerRegistry, _RegistryFactory
-from lsst.daf.butler.registry.databases.postgresql import PostgresqlDatabase, _RangeTimespanType
+
+try:
+ from lsst.daf.butler.registry.databases.postgresql import PostgresqlDatabase, _RangeTimespanType
+except ImportError:
+ testing = None
from lsst.daf.butler.registry.tests import DatabaseTests, RegistryTests
from lsst.daf.butler.tests.utils import makeTestTempDir, removeTestTempDir