diff --git a/migrations/_oneshot/datasets/int_1.0.0_to_uuid_1.0.0/2101fbf51ad3.py b/migrations/_oneshot/datasets/int_1.0.0_to_uuid_1.0.0/2101fbf51ad3.py
index 3d70e7e..07fce08 100644
--- a/migrations/_oneshot/datasets/int_1.0.0_to_uuid_1.0.0/2101fbf51ad3.py
+++ b/migrations/_oneshot/datasets/int_1.0.0_to_uuid_1.0.0/2101fbf51ad3.py
@@ -151,7 +151,7 @@ def upgrade() -> None:
 
     # drop mapping table
     _LOG.debug("Dropping mapping table")
-    op.drop_table(ID_MAP_TABLE_NAME, schema)
+    op.drop_table(ID_MAP_TABLE_NAME, schema=schema)
 
     # refresh schema from database
     metadata = sa.schema.MetaData(schema=schema)
diff --git a/migrations/datasets/4e2d7a28475b.py b/migrations/datasets/4e2d7a28475b.py
index 624567c..ff04df5 100644
--- a/migrations/datasets/4e2d7a28475b.py
+++ b/migrations/datasets/4e2d7a28475b.py
@@ -122,8 +122,8 @@ def _migrate_default(
     # There may be very many records in dataset table to fit everything in
     # memory, so split the whole thing on dataset_type_id.
     query = sa.select(table.columns["dataset_type_id"]).select_from(table).distinct()
-    result = bind.execute(query).scalars()
-    dataset_type_ids = sorted(result)
+    scalars = bind.execute(query).scalars()
+    dataset_type_ids = sorted(scalars)
     _LOG.info("Found %s dataset types in dataset table", len(dataset_type_ids))
 
     for dataset_type_id in dataset_type_ids:
@@ -140,8 +140,8 @@
         iterator = iter(rows)
         count = 0
         while chunk := list(itertools.islice(iterator, 1000)):
-            query = tmp_table.insert().values(chunk)
-            result = bind.execute(query)
+            insert = tmp_table.insert().values(chunk)
+            result = bind.execute(insert)
             count += result.rowcount
         _LOG.info("Inserted %s rows into temporary table", count)
 
@@ -156,12 +156,12 @@
         )
 
         # Update ingest date from a temporary table.
-        query = table.update().values(
+        update = table.update().values(
             ingest_date=sa.select(tmp_table.columns["ingest_date"])
             .where(tmp_table.columns["id"] == table.columns["id"])
             .scalar_subquery()
         )
-        result = bind.execute(query)
+        result = bind.execute(update)
         _LOG.info("Updated %s rows in dataset table", result.rowcount)
 
     # Update manager schema version.
diff --git a/migrations/obscore-config/4fe28ef5030f.py b/migrations/obscore-config/4fe28ef5030f.py
index bbb712c..cd6e26c 100644
--- a/migrations/obscore-config/4fe28ef5030f.py
+++ b/migrations/obscore-config/4fe28ef5030f.py
@@ -7,6 +7,7 @@
 """
 
 import json
+from typing import TYPE_CHECKING
 
 import yaml
 from alembic import context, op
@@ -15,6 +16,9 @@
 from lsst.daf.butler_migrate.registry import make_registry
 from lsst.utils import doImportType
 
+if TYPE_CHECKING:
+    from lsst.daf.butler.registry.obscore import ObsCoreLiveTableManager
+
 # revision identifiers, used by Alembic.
 revision = "4fe28ef5030f"
 down_revision = "2daeabfb5019"
@@ -142,7 +146,7 @@ def _make_obscore_table(obscore_config: dict) -> None:
     manager_class_name = attributes.get("config:registry.managers.obscore")
     if manager_class_name is None:
         raise ValueError("Registry obscore manager has to be configured in butler_attributes")
-    manager_class = doImportType(manager_class_name)
+    manager_class: type[ObsCoreLiveTableManager] = doImportType(manager_class_name)
 
     repository = context.config.get_section_option("daf_butler_migrate", "repository")
     assert repository is not None, "Need repository in configuration"
@@ -154,7 +158,7 @@
     database = registry._db
     managers = registry._managers
     with database.declareStaticTables(create=False) as staticTablesContext:
-        manager = manager_class.initialize(
+        manager: ObsCoreLiveTableManager = manager_class.initialize(  # type: ignore[assignment]
             database,
             staticTablesContext,
             universe=registry.dimensions,