From 44babd948387da2873ddbd737546541ec7f20fa8 Mon Sep 17 00:00:00 2001
From: Andy Salnikov
Date: Tue, 23 Apr 2024 11:42:37 -0700
Subject: [PATCH 1/4] Update pre-commit config

---
 .pre-commit-config.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b157f24..4d00dab 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,12 +1,12 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v4.6.0
     hooks:
       - id: check-yaml
       - id: end-of-file-fixer
       - id: trailing-whitespace
   - repo: https://github.com/psf/black
-    rev: 24.2.0
+    rev: 24.4.0
     hooks:
       - id: black
         # It is recommended to specify the latest version of Python
@@ -21,6 +21,6 @@ repos:
         name: isort (python)
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.3.0
+    rev: v0.4.1
     hooks:
       - id: ruff

From 7384cd9abc3fce17643a69cf4e94b36ec4e8a09d Mon Sep 17 00:00:00 2001
From: Andy Salnikov
Date: Tue, 23 Apr 2024 11:43:59 -0700
Subject: [PATCH 2/4] Update timespan module after Timespan class changes

---
 python/lsst/daf/butler_migrate/timespan.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/lsst/daf/butler_migrate/timespan.py b/python/lsst/daf/butler_migrate/timespan.py
index 22aa5a7..b71d057 100644
--- a/python/lsst/daf/butler_migrate/timespan.py
+++ b/python/lsst/daf/butler_migrate/timespan.py
@@ -77,7 +77,7 @@ def format_timespan_value(timespan: Timespan, column_name: str, dialect: str) ->
     values : `dict` [ `str`, `typing.Any` ]
         Mapping from column name to value for that column.
     """
-    nanoseconds = timespan.to_simple()
+    nanoseconds = timespan.nsec
     if dialect == "postgresql":
         return {column_name: Range(*nanoseconds)}
     elif dialect == "sqlite":

From 00bd36d424ff34f389592dc1f1975517283230b6 Mon Sep 17 00:00:00 2001
From: Andy Salnikov
Date: Tue, 23 Apr 2024 12:09:23 -0700
Subject: [PATCH 3/4] Improve support for sqlite registries (DM-44007)

Expanded test_dimensions_json tests to include all supported universe
versions, added Postgres tests and tests for schema downgrades.
Updated two migration scripts to work with sqlite backend.
---
 migrations/dimensions-config/2a8a32e1bec3.py |  14 +-
 migrations/dimensions-config/c5ae3a2cd7c2.py |  22 +-
 .../butler_migrate/_dimensions_json_utils.py |  31 ++-
 tests/test_dimensions_json.py                | 259 ++++++++++++++----
 4 files changed, 266 insertions(+), 60 deletions(-)

diff --git a/migrations/dimensions-config/2a8a32e1bec3.py b/migrations/dimensions-config/2a8a32e1bec3.py
index bcf01a1..75926cc 100644
--- a/migrations/dimensions-config/2a8a32e1bec3.py
+++ b/migrations/dimensions-config/2a8a32e1bec3.py
@@ -5,6 +5,7 @@
 Create Date: 2024-02-20 14:49:26.435042
 
 """
+
 import logging
 
 import sqlalchemy
@@ -92,7 +93,14 @@ def _update_config(config: dict) -> dict:
     # Actual schema change.
     for table, column in table_columns:
         _LOG.info("Alter %s.%s column type to %s", table, column, new_type)
-        op.alter_column(table, column, type_=new_type, schema=schema)
+        with op.batch_alter_table(table, schema=schema) as batch_op:
+            batch_op.alter_column(column, type_=new_type)
+            if op.get_bind().dialect.name == "sqlite" and table == "instrument":
+                # SQLite uses special check constraint.
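+                # SQLite cannot alter columns or constraints in place, so
+                # batch_alter_table recreates the whole table; the length
+                # CHECK constraint is dropped and re-created below to match
+                # the new column size.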
+                constraint_name = "instrument_len_name"
+                batch_op.drop_constraint(constraint_name)
+                constraint = f'length("{column}")<={size} AND length("{column}")>=1'
+                batch_op.create_check_constraint(constraint_name, sqlalchemy.text(constraint))
 
     # Update attributes
     assert mig_context.bind is not None
@@ -141,6 +149,10 @@ def _lock_tables(tables: list[str], schema: str) -> None:
     """Lock all tables that need to be migrated to avoid conflicts."""
     connection = op.get_bind()
 
+    if connection.dialect.name == "sqlite":
+        # SQLite does not support LOCK TABLE.
+        return
+
     for table in tables:
         # We do not need quoting for schema/table names.
         if schema:

diff --git a/migrations/dimensions-config/c5ae3a2cd7c2.py b/migrations/dimensions-config/c5ae3a2cd7c2.py
index b8fa566..7b7fa79 100644
--- a/migrations/dimensions-config/c5ae3a2cd7c2.py
+++ b/migrations/dimensions-config/c5ae3a2cd7c2.py
@@ -5,6 +5,7 @@
 Create Date: 2022-11-25 12:04:18.424257
 
 """
+
 import sqlalchemy as sa
 from alembic import context, op
 from lsst.daf.butler_migrate.butler_attributes import ButlerAttributes
@@ -26,15 +27,15 @@ def upgrade() -> None:
     - Change observation_reason column size for visit and exposure tables.
     - For sqlite backend update check constraint for new column size.
     """
-    _migrate(2, 3, 68)
+    _migrate(2, 3, 68, 32)
 
 
 def downgrade() -> None:
     """Undo migration."""
-    _migrate(3, 2, 32)
+    _migrate(3, 2, 32, 68)
 
 
-def _migrate(old_version: int, new_version: int, column_size: int) -> None:
+def _migrate(old_version: int, new_version: int, column_size: int, old_column_size: int) -> None:
     mig_context = context.get_context()
 
     # When we use schemas in postgres then all tables belong to the same schema
@@ -67,7 +68,13 @@ def _update_config(config: dict) -> dict:
         with op.batch_alter_table(table_name, schema=schema) as batch_op:
             # change column type
             column = "observation_reason"
-            column_type = sa.String(column_size)
+            column_type: sa.types.TypeEngine
+            if column_size > 32:
+                # daf_butler uses Text for all string columns longer than 32
+                # characters.
+                column_type = sa.Text()
+            else:
+                column_type = sa.String(column_size)
             batch_op.alter_column(column, type_=column_type)  # type: ignore[attr-defined]
 
     assert mig_context.bind is not None, "Requires an existing bind"
@@ -75,5 +82,8 @@ def _update_config(config: dict) -> dict:
             # For sqlite we also define check constraint
             constraint_name = f"{table_name}_len_{column}"
             constraint = f'length("{column}")<={column_size} AND length("{column}")>=1'
-            batch_op.drop_constraint(constraint_name)  # type: ignore[attr-defined]
-            batch_op.create_check_constraint(constraint_name, sa.text(constraint))  # type: ignore
+            if old_column_size <= 32:
+                # Constraint only exists for shorter strings.
+                batch_op.drop_constraint(constraint_name)  # type: ignore[attr-defined]
+            if column_size <= 32:
+                batch_op.create_check_constraint(constraint_name, sa.text(constraint))  # type: ignore

diff --git a/python/lsst/daf/butler_migrate/_dimensions_json_utils.py b/python/lsst/daf/butler_migrate/_dimensions_json_utils.py
index 6a888f5..9f145ae 100644
--- a/python/lsst/daf/butler_migrate/_dimensions_json_utils.py
+++ b/python/lsst/daf/butler_migrate/_dimensions_json_utils.py
@@ -22,6 +22,30 @@
 import difflib
 import json
 
+import yaml
+from lsst.resources import ResourcePath
+
+
+def historical_dimensions_resource(universe_version: int, namespace: str = "daf_butler") -> ResourcePath:
+    """Return location of the dimensions configuration for a specific version.
+
+    Parameters
+    ----------
+    universe_version : `int`
+        Version number of the universe to be loaded.
+    namespace : `str`, optional
+        Configuration namespace.
+
+    Returns
+    -------
+    path : `lsst.resources.ResourcePath`
+        Location of the configuration; there is no guarantee that this
+        resource actually exists.
+    """
+    return ResourcePath(
+        f"resource://lsst.daf.butler/configs/old_dimensions/{namespace}_universe{universe_version}.yaml"
+    )
+
 
 def load_historical_dimension_universe_json(universe_version: int) -> str:
     """Load a specific version of the default dimension universe as JSON.
@@ -36,12 +60,7 @@ def load_historical_dimension_universe_json(universe_version: int) -> str:
     universe : `str`
         Dimension universe configuration encoded as a JSON string.
     """
-    import yaml
-    from lsst.resources import ResourcePath
-
-    path = ResourcePath(
-        f"resource://lsst.daf.butler/configs/old_dimensions/daf_butler_universe{universe_version}.yaml"
-    )
+    path = historical_dimensions_resource(universe_version)
     with path.open() as input:
         dimensions = yaml.safe_load(input)
     return json.dumps(dimensions)

diff --git a/tests/test_dimensions_json.py b/tests/test_dimensions_json.py
index 15ee3c2..15dbbfc 100644
--- a/tests/test_dimensions_json.py
+++ b/tests/test_dimensions_json.py
@@ -19,27 +19,61 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
+import contextlib
+import gc
 import os
+import tempfile
 import unittest
+from typing import TYPE_CHECKING, Any
 
+import sqlalchemy
+import yaml
 from lsst.daf.butler import Butler, Config
 from lsst.daf.butler.direct_butler import DirectButler
 from lsst.daf.butler.registry.sql_registry import SqlRegistry
 from lsst.daf.butler.tests.utils import makeTestTempDir, removeTestTempDir
 from lsst.daf.butler.transfers import YamlRepoImportBackend
 from lsst.daf.butler_migrate import butler_attributes, database, migrate, script
+from lsst.daf.butler_migrate._dimensions_json_utils import historical_dimensions_resource
+from lsst.daf.butler_migrate.revision import rev_id
+
+try:
+    import testing.postgresql  # type: ignore[import-untyped]
+except ImportError:
+    testing = None
+
+if TYPE_CHECKING:
+
+    class TestCaseMixin(unittest.TestCase):
+        """Base class for mixin test classes that use TestCase methods."""
+
+else:
+
+    class TestCaseMixin:
+        """Do-nothing definition of mixin base class for regular execution."""
+
 
 TESTDIR = os.path.abspath(os.path.dirname(__file__))
 
 _NAMESPACE = "daf_butler"
 
-# alembic revisions
-_REVISION_V0 = "f3bcee34f344"
-_REVISION_V1 = "380002bcbb26"
-_REVISION_V2 = "bf6308af80aa"
+_MANAGER = "dimensions-config"
 
 
+def _revision_id(version: int, namespace: str = "daf_butler") -> str:
+    """Return alembic revision name."""
+    return rev_id(_MANAGER, namespace, str(version))
 
-class DimensionsJsonTestCase(unittest.TestCase):
+
+def _make_universe(version: int) -> Config:
+    """Load the dimensions universe for a specific version."""
+    path = historical_dimensions_resource(version)
+    with path.open() as input:
+        dimensions = yaml.safe_load(input)
+    return Config(dimensions)
+
+
+class DimensionsJsonTestCase(TestCaseMixin):
     """Tests for migrating of dimensions.json stored configuration."""
 
     def setUp(self) -> None:
@@ -49,21 +83,81 @@ def setUp(self) -> None:
     def tearDown(self) -> None:
         removeTestTempDir(self.root)
 
-    def make_butler_v0(self) -> Config:
-        """Make new Butler instance with dimensions config v0."""
-        dimensions = os.path.join(TESTDIR, "config", "dimensions-v0.yaml")
-        config = Butler.makeRepo(self.root, dimensionConfig=dimensions)
+    def _butler_config(self) -> Config | None:
+        """Make configuration for creating new Butlers."""
+        raise NotImplementedError()
+
+    def make_butler(self, version: int, **kw: Any) -> str:
+        """Make a Butler instance with universe of specific version."""
+        dimensions = _make_universe(version)
+        # Use unique folder for each butler so we can create many butlers
+        # in one unit test.
+        butler_root = tempfile.mkdtemp(dir=self.root)
+        config = self._butler_config()
+        Butler.makeRepo(butler_root, config=config, dimensionConfig=dimensions)
         # Need to stamp current versions into alembic.
         script.migrate_stamp(
-            repo=self.root,
+            repo=butler_root,
             mig_path=self.mig_path,
             purge=False,
             dry_run=False,
-            namespace=_NAMESPACE,
+            namespace=_NAMESPACE if version == 0 else None,
             manager=None,
         )
-        self.db = database.Database.from_repo(self.root)
-        return config
+        return butler_root
+
+    def _upgrade_one(self, start_version: int) -> None:
+        """Test version upgrade from N to N+1."""
+        butler_root = self.make_butler(start_version)
+        db = database.Database.from_repo(butler_root)
+
+        versions = db.manager_versions(_NAMESPACE)
+        self.assertEqual(versions[_MANAGER], (_NAMESPACE, str(start_version), _revision_id(start_version)))
+
+        # Extra version-specific options are needed.
+        namespace = None
+        options = None
+        if start_version == 0:
+            namespace = _NAMESPACE
+        elif start_version == 1:
+            options = {"has_simulated": "0"}
+
+        script.migrate_upgrade(
+            repo=butler_root,
+            revision=_revision_id(start_version + 1),
+            mig_path=self.mig_path,
+            one_shot_tree="",
+            sql=False,
+            namespace=namespace,
+            options=options,
+        )
+
+        versions = db.manager_versions(_NAMESPACE)
+        self.assertEqual(
+            versions[_MANAGER], (_NAMESPACE, str(start_version + 1), _revision_id(start_version + 1))
+        )
+
+    def _downgrade_one(self, start_version: int) -> None:
+        """Test version downgrade from N to N-1."""
+        butler_root = self.make_butler(start_version)
+        db = database.Database.from_repo(butler_root)
+
+        versions = db.manager_versions(_NAMESPACE)
+        self.assertEqual(versions[_MANAGER], (_NAMESPACE, str(start_version), _revision_id(start_version)))
+
+        script.migrate_downgrade(
+            repo=butler_root,
+            revision=_revision_id(start_version - 1),
+            mig_path=self.mig_path,
+            one_shot_tree="",
+            sql=False,
+            namespace=None,
+        )
+
+        versions = db.manager_versions(_NAMESPACE)
+        self.assertEqual(
+            versions[_MANAGER], (_NAMESPACE, str(start_version - 1), _revision_id(start_version - 1))
+        )
 
     def load_data(self, registry: SqlRegistry, filename: str) -> None:
         """Load registry test data from filename in data folder."""
@@ -72,44 +166,63 @@ def load_data(self, registry: SqlRegistry, filename: str) -> None:
         backend.register()
         backend.load(datastore=None)
 
+    def test_upgrade_empty(self) -> None:
+        """Simple test for incremental upgrades for all known versions. This
+        only tests schema changes with an empty registry. More specific tests
+        can load data to verify that data migration also works OK.
+        """
+        for start_version in range(6):
+            with self.subTest(version=start_version):
+                self._upgrade_one(start_version)
+
+    def test_downgrade_empty(self) -> None:
+        """Simple test for downgrades for all known versions. This only
+        tests schema changes with an empty registry.
+        """
+        for start_version in range(6):
+            with self.subTest(version=start_version):
+                with contextlib.suppress(NotImplementedError):
+                    self._downgrade_one(start_version + 1)
+
     def test_upgrade_v1(self) -> None:
         """Test for upgrade/downgrade between v0 and v1.
 
         No actual schema change in this migration, only check that contents
         of ``dimensions.json`` is updated.
         """
-        self.make_butler_v0()
+        butler_root = self.make_butler(0)
+        db = database.Database.from_repo(butler_root)
 
-        self.assertIsNone(self.db.dimensions_namespace())
-        versions = self.db.manager_versions(_NAMESPACE)
-        self.assertEqual(versions["dimensions-config"], (_NAMESPACE, "0", _REVISION_V0))
+        self.assertIsNone(db.dimensions_namespace())
+        versions = db.manager_versions(_NAMESPACE)
+        self.assertEqual(versions[_MANAGER], (_NAMESPACE, "0", _revision_id(0)))
 
         # Upgrade to v1.
         script.migrate_upgrade(
-            repo=self.root,
-            revision=_REVISION_V1,
+            repo=butler_root,
+            revision=_revision_id(1),
             mig_path=self.mig_path,
             one_shot_tree="",
             sql=False,
             namespace=_NAMESPACE,
             options=None,
         )
-        self.assertEqual(self.db.dimensions_namespace(), _NAMESPACE)
-        versions = self.db.manager_versions()
-        self.assertEqual(versions["dimensions-config"], (_NAMESPACE, "1", _REVISION_V1))
+        self.assertEqual(db.dimensions_namespace(), _NAMESPACE)
+        versions = db.manager_versions()
+        self.assertEqual(versions[_MANAGER], (_NAMESPACE, "1", _revision_id(1)))
 
         # Downgrade back to v0.
         script.migrate_downgrade(
-            repo=self.root,
-            revision=_REVISION_V0,
+            repo=butler_root,
+            revision=_revision_id(0),
             mig_path=self.mig_path,
             one_shot_tree="",
             sql=False,
             namespace=_NAMESPACE,
         )
-        self.assertIsNone(self.db.dimensions_namespace())
-        versions = self.db.manager_versions(_NAMESPACE)
-        self.assertEqual(versions["dimensions-config"], (_NAMESPACE, "0", _REVISION_V0))
+        self.assertIsNone(db.dimensions_namespace())
+        versions = db.manager_versions(_NAMESPACE)
+        self.assertEqual(versions[_MANAGER], (_NAMESPACE, "0", _revision_id(0)))
 
     def test_upgrade_v2(self) -> None:
         """Test for upgrade from v0 to v2.
@@ -117,13 +230,14 @@ def test_upgrade_v2(self) -> None:
         Loads some dimension records and verifies that data is migrated
         correctly.
         """
-        config = self.make_butler_v0()
+        butler_root = self.make_butler(0)
+        db = database.Database.from_repo(butler_root)
 
-        self.assertIsNone(self.db.dimensions_namespace())
-        versions = self.db.manager_versions(_NAMESPACE)
-        self.assertEqual(versions["dimensions-config"], (_NAMESPACE, "0", _REVISION_V0))
+        self.assertIsNone(db.dimensions_namespace())
+        versions = db.manager_versions(_NAMESPACE)
+        self.assertEqual(versions[_MANAGER], (_NAMESPACE, "0", _revision_id(0)))
 
-        butler = Butler.from_config(config, writeable=True)
+        butler = Butler(butler_root, writeable=True)  # type: ignore[abstract]
         assert isinstance(butler, DirectButler), "Only DirectButler is supported"
         self.load_data(butler._registry, "records.yaml")
 
@@ -141,33 +255,33 @@ def test_upgrade_v2(self) -> None:
         # Upgrade to v1. We could upgrade to v2 in one step but I want to check
        # different arguments at each step.
         script.migrate_upgrade(
-            repo=self.root,
-            revision=_REVISION_V1,
+            repo=butler_root,
+            revision=_revision_id(1),
             mig_path=self.mig_path,
             one_shot_tree="",
             sql=False,
             namespace=_NAMESPACE,
             options=None,
         )
-        self.assertEqual(self.db.dimensions_namespace(), _NAMESPACE)
-        versions = self.db.manager_versions()
-        self.assertEqual(versions["dimensions-config"], (_NAMESPACE, "1", _REVISION_V1))
+        self.assertEqual(db.dimensions_namespace(), _NAMESPACE)
+        versions = db.manager_versions()
+        self.assertEqual(versions[_MANAGER], (_NAMESPACE, "1", _revision_id(1)))
 
         # Upgrade to v2.
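+        # Universe v2 adds exposure metadata whose values cannot be derived
+        # from existing data; the "has_simulated" option below supplies the
+        # value used to backfill existing rows.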
         script.migrate_upgrade(
-            repo=self.root,
-            revision=_REVISION_V2,
+            repo=butler_root,
+            revision=_revision_id(2),
             mig_path=self.mig_path,
             one_shot_tree="",
             sql=False,
             namespace=None,
             options={"has_simulated": "0"},
         )
-        self.assertEqual(self.db.dimensions_namespace(), _NAMESPACE)
-        versions = self.db.manager_versions()
-        self.assertEqual(versions["dimensions-config"], (_NAMESPACE, "2", _REVISION_V2))
+        self.assertEqual(db.dimensions_namespace(), _NAMESPACE)
+        versions = db.manager_versions()
+        self.assertEqual(versions[_MANAGER], (_NAMESPACE, "2", _revision_id(2)))
 
-        butler = Butler.from_config(config, writeable=False)
+        butler = Butler(butler_root, writeable=False)  # type: ignore[abstract]
 
         # Check records for v2 attributes.
         records = list(butler.registry.queryDimensionRecords("instrument"))
@@ -201,17 +315,68 @@ def test_upgrade_v2(self) -> None:
         )
 
     def test_validate_dimensions_json(self) -> None:
-        self.make_butler_v0()
+        butler_root = self.make_butler(0)
+        db = database.Database.from_repo(butler_root)
 
         universe = 5
-        with self.db.connect() as connection:
-            attribs = butler_attributes.ButlerAttributes(connection)
+        with db.connect() as connection:
+            attribs = butler_attributes.ButlerAttributes(connection, schema=db.schema)
             with self.assertRaisesRegex(
                 ValueError, "dimensions.json stored in database does not match expected"
             ):
                 attribs.validate_dimensions_json(universe)
 
             attribs.replace_dimensions_json(universe)
-            self.assertIsNone(attribs.validate_dimensions_json(universe))
+            attribs.validate_dimensions_json(universe)
+
+
+class SQLiteDimensionsJsonTestCase(DimensionsJsonTestCase, unittest.TestCase):
+    """Test using SQLite backend."""
+
+    def _butler_config(self) -> Config | None:
+        return None
+
+
+@unittest.skipUnless(testing is not None, "testing.postgresql module not found")
+class PostgresDimensionsJsonTestCase(DimensionsJsonTestCase, unittest.TestCase):
+    """Test using Postgres backend."""
+
+    postgresql: Any
+
+    @classmethod
+    def _handler(cls, postgresql: Any) -> None:
+        engine = sqlalchemy.engine.create_engine(postgresql.url())
+        with engine.begin() as connection:
+            connection.execute(sqlalchemy.text("CREATE EXTENSION btree_gist;"))
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        # Create the postgres test server.
+        cls.postgresql = testing.postgresql.PostgresqlFactory(
+            cache_initialized_db=True, on_initialized=cls._handler
+        )
+        super().setUpClass()
+
+    @classmethod
+    def tearDownClass(cls) -> None:
+        # Clean up any lingering SQLAlchemy engines/connections
+        # so they're closed before we shut down the server.
+        gc.collect()
+        cls.postgresql.clear_cache()
+        super().tearDownClass()
+
+    def setUp(self) -> None:
+        super().setUp()
+        self.server = self.postgresql()
+        self.count = 0
+
+    def _butler_config(self) -> Config | None:
+        # Use a unique namespace for each instance; some tests may use sub-tests.
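+        # The registry "namespace" translates to a database schema, so
+        # separate registries can coexist on the shared test server.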
+        self.count += 1
+        reg_config = {
+            "db": self.server.url(),
+            "namespace": f"namespace{self.count}",
+        }
+        return Config({"registry": reg_config})
 
 
 if __name__ == "__main__":

From b2b72119b8bfd3555f85180d603cb37365076d05 Mon Sep 17 00:00:00 2001
From: Andy Salnikov
Date: Tue, 23 Apr 2024 12:18:23 -0700
Subject: [PATCH 4/4] Remove unused dimensions config file

---
 tests/config/dimensions-v0.yaml | 440 --------------------------------
 1 file changed, 440 deletions(-)
 delete mode 100644 tests/config/dimensions-v0.yaml

diff --git a/tests/config/dimensions-v0.yaml b/tests/config/dimensions-v0.yaml
deleted file mode 100644
index eb1c66c..0000000
--- a/tests/config/dimensions-v0.yaml
+++ /dev/null
@@ -1,440 +0,0 @@
-version: 0
-skypix:
-  # 'common' is the skypix system and level used to relate all other spatial
-  # dimensions. Its value is a string formed by concatenating one of the
-  # other keys under the 'skypix' headerin (i.e. the name of a skypix system)
-  # with an integer level (with no zero-padding).
-  common: htm7
-  htm:
-    class: lsst.sphgeom.HtmPixelization
-    max_level: 24
-
-elements:
-  instrument:
-    doc: >
-      An entity that produces observations. An instrument defines a set of
-      physical_filters and detectors and a numbering system for the exposures
-      and visits that represent observations with it.
-    keys:
-      -
-        name: name
-        type: string
-        length: 16
-    metadata:
-      -
-        name: visit_max
-        type: int
-        doc: >
-          Maximum value for the 'visit' field for visits associated with
-          this instrument (exclusive).
-      -
-        name: exposure_max
-        type: int
-        doc: >
-          Maximum value for the 'exposure' field for exposures associated with
-          this instrument (exclusive).
-      -
-        name: detector_max
-        type: int
-        doc: >
-          Maximum value for the 'detector' field for detectors associated with
-          this instrument (exclusive).
-      -
-        name: class_name
-        type: string
-        length: 64
-        doc: >
-          Full class name of the Instrument class associated with this
-          instrument.
-    governor: true
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.governor.BasicGovernorDimensionRecordStorage
-
-  band:
-    doc: >
-      A filter that is not associated with a particular instrument. An
-      abstract filter can be used to relate similar physical filters, and
-      is typically the filter associated with coadds.
-    keys:
-      -
-        name: name
-        type: string
-        length: 32
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.caching.CachingDimensionRecordStorage
-      nested:
-        cls: lsst.daf.butler.registry.dimensions.query.QueryDimensionRecordStorage
-        view_of: physical_filter
-
-  physical_filter:
-    doc: >
-      A filter associated with a particular instrument. physical_filters are
-      used to identify datasets that can only be associated with a single
-      observation.
-    keys:
-      -
-        name: name
-        type: string
-        length: 32
-    requires:
-      - instrument
-    implies:
-      - band
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.caching.CachingDimensionRecordStorage
-      nested:
-        cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  subfilter:
-    doc: >
-      A mathematical division of an band. Subfilters are used to
-      model wavelength-dependent effects such as differential chromatic
-      refraction.
-    keys:
-      -
-        name: id
-        type: int
-    requires:
-      - band
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.caching.CachingDimensionRecordStorage
-      nested:
-        cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  detector:
-    doc: >
-      A detector associated with a particular instrument (not an observation
-      of that detector; that requires specifying an exposure or visit as
-      well).
-    keys:
-      -
-        name: id
-        type: int
-      -
-        name: full_name
-        type: string
-        length: 32
-    requires: [instrument]
-    metadata:
-      -
-        name: name_in_raft
-        type: string
-        length: 32
-      -
-        name: raft
-        type: string
-        length: 32
-        doc: >
-          A string name for a group of detectors with an instrument-dependent
-          interpretation.
-      -
-        name: purpose
-        type: string
-        length: 32
-        doc: >
-          Role of the detector; typically one of "SCIENCE", "WAVEFRONT",
-          or "GUIDE", though instruments may define additional values.
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.caching.CachingDimensionRecordStorage
-      nested:
-        cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  visit:
-    doc: >
-      A sequence of observations processed together, comprised of one or
-      more exposures from the same instrument with the same pointing and
-      physical_filter.
-      The visit table contains metadata that is both meaningful only for
-      science exposures and the same for all exposures in a visit.
-    keys:
-      -
-        name: id
-        type: int
-      -
-        name: name
-        type: string
-        length: 64
-    requires: [instrument]
-    implies: [physical_filter, visit_system]
-    metadata:
-      -
-        name: day_obs
-        type: int
-        doc: >
-          Day of observation as defined by the observatory (YYYYMMDD format).
-          If a visit crosses multiple days this entry will be the earliest
-          day of any of the exposures that make up the visit.
-      -
-        name: exposure_time
-        type: float
-        doc: >
-          The total exposure time of the visit in seconds. This should
-          be equal to the sum of the exposure_time values for all
-          constituent exposures (i.e. it should not include time between
-          exposures).
-      -
-        name: target_name
-        type: string
-        length: 64
-        doc: Object of interest for this visit or survey field name.
-      -
-        name: observation_reason
-        type: string
-        length: 32
-        doc: >
-          The reason this visit was taken. (e.g. science,
-          filter scan, unknown, various).
-      -
-        name: science_program
-        type: string
-        length: 64
-        doc: Observing program (survey or proposal) identifier.
-      -
-        name: zenith_angle
-        type: float
-        doc: >
-          Approximate zenith angle in degrees during the visit.
-          Can only be approximate since it is continuously changing during
-          and observation and multiple visits can be combined from a
-          relatively long period.
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  exposure:
-    doc: >
-      An observation associated with a particular instrument. All direct
-      observations are identified with an exposure, but derived datasets
-      that may be based on more than one exposure (e.g. multiple snaps) are
-      typically identified with visits instead, even for instruments that
-      don't have multiple exposures per visit. As a result, instruments
-      that don't have multiple exposures per visit will typically have visit
-      entries that are essentially duplicates of their exposure entries.
-
-      The exposure table contains metadata entries that are relevant for
-      calibration exposures, and does not duplicate entries in visit that
-      would be the same for all exposures within a visit with the exception
-      of the exposure.group entry.
-    keys:
-      -
-        name: id
-        type: int
-      -
-        name: obs_id
-        type: string
-        length: 64
-    requires: [instrument]
-    implies: [physical_filter]
-    metadata:
-      -
-        name: exposure_time
-        type: float
-        doc: Duration of the exposure with shutter open (seconds).
-      -
-        name: dark_time
-        type: float
-        doc: Duration of the exposure with shutter closed (seconds).
-      -
-        name: observation_type
-        type: string
-        length: 16
-        doc: The observation type of this exposure (e.g. dark, bias, science).
-      -
-        name: observation_reason
-        type: string
-        length: 32
-        doc: >
-          The reason this observation was taken. (e.g. science,
-          filter scan, unknown).
-      -
-        name: day_obs
-        type: int
-        doc: >
-          Day of observation as defined by the observatory (YYYYMMDD format).
-      -
-        name: seq_num
-        type: int
-        doc: >
-          Counter for the observation within a larger sequence. Context
-          of the sequence number is observatory specific. Can be
-          a global counter or counter within day_obs.
-      -
-        name: group_name
-        type: string
-        length: 64
-        doc: >
-          String group identifier associated with this exposure by the
-          acquisition system.
-      -
-        name: group_id
-        type: int
-        doc: >
-          Integer group identifier associated with this exposure by the
-          acquisition system.
-      -
-        name: target_name
-        type: string
-        length: 64
-        doc: Object of interest for this observation or survey field name.
-      -
-        name: science_program
-        type: string
-        length: 64
-        doc: >
-          Observing program (survey, proposal, engineering project)
-          identifier.
-      -
-        name: tracking_ra
-        type: float
-        doc: >
-          Tracking ICRS Right Ascension of boresight in degrees. Can be NULL
-          for observations that are not on sky.
-      -
-        name: tracking_dec
-        type: float
-        doc: >
-          Tracking ICRS Declination of boresight in degrees. Can be NULL for
-          observations that are not on sky.
-      -
-        name: sky_angle
-        type: float
-        doc: >
-          Angle of the instrument focal plane on the sky in degrees. Can
-          be NULL for observations that are not on sky, or for observations
-          where the sky angle changes during the observation.
-      -
-        name: zenith_angle
-        type: float
-        doc: >
-          Angle in degrees from the zenith at the start of the exposure.
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  skymap:
-    doc: >
-      A set of tracts and patches that subdivide the sky into rectangular
-      regions with simple projections and intentional overlaps.
-    keys:
-      -
-        name: name
-        type: string
-        length: 64
-      -
-        name: hash
-        type: hash
-        nbytes: 40
-        doc: >
-          A hash of the skymap's parameters.
-    metadata:
-      - name: tract_max
-        type: int
-        doc: >
-          Maximum ID for tracts in this skymap, exclusive.
-      - name: patch_nx_max
-        type: int
-        doc: >
-          Number of patches in the x direction in each tract.
-      - name: patch_ny_max
-        type: int
-        doc: >
-          Number of patches in the y direction in each tract.
-    governor: true
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.governor.BasicGovernorDimensionRecordStorage
-
-  tract:
-    doc: >
-      A large rectangular region mapped to the sky with a single map
-      projection, associated with a particular skymap.
-    keys:
-      -
-        name: id
-        type: int
-    requires: [skymap]
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  patch:
-    doc: >
-      A rectangular region within a tract.
-    keys:
-      -
-        name: id
-        type: int
-    requires: [skymap, tract]
-    metadata:
-      -
-        name: cell_x
-        type: int
-        nullable: false
-        doc: >
-          Which column this patch occupies in the tract's grid of patches.
-      -
-        name: cell_y
-        type: int
-        nullable: false
-        doc: >
-          Which row this patch occupies in the tract's grid of patches.
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  visit_detector_region:
-    doc: >
-      A many-to-many join table that provides region information for
-      visit-detector combinations.
-    requires: [visit, detector]
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  visit_system:
-    doc: >
-      A system of self-consistent visit definitions, within which each
-      exposure should appear at most once.
-    keys:
-      -
-        name: id
-        type: int
-      -
-        name: name
-        type: string
-        length: 32
-    requires: [instrument]
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.caching.CachingDimensionRecordStorage
-      nested:
-        cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-  visit_definition:
-    doc: >
-      A many-to-many join table that relates exposures to the visits they
-      belong to.
-    requires: [exposure, visit_system]
-    implies: [visit]
-    always_join: true
-    storage:
-      cls: lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage
-
-topology:
-  spatial:
-    observation_regions: [visit_detector_region, visit]
-    skymap_regions: [patch, tract]
-
-  temporal:
-    observation_timespans: [exposure, visit]
-
-packers:
-  visit_detector:
-    fixed: [instrument]
-    dimensions: [instrument, visit, detector]
-    cls: lsst.daf.butler.instrument.ObservationDimensionPacker
-  exposure_detector:
-    fixed: [instrument]
-    dimensions: [instrument, exposure, detector]
-    cls: lsst.daf.butler.instrument.ObservationDimensionPacker
-  tract_patch:
-    fixed: [skymap]
-    dimensions: [skymap, tract, patch]
-    cls: lsst.skymap.packers.SkyMapDimensionPacker
-  tract_patch_band:
-    fixed: [skymap]
-    dimensions: [skymap, tract, patch, band]
-    cls: lsst.skymap.packers.SkyMapDimensionPacker
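
For reference, the helpers touched by patch 3 can be exercised on their own;
a minimal sketch (assuming an environment where lsst.daf.butler is installed
and packages a universe of the requested version):

    from lsst.daf.butler_migrate._dimensions_json_utils import (
        historical_dimensions_resource,
        load_historical_dimension_universe_json,
    )

    # Location of the packaged YAML for universe version 2; the resource is
    # not guaranteed to exist for an arbitrary version number.
    print(historical_dimensions_resource(2))

    # The same universe serialized as a JSON string, ready for comparison
    # against the dimensions.json stored in a registry.
    universe_json = load_historical_dimension_universe_json(2)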