From 07b1d118af0fcd896940059fee0907181d5d676f Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 13:05:04 -0700 Subject: [PATCH 01/16] Some simple docstyle fixes --- python/lsst/daf/butler/_butler.py | 3 +- python/lsst/daf/butler/_butlerConfig.py | 5 ++- .../lsst/daf/butler/_deferredDatasetHandle.py | 5 ++- python/lsst/daf/butler/cli/butler.py | 16 ++++++---- python/lsst/daf/butler/cli/cliLog.py | 6 ++-- python/lsst/daf/butler/cli/cmd/commands.py | 1 - python/lsst/daf/butler/cli/utils.py | 31 ++++++++++++------- .../daf/butler/core/dimensions/_universe.py | 2 +- python/lsst/daf/butler/core/mappingFactory.py | 4 +-- python/lsst/daf/butler/core/quantum.py | 2 +- python/lsst/daf/butler/core/storedFileInfo.py | 2 +- .../daf/butler/datastores/chainedDatastore.py | 3 -- .../daf/butler/datastores/fileDatastore.py | 6 +--- .../butler/datastores/inMemoryDatastore.py | 2 -- .../daf/butler/formatters/astropyTable.py | 2 +- python/lsst/daf/butler/formatters/file.py | 1 - python/lsst/daf/butler/registry/_dbAuth.py | 3 +- python/lsst/daf/butler/registry/_registry.py | 2 +- .../butler/registry/interfaces/_dimensions.py | 3 +- .../daf/butler/registry/interfaces/_opaque.py | 2 +- python/lsst/daf/butler/registry/managers.py | 3 +- .../daf/butler/registry/obscore/_manager.py | 1 - .../daf/butler/registry/obscore/pgsphere.py | 8 ++--- .../butler/registry/queries/_query_context.py | 4 +-- .../daf/butler/registry/queries/_structs.py | 2 +- .../queries/expressions/normalForm.py | 2 +- .../daf/butler/registry/tests/_registry.py | 5 +-- python/lsst/daf/butler/registry/wildcards.py | 2 +- python/lsst/daf/butler/script/_associate.py | 1 - python/lsst/daf/butler/script/ingest_files.py | 1 - .../butler/script/register_dataset_type.py | 1 - .../daf/butler/script/removeCollections.py | 3 +- .../lsst/daf/butler/tests/_datasetsHelper.py | 8 ++--- .../daf/butler/tests/_examplePythonTypes.py | 4 +-- python/lsst/daf/butler/tests/_testRepo.py | 1 - 
.../lsst/daf/butler/tests/cliCmdTestBase.py | 9 ++++-- .../lsst/daf/butler/tests/cliLogTestBase.py | 22 +++++++------ .../lsst/daf/butler/tests/testFormatters.py | 11 ++++--- 38 files changed, 97 insertions(+), 92 deletions(-) diff --git a/python/lsst/daf/butler/_butler.py b/python/lsst/daf/butler/_butler.py index eb83176548..e51c7f0d4d 100644 --- a/python/lsst/daf/butler/_butler.py +++ b/python/lsst/daf/butler/_butler.py @@ -19,8 +19,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -""" -Butler top level classes. +"""Butler top level classes. """ from __future__ import annotations diff --git a/python/lsst/daf/butler/_butlerConfig.py b/python/lsst/daf/butler/_butlerConfig.py index ae14f8fb7b..6b2f17800e 100644 --- a/python/lsst/daf/butler/_butlerConfig.py +++ b/python/lsst/daf/butler/_butlerConfig.py @@ -19,8 +19,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -""" -Configuration classes specific to the Butler +"""Configuration classes specific to the Butler. """ from __future__ import annotations @@ -41,7 +40,7 @@ class ButlerConfig(Config): - """Contains the configuration for a `Butler` + """Contains the configuration for a `Butler`. The configuration is read and merged with default configurations for the particular classes. The defaults are read according to the rules diff --git a/python/lsst/daf/butler/_deferredDatasetHandle.py b/python/lsst/daf/butler/_deferredDatasetHandle.py index b388c26806..799f61dadf 100644 --- a/python/lsst/daf/butler/_deferredDatasetHandle.py +++ b/python/lsst/daf/butler/_deferredDatasetHandle.py @@ -19,8 +19,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -""" -Module containing classes used with deferring dataset loading +"""Module containing classes used with deferring dataset loading. 
""" from __future__ import annotations @@ -45,7 +44,7 @@ def get( parameters: dict | None = None, storageClass: str | StorageClass | None = None, ) -> Any: - """Retrieves the dataset pointed to by this handle + """Retrieves the dataset pointed to by this handle. This handle may be used multiple times, possibly with different parameters. diff --git a/python/lsst/daf/butler/cli/butler.py b/python/lsst/daf/butler/cli/butler.py index 4f4ccaf94a..38f33b0302 100755 --- a/python/lsst/daf/butler/cli/butler.py +++ b/python/lsst/daf/butler/cli/butler.py @@ -82,7 +82,8 @@ def _importPlugin(pluginName: str) -> types.ModuleType | type | None | click.Com class LoaderCLI(click.MultiCommand, abc.ABC): """Extends `click.MultiCommand`, which dispatches to subcommands, to load - subcommands at runtime.""" + subcommands at runtime. + """ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -90,8 +91,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: @property @abc.abstractmethod def localCmdPkg(self) -> str: - """localCmdPkg identifies the location of the commands that are in this - package. `getLocalCommands` assumes that the commands can be found in + """Identifies the location of the commands that are in this + package. + + `getLocalCommands` assumes that the commands can be found in `localCmdPkg.__all__`, if this is not the case then getLocalCommands should be overridden. @@ -177,7 +180,8 @@ def get_command(self, ctx: click.Context, name: str) -> click.Command | None: def _setupLogging(self, ctx: click.Context | None) -> None: """Init the logging system and config it for the command. - Subcommands may further configure the log settings.""" + Subcommands may further configure the log settings. 
+ """ if isinstance(ctx, click.Context): CliLog.initLog( longlog=ctx.params.get(long_log_option.name(), False), @@ -228,7 +232,8 @@ def _cmdNameToFuncName(cls, commandName: str) -> str: """Convert butler command name to function name: change dashes (used in commands) to underscores (used in functions), and for local-package commands names that conflict with python keywords, change the local, - legal, function name to the command name.""" + legal, function name to the command name. + """ return commandName.replace("-", "_") @staticmethod @@ -308,7 +313,6 @@ def _raiseIfDuplicateCommands(commands: defaultdict[str, list[str]]) -> None: Raised if a command is offered by more than one package, with an error message to be displayed to the user. """ - msg = "" for command, packages in commands.items(): if len(packages) > 1: diff --git a/python/lsst/daf/butler/cli/cliLog.py b/python/lsst/daf/butler/cli/cliLog.py index fe4d399896..cc2fc1099a 100644 --- a/python/lsst/daf/butler/cli/cliLog.py +++ b/python/lsst/daf/butler/cli/cliLog.py @@ -69,7 +69,8 @@ class CliLog: This class can perform log uninitialization, which allows command line interface code that initializes logging to run unit tests that execute in - batches, without affecting other unit tests. See ``resetLog``.""" + batches, without affecting other unit tests. See ``resetLog``. + """ defaultLsstLogLevel = lsstLog.FATAL if lsstLog is not None else None @@ -394,7 +395,8 @@ def __repr__(self) -> str: @classmethod def _recordComponentSetting(cls, component: str | None) -> None: """Cache current levels for the given component in the list of - component levels.""" + component levels. 
+ """ componentSettings = cls.ComponentSettings(component) cls._componentSettings.append(componentSettings) diff --git a/python/lsst/daf/butler/cli/cmd/commands.py b/python/lsst/daf/butler/cli/cmd/commands.py index 6696e85806..ee92d50932 100644 --- a/python/lsst/daf/butler/cli/cmd/commands.py +++ b/python/lsst/daf/butler/cli/cmd/commands.py @@ -112,7 +112,6 @@ def associate(**kwargs: Any) -> None: @options_file_option() def butler_import(*args: Any, **kwargs: Any) -> None: """Import data into a butler repository.""" - # `reuse_ids`` is not used by `butlerImport`. reuse_ids = kwargs.pop("reuse_ids", False) if reuse_ids: diff --git a/python/lsst/daf/butler/cli/utils.py b/python/lsst/daf/butler/cli/utils.py index 12b5d0800a..a05e2af721 100644 --- a/python/lsst/daf/butler/cli/utils.py +++ b/python/lsst/daf/butler/cli/utils.py @@ -144,7 +144,8 @@ class LogCliRunner(click.testing.CliRunner): lsst.log modules can not be set back to an uninitialized state (python logging modules can be set back to NOTSET), instead they are set to - `CliLog.defaultLsstLogLevel`.""" + `CliLog.defaultLsstLogLevel`. + """ def invoke(self, *args: Any, **kwargs: Any) -> Any: result = super().invoke(*args, **kwargs) @@ -153,7 +154,7 @@ def invoke(self, *args: Any, **kwargs: Any) -> Any: def clickResultMsg(result: click.testing.Result) -> str: - """Get a standard assert message from a click result + """Get a standard assert message from a click result. Parameters ---------- @@ -604,7 +605,8 @@ def __init__( def convert(self, value: str, param: click.Parameter | None, ctx: click.Context | None) -> Any: """Called by click.ParamType to "convert values through types". - `click.Path` uses this step to verify Path conditions.""" + `click.Path` uses this step to verify Path conditions. 
+ """ if self.mustNotExist and os.path.exists(value): self.fail(f'Path "{value}" should not exist.') return super().convert(value, param, ctx) @@ -722,18 +724,21 @@ def __init__(self, *param_decls: Any, **kwargs: Any) -> None: def name(self) -> str: """Get the name that will be passed to the command function for this - option.""" + option. + """ return cast(str, self._name) def opts(self) -> list[str]: """Get the flags that will be used for this option on the command - line.""" + line. + """ return self._opts @property def help(self) -> str: """Get the help text for this option. Returns an empty string if no - help was defined.""" + help was defined. + """ return self.partialOpt.keywords.get("help", "") def __call__(self, *args: Any, **kwargs: Any) -> Any: @@ -742,7 +747,8 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: class MWArgumentDecorator: """Wraps the click.argument decorator to enable shared arguments to be - declared.""" + declared. + """ def __init__(self, *param_decls: Any, **kwargs: Any) -> None: self._helpText = kwargs.pop("help", None) @@ -761,7 +767,8 @@ def decorator(f: Any) -> Any: class MWCommand(click.Command): """Command subclass that stores a copy of the args list for use by the - command.""" + command. + """ extra_epilog: str | None = None @@ -882,7 +889,8 @@ class ButlerCommand(MWCommand): class OptionGroup: """Base class for an option group decorator. Requires the option group - subclass to have a property called `decorator`.""" + subclass to have a property called `decorator`. + """ decorators: list[Any] @@ -944,7 +952,8 @@ def __init__(self) -> None: @staticmethod def getFrom(ctx: click.Context) -> Any: """If needed, initialize `ctx.obj` with a new `MWCtxObj`, and return - the new or already existing `MWCtxObj`.""" + the new or already existing `MWCtxObj`. 
+ """ if ctx.obj is not None: return ctx.obj ctx.obj = MWCtxObj() @@ -1046,7 +1055,7 @@ def sortAstropyTable(table: Table, dimensions: list[Dimension], sort_first: list order: 1. the provided named columns 2. spatial and temporal columns - 3. the rest of the columns + 3. the rest of the columns. The table is sorted in-place, and is also returned for convenience. diff --git a/python/lsst/daf/butler/core/dimensions/_universe.py b/python/lsst/daf/butler/core/dimensions/_universe.py index df4e608ae7..bd0d611669 100644 --- a/python/lsst/daf/butler/core/dimensions/_universe.py +++ b/python/lsst/daf/butler/core/dimensions/_universe.py @@ -197,7 +197,7 @@ def namespace(self) -> str: return self._namespace def isCompatibleWith(self, other: DimensionUniverse) -> bool: - """Check compatibility between this `DimensionUniverse` and another + """Check compatibility between this `DimensionUniverse` and another. Parameters ---------- diff --git a/python/lsst/daf/butler/core/mappingFactory.py b/python/lsst/daf/butler/core/mappingFactory.py index bee5bfb407..4d531709ba 100644 --- a/python/lsst/daf/butler/core/mappingFactory.py +++ b/python/lsst/daf/butler/core/mappingFactory.py @@ -33,8 +33,8 @@ class MappingFactory: - """ - Register the mapping of some key to a python type and retrieve instances. + """Register the mapping of some key to a python type and retrieve + instances. Enables instances of these classes to be retrieved from the factory later. The class can be specified as an object, class or string. 
diff --git a/python/lsst/daf/butler/core/quantum.py b/python/lsst/daf/butler/core/quantum.py index 1728b9f8ac..d3cdb77e89 100644 --- a/python/lsst/daf/butler/core/quantum.py +++ b/python/lsst/daf/butler/core/quantum.py @@ -49,7 +49,7 @@ def _reconstructDatasetRef( reconstitutedDimensions: dict[int, tuple[str, DimensionRecord]], universe: DimensionUniverse, ) -> DatasetRef: - """Reconstruct a DatasetRef stored in a Serialized Quantum""" + """Reconstruct a DatasetRef stored in a Serialized Quantum.""" # Reconstruct the dimension records records = {} for dId in ids: diff --git a/python/lsst/daf/butler/core/storedFileInfo.py b/python/lsst/daf/butler/core/storedFileInfo.py index c381d2d6df..fce93530c3 100644 --- a/python/lsst/daf/butler/core/storedFileInfo.py +++ b/python/lsst/daf/butler/core/storedFileInfo.py @@ -87,7 +87,7 @@ def to_record(self) -> dict[str, Any]: @property def dataset_id(self) -> DatasetId: - """Dataset ID associated with this record (`DatasetId`)""" + """Dataset ID associated with this record (`DatasetId`).""" raise NotImplementedError() def update(self, **kwargs: Any) -> StoredDatastoreItemInfo: diff --git a/python/lsst/daf/butler/datastores/chainedDatastore.py b/python/lsst/daf/butler/datastores/chainedDatastore.py index c4e7a14b8c..c2e1e4a5d6 100644 --- a/python/lsst/daf/butler/datastores/chainedDatastore.py +++ b/python/lsst/daf/butler/datastores/chainedDatastore.py @@ -140,7 +140,6 @@ def setConfigRoot(cls, root: str, config: Config, full: Config, overwrite: bool will not be overridden by this method if ``overwrite`` is `False`. This allows explicit values set in external configs to be retained. """ - # Extract the part of the config we care about updating datastoreConfig = DatastoreConfig(config, mergeDefaults=False) @@ -364,7 +363,6 @@ def get( ValueError Formatter failed to process the dataset. 
""" - for datastore in self.datastores: try: inMemoryObject = datastore.get(ref, parameters, storageClass=storageClass) @@ -880,7 +878,6 @@ def validateConfiguration( ----- This method checks each datastore in turn. """ - # Need to catch each of the datastore outputs and ensure that # all are tested. failures = [] diff --git a/python/lsst/daf/butler/datastores/fileDatastore.py b/python/lsst/daf/butler/datastores/fileDatastore.py index 77ae0fe898..48286a3210 100644 --- a/python/lsst/daf/butler/datastores/fileDatastore.py +++ b/python/lsst/daf/butler/datastores/fileDatastore.py @@ -515,7 +515,6 @@ def _get_expected_dataset_locations_info(self, ref: DatasetRef) -> list[tuple[Lo Will not work for files that have been ingested without using the standard file template or default formatter. """ - # If we have a component ref we always need to ask the questions # of the composite. If the composite is disassembled this routine # should return all components. If the composite was not @@ -766,7 +765,7 @@ def _overrideTransferMode(self, *datasets: FileDataset, transfer: str | None = N return transfer def _pathInStore(self, path: ResourcePathExpression) -> str | None: - """Return path relative to datastore root + """Return path relative to datastore root. Parameters ---------- @@ -1920,7 +1919,6 @@ def _locations_to_URI( If a passed in `StoredFileInfo`'s ``component`` is `None` (this is unexpected). """ - guessing = False uris = DatasetRefURIs() @@ -2271,7 +2269,6 @@ def put(self, inMemoryDataset: Any, ref: DatasetRef) -> None: allow `ChainedDatastore` to put to multiple datastores without requiring that every datastore accepts the dataset. """ - doDisassembly = self.composites.shouldBeDisassembled(ref) # doDisassembly = True @@ -2766,7 +2763,6 @@ def validateConfiguration( This method checks that all the supplied entities have valid file templates and also have formatters defined. 
""" - templateFailed = None try: self.templates.validateTemplates(entities, logFailures=logFailures) diff --git a/python/lsst/daf/butler/datastores/inMemoryDatastore.py b/python/lsst/daf/butler/datastores/inMemoryDatastore.py index fe206eb166..f05c6474bf 100644 --- a/python/lsst/daf/butler/datastores/inMemoryDatastore.py +++ b/python/lsst/daf/butler/datastores/inMemoryDatastore.py @@ -311,7 +311,6 @@ def get( ValueError Formatter failed to process the dataset. """ - log.debug("Retrieve %s from %s with parameters %s", ref, self.name, parameters) realID, storedItemInfo = self._get_dataset_info(ref) @@ -434,7 +433,6 @@ def getURIs(self, ref: DatasetRef, predict: bool = False) -> DatasetRefURIs: The URIs returned for in-memory datastores are not usable but provide an indication of the associated dataset. """ - # Include the dataID as a URI query query = urlencode(ref.dataId) diff --git a/python/lsst/daf/butler/formatters/astropyTable.py b/python/lsst/daf/butler/formatters/astropyTable.py index 1e5138d6f2..2bf8359186 100644 --- a/python/lsst/daf/butler/formatters/astropyTable.py +++ b/python/lsst/daf/butler/formatters/astropyTable.py @@ -29,7 +29,7 @@ class AstropyTableFormatter(FileFormatter): """Interface for reading and writing astropy.Table objects - in either ECSV or FITS format + in either ECSV or FITS format. """ supportedWriteParameters = frozenset({"format"}) diff --git a/python/lsst/daf/butler/formatters/file.py b/python/lsst/daf/butler/formatters/file.py index 6df140b74e..392a90447b 100644 --- a/python/lsst/daf/butler/formatters/file.py +++ b/python/lsst/daf/butler/formatters/file.py @@ -246,7 +246,6 @@ def read(self, component: str | None = None) -> Any: NotImplementedError Formatter does not implement a method to read from files. 
""" - # Read the file naively path = self.fileDescriptor.location.path data = self._readFile(path, self.fileDescriptor.storageClass.pytype) diff --git a/python/lsst/daf/butler/registry/_dbAuth.py b/python/lsst/daf/butler/registry/_dbAuth.py index 535df5d52a..ab5dd79017 100644 --- a/python/lsst/daf/butler/registry/_dbAuth.py +++ b/python/lsst/daf/butler/registry/_dbAuth.py @@ -162,7 +162,6 @@ def getAuth( Examples -------- - The connection URL ``postgresql://user@host.example.com:5432/my_database`` matches against the identical string as a pattern. Other patterns that would match @@ -271,7 +270,7 @@ def getUrl(self, url: str) -> str: dictionary is missing elements, the authorization file is misconfigured, or no matching authorization is found. - See also + See Also -------- getAuth """ diff --git a/python/lsst/daf/butler/registry/_registry.py b/python/lsst/daf/butler/registry/_registry.py index 0bf026345e..29ab5a3453 100644 --- a/python/lsst/daf/butler/registry/_registry.py +++ b/python/lsst/daf/butler/registry/_registry.py @@ -746,7 +746,7 @@ def insertDatasets( expand: bool = True, idGenerationMode: DatasetIdGenEnum = DatasetIdGenEnum.UNIQUE, ) -> list[DatasetRef]: - """Insert one or more datasets into the `Registry` + """Insert one or more datasets into the `Registry`. This always adds new datasets; to associate existing datasets with a new collection, use ``associate``. diff --git a/python/lsst/daf/butler/registry/interfaces/_dimensions.py b/python/lsst/daf/butler/registry/interfaces/_dimensions.py index a8e0f3f8c9..a311b153b5 100644 --- a/python/lsst/daf/butler/registry/interfaces/_dimensions.py +++ b/python/lsst/daf/butler/registry/interfaces/_dimensions.py @@ -589,7 +589,8 @@ def elements(self) -> tuple[DatabaseDimensionElement, DatabaseDimensionElement]: @abstractmethod def clearCaches(self) -> None: """Clear any cached state about which overlaps have been - materialized.""" + materialized. 
+ """ raise NotImplementedError() @abstractmethod diff --git a/python/lsst/daf/butler/registry/interfaces/_opaque.py b/python/lsst/daf/butler/registry/interfaces/_opaque.py index 72b872f45e..4f12cbb495 100644 --- a/python/lsst/daf/butler/registry/interfaces/_opaque.py +++ b/python/lsst/daf/butler/registry/interfaces/_opaque.py @@ -53,7 +53,7 @@ def __init__(self, name: str): @abstractmethod def insert(self, *data: dict, transaction: DatastoreTransaction | None = None) -> None: - """Insert records into the table + """Insert records into the table. Parameters ---------- diff --git a/python/lsst/daf/butler/registry/managers.py b/python/lsst/daf/butler/registry/managers.py index e493039a60..076f7bc399 100644 --- a/python/lsst/daf/butler/registry/managers.py +++ b/python/lsst/daf/butler/registry/managers.py @@ -449,7 +449,8 @@ def as_dict(self) -> Mapping[str, VersionedExtension]: def refresh(self) -> None: """Refresh all in-memory state by querying the database or clearing - caches.""" + caches. + """ self.dimensions.clearCaches() self.collections.refresh() self.datasets.refresh() diff --git a/python/lsst/daf/butler/registry/obscore/_manager.py b/python/lsst/daf/butler/registry/obscore/_manager.py index 2ec6692591..a52c9f4c26 100644 --- a/python/lsst/daf/butler/registry/obscore/_manager.py +++ b/python/lsst/daf/butler/registry/obscore/_manager.py @@ -289,7 +289,6 @@ def _populate(self, refs: Iterable[DatasetRef], context: SqlQueryContext) -> int def _check_dataset_run(self, run: str) -> bool: """Check that specified run collection matches know patterns.""" - if not self.run_patterns: # Empty list means take anything. 
return True diff --git a/python/lsst/daf/butler/registry/obscore/pgsphere.py b/python/lsst/daf/butler/registry/obscore/pgsphere.py index f790aa8fc3..6faa39c894 100644 --- a/python/lsst/daf/butler/registry/obscore/pgsphere.py +++ b/python/lsst/daf/butler/registry/obscore/pgsphere.py @@ -50,11 +50,11 @@ class PgSpherePoint(UserDefinedType): cache_ok = True def get_col_spec(self, **kw: Any) -> str: - """Return name of the column type""" + """Return name of the column type.""" return "SPOINT" def bind_processor(self, dialect: sqlalchemy.engine.Dialect) -> Callable: - """Return processor method for bind values""" + """Return processor method for bind values.""" def _process(value: LonLat | None) -> str | None: if value is None: @@ -78,11 +78,11 @@ class PgSpherePolygon(UserDefinedType): cache_ok = True def get_col_spec(self, **kw: Any) -> str: - """Return name of the column type""" + """Return name of the column type.""" return "SPOLY" def bind_processor(self, dialect: sqlalchemy.engine.Dialect) -> Callable: - """Return processor method for bind values""" + """Return processor method for bind values.""" def _process(value: Sequence[LonLat] | None) -> str | None: if value is None: diff --git a/python/lsst/daf/butler/registry/queries/_query_context.py b/python/lsst/daf/butler/registry/queries/_query_context.py index e94b4fe3cd..a93430102c 100644 --- a/python/lsst/daf/butler/registry/queries/_query_context.py +++ b/python/lsst/daf/butler/registry/queries/_query_context.py @@ -337,7 +337,7 @@ def make_spatial_region_skypix_predicate( region: lsst.sphgeom.Region, ) -> Predicate: """Return a `~lsst.daf.relation.column_expressions.Predicate` that - tests whether two region columns overlap + tests whether two region columns overlap. 
This operation only works with `iteration engines `; it is usually used to refine the @@ -375,7 +375,7 @@ def make_spatial_region_overlap_predicate( rhs: ColumnExpression, ) -> Predicate: """Return a `~lsst.daf.relation.column_expressions.Predicate` that - tests whether two regions overlap + tests whether two regions overlap. This operation only works with `iteration engines `; it is usually diff --git a/python/lsst/daf/butler/registry/queries/_structs.py b/python/lsst/daf/butler/registry/queries/_structs.py index 4a7b386c02..cf28ad96b8 100644 --- a/python/lsst/daf/butler/registry/queries/_structs.py +++ b/python/lsst/daf/butler/registry/queries/_structs.py @@ -170,7 +170,7 @@ class OrderByClauseColumn: @dataclasses.dataclass(frozen=True, eq=False) class OrderByClause: - """Class for information about columns in ORDER BY clause""" + """Class for information about columns in ORDER BY clause.""" @classmethod def parse_general(cls, order_by: Iterable[str], graph: DimensionGraph) -> OrderByClause: diff --git a/python/lsst/daf/butler/registry/queries/expressions/normalForm.py b/python/lsst/daf/butler/registry/queries/expressions/normalForm.py index 7d829605ce..830684c577 100644 --- a/python/lsst/daf/butler/registry/queries/expressions/normalForm.py +++ b/python/lsst/daf/butler/registry/queries/expressions/normalForm.py @@ -747,7 +747,7 @@ def _normalizeDispatchBinary( def unwrap(self) -> Node: """Return an transformed expression tree. - Return + Return: ------ tree : `Node` Tree node representing the same expression (and form) as ``self``. 
diff --git a/python/lsst/daf/butler/registry/tests/_registry.py b/python/lsst/daf/butler/registry/tests/_registry.py index ef4fbf40a4..e52b0f9c0c 100644 --- a/python/lsst/daf/butler/registry/tests/_registry.py +++ b/python/lsst/daf/butler/registry/tests/_registry.py @@ -959,7 +959,8 @@ def testNestedTransaction(self): def testInstrumentDimensions(self): """Test queries involving only instrument dimensions, with no joins to - skymap.""" + skymap. + """ registry = self.makeRegistry() # need a bunch of dimensions and datasets for test @@ -2562,7 +2563,7 @@ def testQueryIntRangeExpressions(self): def testQueryResultSummaries(self): """Test summary methods like `count`, `any`, and `explain_no_results` - on `DataCoordinateQueryResults` and `DatasetQueryResults` + on `DataCoordinateQueryResults` and `DatasetQueryResults`. """ registry = self.makeRegistry() self.loadData(registry, "base.yaml") diff --git a/python/lsst/daf/butler/registry/wildcards.py b/python/lsst/daf/butler/registry/wildcards.py index bc381315fb..7622f84184 100644 --- a/python/lsst/daf/butler/registry/wildcards.py +++ b/python/lsst/daf/butler/registry/wildcards.py @@ -367,7 +367,7 @@ def __repr__(self) -> str: @dataclasses.dataclass(frozen=True) class CollectionWildcard: - """A validated wildcard for collection names + """A validated wildcard for collection names. 
The `from_expression` method should almost always be used to construct instances, as the regular constructor performs no checking of inputs (and diff --git a/python/lsst/daf/butler/script/_associate.py b/python/lsst/daf/butler/script/_associate.py index 024c9b2e5f..838a6e83aa 100644 --- a/python/lsst/daf/butler/script/_associate.py +++ b/python/lsst/daf/butler/script/_associate.py @@ -36,7 +36,6 @@ def associate( find_first: bool, ) -> None: """Add existing datasets to a CHAINED collection.""" - butler = Butler(repo, writeable=True) butler.registry.registerCollection(collection, CollectionType.TAGGED) diff --git a/python/lsst/daf/butler/script/ingest_files.py b/python/lsst/daf/butler/script/ingest_files.py index 1fca50ec00..8784423a66 100644 --- a/python/lsst/daf/butler/script/ingest_files.py +++ b/python/lsst/daf/butler/script/ingest_files.py @@ -82,7 +82,6 @@ def ingest_files( transfer : `str`, optional Transfer mode to use for ingest. """ - # Check that the formatter can be imported -- validate this as soon # as possible before we read a potentially large table file. if formatter: diff --git a/python/lsst/daf/butler/script/register_dataset_type.py b/python/lsst/daf/butler/script/register_dataset_type.py index 333e07c4f2..bf297132b6 100644 --- a/python/lsst/daf/butler/script/register_dataset_type.py +++ b/python/lsst/daf/butler/script/register_dataset_type.py @@ -63,7 +63,6 @@ def register_dataset_type( be created by this command. They are always derived from the composite dataset type. 
""" - butler = Butler(repo, writeable=True) composite, component = DatasetType.splitDatasetTypeName(dataset_type) diff --git a/python/lsst/daf/butler/script/removeCollections.py b/python/lsst/daf/butler/script/removeCollections.py index 1e5d64ff92..5358c26f3e 100644 --- a/python/lsst/daf/butler/script/removeCollections.py +++ b/python/lsst/daf/butler/script/removeCollections.py @@ -49,7 +49,8 @@ class RemoveCollectionResult: @dataclass class CollectionInfo: """Lightweight container to hold the name and type of non-run - collections, as well as the names of run collections.""" + collections, as well as the names of run collections. + """ nonRunCollections: Table runCollections: Table diff --git a/python/lsst/daf/butler/tests/_datasetsHelper.py b/python/lsst/daf/butler/tests/_datasetsHelper.py index b8d6e76ab4..a644a9d3f7 100644 --- a/python/lsst/daf/butler/tests/_datasetsHelper.py +++ b/python/lsst/daf/butler/tests/_datasetsHelper.py @@ -41,7 +41,7 @@ class DatasetTestHelper: - """Helper methods for Datasets""" + """Helper methods for Datasets.""" def makeDatasetRef( self, @@ -54,7 +54,7 @@ def makeDatasetRef( run: str | None = None, conform: bool = True, ) -> DatasetRef: - """Make a DatasetType and wrap it in a DatasetRef for a test""" + """Make a DatasetType and wrap it in a DatasetRef for a test.""" return self._makeDatasetRef( datasetTypeName, dimensions, @@ -94,7 +94,7 @@ def _makeDatasetRef( class DatastoreTestHelper: - """Helper methods for Datastore tests""" + """Helper methods for Datastore tests.""" root: str | None config: Config @@ -102,7 +102,7 @@ class DatastoreTestHelper: configFile: str def setUpDatastoreTests(self, registryClass: type[Registry], configClass: type[Config]) -> None: - """Shared setUp code for all Datastore tests""" + """Shared setUp code for all Datastore tests.""" self.registry = registryClass() self.config = configClass(self.configFile) diff --git a/python/lsst/daf/butler/tests/_examplePythonTypes.py 
b/python/lsst/daf/butler/tests/_examplePythonTypes.py index 63463fab72..d861f7ecad 100644 --- a/python/lsst/daf/butler/tests/_examplePythonTypes.py +++ b/python/lsst/daf/butler/tests/_examplePythonTypes.py @@ -286,7 +286,7 @@ class MetricsExampleDataclass: class ListDelegate(StorageClassDelegate): - """Parameter handler for list parameters""" + """Parameter handler for list parameters.""" def handleParameters(self, inMemoryDataset: Any, parameters: Mapping[str, Any] | None = None) -> Any: """Modify the in-memory dataset using the supplied parameters, @@ -316,7 +316,7 @@ def handleParameters(self, inMemoryDataset: Any, parameters: Mapping[str, Any] | class MetricsDelegate(StorageClassDelegate): - """Parameter handler for parameters using Metrics""" + """Parameter handler for parameters using Metrics.""" def handleParameters(self, inMemoryDataset: Any, parameters: Mapping[str, Any] | None = None) -> Any: """Modify the in-memory dataset using the supplied parameters, diff --git a/python/lsst/daf/butler/tests/_testRepo.py b/python/lsst/daf/butler/tests/_testRepo.py index 0c218fe14d..1903bffd7b 100644 --- a/python/lsst/daf/butler/tests/_testRepo.py +++ b/python/lsst/daf/butler/tests/_testRepo.py @@ -171,7 +171,6 @@ def _makeRecords(dataIds: Mapping[str, Iterable], universe: DimensionUniverse) - `~lsst.daf.butler.DimensionRecord` for each input name. Related dimensions (e.g., instruments and detectors) are linked arbitrarily. """ - # Create values for all dimensions that are (recursive) required or implied # dependencies of the given ones. 
complete_data_id_values = {} diff --git a/python/lsst/daf/butler/tests/cliCmdTestBase.py b/python/lsst/daf/butler/tests/cliCmdTestBase.py index e69ad6c805..439d66a8df 100644 --- a/python/lsst/daf/butler/tests/cliCmdTestBase.py +++ b/python/lsst/daf/butler/tests/cliCmdTestBase.py @@ -61,21 +61,24 @@ def command() -> click.Command: @property def cli(self) -> click.core.Command: """Get the command line interface function under test, can be - overridden to test CLIs other than butler.""" + overridden to test CLIs other than butler. + """ return butler.cli @property def mock(self) -> unittest.mock.Mock: """Get the mock object to use in place of `mockFuncName`. If not provided will use the default provided by `unittest.mock.patch`, this - is usually a `unittest.mock.MagicMock`.""" + is usually a `unittest.mock.MagicMock`. + """ return DEFAULT @property @abc.abstractmethod def mockFuncName(self) -> str: """The qualified name of the function to mock, will be passed to - unittest.mock.patch, see python docs for details.""" + unittest.mock.patch, see python docs for details. + """ pass def setUp(self) -> None: diff --git a/python/lsst/daf/butler/tests/cliLogTestBase.py b/python/lsst/daf/butler/tests/cliLogTestBase.py index 3cdfa88746..affcff232a 100644 --- a/python/lsst/daf/butler/tests/cliLogTestBase.py +++ b/python/lsst/daf/butler/tests/cliLogTestBase.py @@ -120,7 +120,8 @@ def setUp(self) -> None: class PythonLogger: """Keeps track of log level of a component and number of handlers - attached to it at the time this object was initialized.""" + attached to it at the time this object was initialized. + """ def __init__(self, component: str | None) -> None: self.logger = logging.getLogger(component) @@ -128,7 +129,8 @@ def __init__(self, component: str | None) -> None: class LsstLogger: """Keeps track of log level for a component at the time this object was - initialized.""" + initialized. 
+ """ def __init__(self, component: str) -> None: self.logger = lsstLog.getLogger(component) if lsstLog else None @@ -138,7 +140,8 @@ def runTest(self, cmd: Callable) -> None: """Test that the log context manager works with the butler cli to initialize the logging system according to cli inputs for the duration of the command execution and resets the logging system to its previous - state or expected state when command execution finishes.""" + state or expected state when command execution finishes. + """ pyRoot = self.PythonLogger(None) pyButler = self.PythonLogger("lsst.daf.butler") pyLsstRoot = self.PythonLogger("lsst") @@ -171,8 +174,8 @@ def test_butlerCliLog(self) -> None: """Test that the log context manager works with the butler cli to initialize the logging system according to cli inputs for the duration of the command execution and resets the logging system to its previous - state or expected state when command execution finishes.""" - + state or expected state when command execution finishes. + """ # Run with two different log level settings. log_levels = ( # --log-level / --log-level / expected pyroot, pylsst, pybutler, @@ -249,14 +252,14 @@ def _test_levels(self, log_levels: tuple[tuple[str, str, int, int, int, Any, Any def test_helpLogReset(self) -> None: """Verify that when a command does not execute, like when the help menu - is printed instead, that CliLog is still reset.""" - + is printed instead, that CliLog is still reset. + """ self.runTest(partial(self.runner.invoke, butlerCli, ["command-log-settings-test", "--help"])) def testLongLog(self) -> None: """Verify the timestamp is in the log messages when the --long-log - flag is set.""" - + flag is set. 
+ """ # When longlog=True, loglines start with the log level and a # timestamp with the following format: # "year-month-day T hour-minute-second.millisecond-zoneoffset" @@ -358,7 +361,6 @@ def testFileLogging(self) -> None: def testLogTty(self) -> None: """Verify that log output to terminal can be suppressed.""" - with self.runner.isolated_filesystem(): for log_tty in (True, False): # The pytest log handler interferes with the log configuration diff --git a/python/lsst/daf/butler/tests/testFormatters.py b/python/lsst/daf/butler/tests/testFormatters.py index 7c1ca93871..8f527ae136 100644 --- a/python/lsst/daf/butler/tests/testFormatters.py +++ b/python/lsst/daf/butler/tests/testFormatters.py @@ -45,7 +45,8 @@ class DoNothingFormatter(Formatter): """A test formatter that does not need to format anything and has - parameters.""" + parameters. + """ def read(self, component: str | None = None) -> Any: raise NotImplementedError("Type does not support reading") @@ -89,7 +90,8 @@ class MultipleExtensionsFormatter(SingleExtensionFormatter): class LenientYamlFormatter(YamlFormatter): """A test formatter that allows any file extension but always reads and - writes YAML.""" + writes YAML. + """ extension = ".yaml" @@ -100,7 +102,8 @@ def validateExtension(cls, location: Location) -> None: class MetricsExampleFormatter(Formatter): """A specialist test formatter for metrics that supports components - directly without assembler delegate.""" + directly without assembler delegate. + """ supportedExtensions = frozenset({".yaml", ".json"}) @@ -134,7 +137,6 @@ def read(self, component: str | None = None) -> Any: Raised when parameters passed with fileDescriptor are not supported. """ - # This formatter can not read a subset from disk because it # uses yaml or json. path = self.fileDescriptor.location.path @@ -228,7 +230,6 @@ def read(self, component: str | None = None) -> Any: Raised when parameters passed with fileDescriptor are not supported. 
""" - # This formatter can not read a subset from disk because it # uses yaml. path = self.fileDescriptor.location.path From f7431e171281c678fbeefdd100a5bacd74467dc6 Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 13:14:23 -0700 Subject: [PATCH 02/16] Fix a numpydoc section name --- python/lsst/daf/butler/core/time_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/lsst/daf/butler/core/time_utils.py b/python/lsst/daf/butler/core/time_utils.py index 133668ef94..ed3bbdfa04 100644 --- a/python/lsst/daf/butler/core/time_utils.py +++ b/python/lsst/daf/butler/core/time_utils.py @@ -74,8 +74,8 @@ def astropy_to_nsec(self, astropy_time: astropy.time.Time) -> int: time_nsec : `int` Nanoseconds since epoch. - Note - ---- + Notes + ----- Only the limited range of input times is supported by this method as it is defined useful in the context of Butler and Registry. If input time is earlier `min_time` then this method returns `min_nsec`. If input @@ -123,8 +123,8 @@ def nsec_to_astropy(self, time_nsec: int) -> astropy.time.Time: astropy_time : `astropy.time.Time` Time to be converted. - Note - ---- + Notes + ----- Usually the input time for this method is the number returned from `astropy_to_nsec` which has a limited range. 
This method does not check that the number falls in the supported range and can produce output From d24500b8744388c528415f97bf299efe239515fa Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 13:22:23 -0700 Subject: [PATCH 03/16] Some simple automated fixes to test docstrings --- tests/data/registry/spatial.py | 3 ++- tests/test_butler.py | 13 +++++------ tests/test_cliCmdConfigDump.py | 2 +- tests/test_cliCmdCreate.py | 1 - tests/test_cliCmdImport.py | 3 ++- tests/test_cliCmdPruneDatasets.py | 33 ++++++++++++++++++--------- tests/test_cliCmdQueryDataIds.py | 3 ++- tests/test_cliCmdQueryDatasets.py | 4 ++-- tests/test_cliCmdRemoveCollections.py | 2 -- tests/test_cliLog.py | 3 ++- tests/test_cliPluginLoader.py | 3 ++- tests/test_cliUtilSplitCommas.py | 26 ++++++++++++--------- tests/test_cliUtilSplitKv.py | 21 +++++++++++------ tests/test_cliUtilToUpper.py | 8 +++---- tests/test_cliUtils.py | 27 ++++++++++++++-------- tests/test_config.py | 8 ++----- tests/test_datastore.py | 30 +++++++++++++----------- tests/test_exprParserLex.py | 1 - tests/test_exprParserYacc.py | 2 -- tests/test_expressions.py | 6 ----- tests/test_formatter.py | 5 ++-- tests/test_logFormatter.py | 1 - tests/test_logging.py | 2 -- tests/test_obscore.py | 9 -------- tests/test_postgresql.py | 6 ++--- tests/test_quantumBackedButler.py | 12 ---------- tests/test_simpleButler.py | 2 -- tests/test_sqlite.py | 4 ++-- tests/test_storageClass.py | 7 +++--- tests/test_templates.py | 1 - tests/test_timespan.py | 1 - tests/test_versioning.py | 3 --- 32 files changed, 122 insertions(+), 130 deletions(-) diff --git a/tests/data/registry/spatial.py b/tests/data/registry/spatial.py index 1ac409df87..fc0df27f00 100644 --- a/tests/data/registry/spatial.py +++ b/tests/data/registry/spatial.py @@ -604,7 +604,8 @@ def func(index, center, vertices): def flatten_ranges(ranges: RangeSet) -> Iterator[int]: """Flatten an `lsst.sphgeom.RangeSet` into an iterator over pixel - indices.""" + indices. 
+ """ for begin, end in ranges: yield from range(begin, end) diff --git a/tests/test_butler.py b/tests/test_butler.py index dc6f4686a3..a9bbcf3d71 100644 --- a/tests/test_butler.py +++ b/tests/test_butler.py @@ -135,7 +135,8 @@ class TransactionTestError(Exception): class ButlerConfigTests(unittest.TestCase): """Simple tests for ButlerConfig that are not tested in any other test - cases.""" + cases. + """ def testSearchPath(self) -> None: configFile = os.path.join(TESTDIR, "config", "basic", "butler.yaml") @@ -155,7 +156,8 @@ def testSearchPath(self) -> None: class ButlerPutGetTests(TestCaseMixin): """Helper method for running a suite of put/get tests from different - butler configurations.""" + butler configurations. + """ root: str default_run = "ingésτ😺" @@ -790,7 +792,6 @@ def testStorageClassOverrideGet(self) -> None: def testPytypePutCoercion(self) -> None: """Test python type coercion on Butler.get and put.""" - # Store some data with the normal example storage class. storageClass = self.storageClassFactory.getStorageClass("StructuredDataNoComponents") datasetTypeName = "test_metric" @@ -1166,7 +1167,6 @@ def testStringification(self) -> None: def testButlerRewriteDataId(self) -> None: """Test that dataIds can be rewritten based on dimension records.""" - butler = Butler(self.tmpConfigFile, run=self.default_run) storageClass = self.storageClassFactory.getStorageClass("StructuredDataDict") @@ -1316,7 +1316,8 @@ def testImportExportVirtualComposite(self) -> None: def runImportExportTest(self, storageClass: StorageClass) -> None: """This test does an export to a temp directory and an import back - into a new temp directory repo. It does not assume a posix datastore""" + into a new temp directory repo. It does not assume a posix datastore + """ exportButler = self.runPutGetTest(storageClass, "test_metric") # Test that we must have a file extension. 
@@ -1660,7 +1661,6 @@ def testPruneDatasets(self) -> None: def testPytypeCoercion(self) -> None: """Test python type coercion on Butler.get and put.""" - # Store some data with the normal example storage class. storageClass = self.storageClassFactory.getStorageClass("StructuredDataNoComponents") datasetTypeName = "test_metric" @@ -2120,7 +2120,6 @@ def _absolute_transfer(self, transfer: str) -> None: def assertButlerTransfers(self, purge: bool = False, storageClassName: str = "StructuredData") -> None: """Test that a run can be transferred to another butler.""" - storageClass = self.storageClassFactory.getStorageClass(storageClassName) datasetTypeName = "random_data" diff --git a/tests/test_cliCmdConfigDump.py b/tests/test_cliCmdConfigDump.py index 16be24e2ca..6d4d2d86c8 100644 --- a/tests/test_cliCmdConfigDump.py +++ b/tests/test_cliCmdConfigDump.py @@ -71,7 +71,7 @@ def test_stdout(self): self.assertIn("storageClasses", cfg) def test_file(self): - """test dumping the config to a file.""" + """Test dumping the config to a file.""" with self.runner.isolated_filesystem(): result = self.runner.invoke(butler.cli, ["create", "here"]) self.assertEqual(result.exit_code, 0, clickResultMsg(result)) diff --git a/tests/test_cliCmdCreate.py b/tests/test_cliCmdCreate.py index 8cb1158188..68372bcea1 100644 --- a/tests/test_cliCmdCreate.py +++ b/tests/test_cliCmdCreate.py @@ -48,7 +48,6 @@ def test_requiredMissing(self): def test_all(self): """Test all parameters.""" - self.run_test( [ "create", diff --git a/tests/test_cliCmdImport.py b/tests/test_cliCmdImport.py index a3661b2d71..cd111c1671 100644 --- a/tests/test_cliCmdImport.py +++ b/tests/test_cliCmdImport.py @@ -53,7 +53,8 @@ def test_all(self): def test_missingArgument(self): """Verify the command fails if either of the positional arguments, - REPO or DIRECTORY, is missing.""" + REPO or DIRECTORY, is missing. 
+ """ self.run_missing(["import", "foo"], r"Error: Missing argument ['\"]DIRECTORY['\"].") diff --git a/tests/test_cliCmdPruneDatasets.py b/tests/test_cliCmdPruneDatasets.py index 4be93ed620..e4dbe8c1eb 100644 --- a/tests/test_cliCmdPruneDatasets.py +++ b/tests/test_cliCmdPruneDatasets.py @@ -210,7 +210,8 @@ def test_defaults_doContinue(self): """Test running with the default values. Verify that with the default flags that the subcommand says what it - will do, prompts for input, and says that it's done.""" + will do, prompts for input, and says that it's done. + """ self.run_test( cliArgs=["myCollection", "--unstore"], exPruneDatasetsCallArgs=self.makePruneDatasetsArgs(refs=getDatasets(), unstore=True), @@ -229,7 +230,8 @@ def test_defaults_doNotContinue(self): """Test running with the default values but not continuing. Verify that with the default flags that the subcommand says what it - will do, prompts for input, and aborts when told not to continue.""" + will do, prompts for input, and aborts when told not to continue. + """ self.run_test( cliArgs=["myCollection", "--unstore"], exPruneDatasetsCallArgs=None, @@ -247,7 +249,8 @@ def test_dryRun_unstore(self): """Test the --dry-run flag with --unstore. Verify that with the dry-run flag the subcommand says what it would - remove, but does not remove the datasets.""" + remove, but does not remove the datasets. + """ self.run_test( cliArgs=["myCollection", "--dry-run", "--unstore"], exPruneDatasetsCallArgs=None, @@ -260,7 +263,8 @@ def test_dryRun_disassociate(self): """Test the --dry-run flag with --disassociate. Verify that with the dry-run flag the subcommand says what it would - remove, but does not remove the datasets.""" + remove, but does not remove the datasets. + """ collection = "myCollection" self.run_test( cliArgs=[collection, "--dry-run", "--disassociate", "tag1"], @@ -277,7 +281,8 @@ def test_dryRun_unstoreAndDisassociate(self): """Test the --dry-run flag with --unstore and --disassociate. 
Verify that with the dry-run flag the subcommand says what it would - remove, but does not remove the datasets.""" + remove, but does not remove the datasets. + """ collection = "myCollection" self.run_test( cliArgs=[collection, "--dry-run", "--unstore", "--disassociate", "tag1"], @@ -295,7 +300,8 @@ def test_noConfirm(self): Verify that with the no-confirm flag the subcommand does not ask for a confirmation, prints the did remove message and the tables that were - passed for removal.""" + passed for removal. + """ self.run_test( cliArgs=["myCollection", "--no-confirm", "--unstore"], exPruneDatasetsCallArgs=self.makePruneDatasetsArgs(refs=getDatasets(), unstore=True), @@ -308,7 +314,8 @@ def test_quiet(self): """Test the --quiet flag. Verify that with the quiet flag and the no-confirm flags set that no - output is produced by the subcommand.""" + output is produced by the subcommand. + """ self.run_test( cliArgs=["myCollection", "--quiet", "--unstore"], exPruneDatasetsCallArgs=self.makePruneDatasetsArgs(refs=getDatasets(), unstore=True), @@ -359,7 +366,8 @@ def test_noDatasets(self): def test_purgeWithDisassociate(self): """Verify there is an error when --purge and --disassociate are both - passed in.""" + passed in. + """ self.run_test( cliArgs=["--purge", "run", "--disassociate", "tag1", "tag2"], exPruneDatasetsCallArgs=None, @@ -371,7 +379,8 @@ def test_purgeWithDisassociate(self): def test_purgeNoOp(self): """Verify there is an error when none of --purge, --unstore, or - --disassociate are passed.""" + --disassociate are passed. + """ self.run_test( cliArgs=[], exPruneDatasetsCallArgs=None, @@ -445,7 +454,8 @@ def test_purgeImpliedArgsWithCollections(self, mockGetCollectionType): ) def test_purgeOnNonRunCollection(self, mockGetCollectionType): """Verify calling run on a non-run collection fails with expected - error message.""" + error message. 
+ """ collectionName = "myTaggedCollection" self.run_test( cliArgs=["--purge", collectionName], @@ -483,7 +493,8 @@ def test_disassociateImpliedArgs(self): def test_disassociateImpliedArgsWithCollections(self): """Verify the arguments implied by --disassociate, with a --collection - flag.""" + flag. + """ self.run_test( cliArgs=["myCollection", "--disassociate", "tag1", "--disassociate", "tag2", "--no-confirm"], exPruneDatasetsCallArgs=self.makePruneDatasetsArgs( diff --git a/tests/test_cliCmdQueryDataIds.py b/tests/test_cliCmdQueryDataIds.py index 9669fe4511..6005fe1eda 100644 --- a/tests/test_cliCmdQueryDataIds.py +++ b/tests/test_cliCmdQueryDataIds.py @@ -40,7 +40,8 @@ class QueryDataIdsTest(unittest.TestCase, ButlerTestHelper): @staticmethod def _queryDataIds(repo, dimensions=(), collections=(), datasets=None, where=""): """Helper to populate the call to script.queryDataIds with default - values.""" + values. + """ return script.queryDataIds( repo=repo, dimensions=dimensions, diff --git a/tests/test_cliCmdQueryDatasets.py b/tests/test_cliCmdQueryDatasets.py index fb4a0bcc82..80dbca1b9c 100644 --- a/tests/test_cliCmdQueryDatasets.py +++ b/tests/test_cliCmdQueryDatasets.py @@ -247,8 +247,8 @@ def testGlobDatasetType(self): def testFindFirstAndCollections(self): """Test the find-first option, and the collections option, since it - is required for find-first.""" - + is required for find-first. + """ testRepo = MetricTestRepo(self.repoDir, configFile=self.configFile) # Add a new run, and add a dataset to shadow an existing dataset. diff --git a/tests/test_cliCmdRemoveCollections.py b/tests/test_cliCmdRemoveCollections.py index 157173a947..15a2974259 100644 --- a/tests/test_cliCmdRemoveCollections.py +++ b/tests/test_cliCmdRemoveCollections.py @@ -131,7 +131,6 @@ def testRemoveScript(self): Combining several tests into one case allows us to reuse the test repo, which saves execution time. 
""" - # Test wildcard with chained collections: # Add a couple chained collections @@ -177,7 +176,6 @@ def testRemoveScript(self): def testRemoveCmd(self): """Test remove command outputs.""" - # Test expected output with a non-existent collection: result = self.runner.invoke(butlerCli, ["remove-collections", self.root, "fake_collection"]) diff --git a/tests/test_cliLog.py b/tests/test_cliLog.py index 2032fabae4..c9472b79f2 100644 --- a/tests/test_cliLog.py +++ b/tests/test_cliLog.py @@ -42,7 +42,8 @@ class CliLogTestCase(CliLogTestBase, unittest.TestCase): directly depend on that package. When running in an environment where `lsst.log` is setup then this will test use of `lsst.log`. This test also runs in obs_base which does provide coverage of python `logging` and - `lsst.log` in CI.""" + `lsst.log` in CI. + """ pass diff --git a/tests/test_cliPluginLoader.py b/tests/test_cliPluginLoader.py index 02270ee154..83277730b8 100644 --- a/tests/test_cliPluginLoader.py +++ b/tests/test_cliPluginLoader.py @@ -120,7 +120,8 @@ def test_getLocalCommands(self): def test_mergeCommandLists(self): """Verify dicts of command to list-of-source-package get merged - properly.""" + properly. 
+ """ first = defaultdict(list, {"a": [1]}) second = defaultdict(list, {"b": [2]}) self.assertEqual(butler.LoaderCLI._mergeCommandLists(first, second), {"a": [1], "b": [2]}) diff --git a/tests/test_cliUtilSplitCommas.py b/tests/test_cliUtilSplitCommas.py index 13046abba7..89863b9183 100644 --- a/tests/test_cliUtilSplitCommas.py +++ b/tests/test_cliUtilSplitCommas.py @@ -42,7 +42,7 @@ def setUp(self): self.runner = LogCliRunner() def test_separate(self): - """test the split_commas callback by itself""" + """Test the split_commas callback by itself.""" ctx = "unused" param = "unused" self.assertEqual( @@ -51,41 +51,45 @@ def test_separate(self): self.assertEqual(split_commas(ctx, param, None), tuple()) def test_single(self): - """test the split_commas callback in an option with one value""" + """Test the split_commas callback in an option with one value.""" result = self.runner.invoke(cli, ["-l", "one"]) self.assertEqual(result.exit_code, 0, msg=clickResultMsg(result)) mock.assert_called_with(("one",)) def test_multiple(self): - """test the split_commas callback in an option with two single - values""" + """Test the split_commas callback in an option with two single + values. + """ result = self.runner.invoke(cli, ["-l", "one", "-l", "two"]) self.assertEqual(result.exit_code, 0, msg=clickResultMsg(result)) mock.assert_called_with(("one", "two")) def test_singlePair(self): - """test the split_commas callback in an option with one pair of - values""" + """Test the split_commas callback in an option with one pair of + values. + """ result = self.runner.invoke(cli, ["-l", "one,two"]) self.assertEqual(result.exit_code, 0, msg=clickResultMsg(result)) mock.assert_called_with(("one", "two")) def test_multiplePair(self): - """test the split_commas callback in an option with two pairs of - values""" + """Test the split_commas callback in an option with two pairs of + values. 
+ """ result = self.runner.invoke(cli, ["-l", "one,two", "-l", "three,four"]) self.assertEqual(result.exit_code, 0, msg=clickResultMsg(result)) mock.assert_called_with(("one", "two", "three", "four")) def test_none(self): - """test that passing None does not fail and returns None, producing an - empty tuple in the command function call.""" + """Test that passing None does not fail and returns None, producing an + empty tuple in the command function call. + """ result = self.runner.invoke(cli, []) self.assertEqual(result.exit_code, 0, msg=clickResultMsg(result)) mock.assert_called_with(()) def test_parens(self): - """Test that split commas understands [a,b]""" + """Test that split commas understands ``[a, b]``.""" for test, expected in ( ("single", ("single",)), ("a,b", ("a", "b")), diff --git a/tests/test_cliUtilSplitKv.py b/tests/test_cliUtilSplitKv.py index eb06c08aeb..2d54a0f93b 100644 --- a/tests/test_cliUtilSplitKv.py +++ b/tests/test_cliUtilSplitKv.py @@ -39,7 +39,8 @@ def test_single_dict(self): def test_single_tuple(self): """Test that a single kv pair converts to a tuple when - return_type=tuple.""" + return_type=tuple. + """ self.assertEqual(split_kv("context", "param", "first=1", return_type=tuple), (("first", "1"),)) def test_multiple_dict(self): @@ -48,7 +49,8 @@ def test_multiple_dict(self): def test_multiple_tuple(self): """Test that multiple comma separated kv pairs convert to a tuple when - return_type=tuple.""" + return_type=tuple. + """ self.assertEqual( split_kv("context", "param", "first=1,second=2", return_type=tuple), (("first", "1"), ("second", "2")), @@ -56,7 +58,8 @@ def test_multiple_tuple(self): def test_unseparated(self): """Test that a value without a key converts to a kv pair with an empty - string key.""" + string key. 
+ """ self.assertEqual( split_kv("context", "param", "first,second=2", unseparated_okay=True), {"": "first", "second": "2"}, @@ -79,18 +82,21 @@ def test_wrongSeparator(self): def test_missingSeparator(self): """Test that an input with no separator raises when - unseparated_okay=False (this is the default value).""" + unseparated_okay=False (this is the default value). + """ with self.assertRaises(click.ClickException): split_kv("context", "param", "first 1") def test_unseparatedOkay(self): """Test that that the default key is used for values without a - separator when unseparated_okay=True.""" + separator when unseparated_okay=True. + """ self.assertEqual(split_kv("context", "param", "foo", unseparated_okay=True), {"": "foo"}) def test_unseparatedOkay_list(self): """Test that that the default key is used for values without a - separator when unseparated_okay=True and the return_type is tuple.""" + separator when unseparated_okay=True and the return_type is tuple. + """ self.assertEqual( split_kv("context", "param", "foo,bar", unseparated_okay=True, return_type=tuple), (("", "foo"), ("", "bar")), @@ -98,7 +104,8 @@ def test_unseparatedOkay_list(self): def test_unseparatedOkay_defaultKey(self): """Test that that the default key can be set and is used for values - without a separator when unseparated_okay=True.""" + without a separator when unseparated_okay=True. 
+ """ self.assertEqual( split_kv("context", "param", "foo", unseparated_okay=True, default_key=...), {...: "foo"} ) diff --git a/tests/test_cliUtilToUpper.py b/tests/test_cliUtilToUpper.py index 69cd00f906..1669ea40de 100644 --- a/tests/test_cliUtilToUpper.py +++ b/tests/test_cliUtilToUpper.py @@ -39,25 +39,25 @@ def setUp(self): self.runner = LogCliRunner() def test_isolated(self): - """test the to_upper callback by itself""" + """Test the to_upper callback by itself.""" ctx = "unused" param = "unused" self.assertEqual(to_upper(ctx, param, "debug"), "DEBUG") def test_lowerToUpper(self): - """test the to_upper callback in an option with a lowercase value""" + """Test the to_upper callback in an option with a lowercase value.""" result = self.runner.invoke(cli, ["--value", "debug"]) self.assertEqual(result.exit_code, 0) self.assertEqual(result.stdout, "DEBUG\n") def test_upperToUpper(self): - """test the to_upper callback in an option with a uppercase value""" + """Test the to_upper callback in an option with a uppercase value.""" result = self.runner.invoke(cli, ["--value", "DEBUG"]) self.assertEqual(result.exit_code, 0) self.assertEqual(result.stdout, "DEBUG\n") def test_mixedToUpper(self): - """test the to_upper callback in an option with a mixed-case value""" + """Test the to_upper callback in an option with a mixed-case value.""" result = self.runner.invoke(cli, ["--value", "DeBuG"]) self.assertEqual(result.exit_code, 0) self.assertEqual(result.stdout, "DEBUG\n") diff --git a/tests/test_cliUtils.py b/tests/test_cliUtils.py index af3422e5f6..a5a27ea352 100644 --- a/tests/test_cliUtils.py +++ b/tests/test_cliUtils.py @@ -63,7 +63,8 @@ def testHelpWrapped(self): def cli(): """The cli help - message.""" + message. 
+ """ pass self.runTest(cli) @@ -72,7 +73,8 @@ def runTest(self, cli): """Tests `utils.addArgumentHelp` and its use in repo_argument and directory_argument; verifies that the argument help gets added to the command function help, and that it's added in the correct order. See - addArgumentHelp for more details.""" + addArgumentHelp for more details. + """ expected = """Usage: cli [OPTIONS] REPO DIRECTORY The cli help message. @@ -141,7 +143,8 @@ def test_addElipsisToMultiple(self): `multiple=True` The default behavior of click is to not add elipsis to options that - have `multiple=True`.""" + have `multiple=True`. + """ @click.command() @click.option("--things", cls=MWOption, multiple=True) @@ -160,7 +163,8 @@ def test_addElipsisToNargs(self): The default behavior of click is to add elipsis when nargs does not equal 1, but it does not put a space before the elipsis and we prefer - a space between the metavar and the elipsis.""" + a space between the metavar and the elipsis. + """ for numberOfArgs in (0, 1, 2): # nargs must be >= 0 for an option @click.command() @@ -194,7 +198,8 @@ def test_help(self): Verify that MWArgument adds " ..." after the option metavar when `nargs` != 1. The default behavior of click is to add elipsis when nargs does not equal 1, but it does not put a space before the elipsis - and we prefer a space between the metavar and the elipsis.""" + and we prefer a space between the metavar and the elipsis. + """ # nargs can be -1 for any number of args, or >= 1 for a specified # number of arguments. @@ -267,7 +272,8 @@ def cli(test): def testOverride(self): """Test using the MWOptionDecorator with a command and overriding one - of the default values.""" + of the default values. 
+ """ mock = MagicMock() @click.command() @@ -283,7 +289,8 @@ def cli(test): class SectionOptionTest(unittest.TestCase): """Tests for the option_section decorator that inserts section break - headings between options in the --help output of a command.""" + headings between options in the --help output of a command. + """ @staticmethod @click.command() @@ -298,7 +305,8 @@ def setUp(self): def test_section_help(self): """Verify that the section break is printed in the help output in the - expected location and with expected formatting.""" + expected location and with expected formatting. + """ result = self.runner.invoke(self.cli, ["--help"]) # \x20 is a space, added explicitly below to prevent the # normally-helpful editor setting "remove trailing whitespace" from @@ -339,7 +347,8 @@ def setUp(self): def test_exist(self): """Test the exist argument, verify that True means the file must exist, False means the file must not exist, and None means that the file may - or may not exist.""" + or may not exist. 
+ """ with self.runner.isolated_filesystem(): mustExistCmd = self.getCmd(exists=True) mayExistCmd = self.getCmd(exists=None) diff --git a/tests/test_config.py b/tests/test_config.py index 39b77a5842..aa1e9fe646 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -402,7 +402,6 @@ def testHierarchy(self): def testSerializedString(self): """Test that we can create configs from strings""" - serialized = { "yaml": """ testing: hello @@ -458,7 +457,6 @@ def testPathlib(self): def testDefaults(self): """Read of defaults""" - # Supply the search path explicitly c = ConfigTest(searchPaths=(self.configDir,)) self.assertIsInstance(c, ConfigSubset) @@ -504,7 +502,6 @@ def testExternalHierarchy(self): def testNoDefaults(self): """Ensure that defaults can be turned off.""" - # Mandatory keys but no defaults c = ConfigTest({"item1": "a", "item2": "b", "item6": 6}) self.assertEqual(len(c.filesRead), 0) @@ -595,7 +592,6 @@ def testInclude(self): def testStringInclude(self): """Using include directives in strings""" - # See if include works for absolute path c = Config.fromYaml(f"something: !include {os.path.join(self.configDir, 'testconfig.yaml')}") self.assertEqual(c["something", "comp", "item3"], 3) @@ -607,7 +603,8 @@ def testStringInclude(self): def testIncludeConfigs(self): """Test the special includeConfigs key for pulling in additional - files.""" + files. 
+ """ c = Config(os.path.join(self.configDir, "configIncludes.yaml")) self.assertEqual(c["comp", "item2"], "hello") self.assertEqual(c["comp", "item50"], 5000) @@ -679,7 +676,6 @@ def tearDown(self): def testDump(self): """Test that we can write and read a configuration.""" - c = Config({"1": 2, "3": 4, "key3": 6, "dict": {"a": 1, "b": 2}}) for format in ("yaml", "json"): diff --git a/tests/test_datastore.py b/tests/test_datastore.py index bdc2463e02..f86facb61c 100644 --- a/tests/test_datastore.py +++ b/tests/test_datastore.py @@ -277,7 +277,6 @@ def testBasicPutGet(self) -> None: def testTrustGetRequest(self) -> None: """Check that we can get datasets that registry knows nothing about.""" - datastore = self.makeDatastore() # Skip test if the attribute is not defined @@ -712,7 +711,8 @@ def failInputDoesNotExist(obj: MetricsExample, path: str, ref: DatasetRef) -> No def failOutsideRoot(obj: MetricsExample, path: str, ref: DatasetRef) -> None: """Can't ingest files outside of datastore root unless - auto.""" + auto. + """ if mode == "auto": datastore.ingest(FileDataset(path=os.path.abspath(path), refs=ref), transfer=mode) self.assertTrue(datastore.exists(ref)) @@ -740,7 +740,8 @@ def testIngestTransfer(self) -> None: def succeed(obj: MetricsExample, path: str, ref: DatasetRef) -> None: """Ingest a file by transferring it to the template - location.""" + location. + """ datastore.ingest(FileDataset(path=os.path.abspath(path), refs=ref), transfer=mode) self.assertEqual(obj, datastore.get(ref)) @@ -908,7 +909,8 @@ def test_pydantic_dict_storage_class_conversions(self) -> None: def test_simple_class_put_get(self) -> None: """Test that we can put and get a simple class with dict() - constructor.""" + constructor. 
+ """ datastore = self.makeDatastore() data = MetricsExample(summary={"a": 1}, data=[1, 2, 3], output={"b": 2}) self._assert_different_puts(datastore, "MetricsExample", data) @@ -984,8 +986,8 @@ def testAtomicWrite(self) -> None: def testCanNotDeterminePutFormatterLocation(self) -> None: """Verify that the expected exception is raised if the FileDatastore - can not determine the put formatter location.""" - + can not determine the put formatter location. + """ _ = makeExampleMetrics() datastore = self.makeDatastore() @@ -1027,7 +1029,6 @@ class PosixDatastoreNoChecksumsTestCase(PosixDatastoreTestCase): def testChecksum(self) -> None: """Ensure that checksums have not been calculated.""" - datastore = self.makeDatastore() storageClass = self.storageClassFactory.getStorageClass("StructuredData") dimensions = self.universe.extract(("visit", "physical_filter")) @@ -1184,7 +1185,8 @@ class DatastoreConstraintsTests(DatastoreTestsBase): def testConstraints(self) -> None: """Test constraints model. Assumes that each test class has the - same constraints.""" + same constraints. + """ metrics = makeExampleMetrics() datastore = self.makeDatastore() @@ -1242,7 +1244,7 @@ def setUp(self) -> None: class InMemoryDatastoreConstraintsTestCase(DatastoreConstraintsTests, unittest.TestCase): - """InMemoryDatastore specialization""" + """InMemoryDatastore specialization.""" configFile = os.path.join(TESTDIR, "config/basic/inMemoryDatastoreP.yaml") canIngest = False @@ -1250,19 +1252,20 @@ class InMemoryDatastoreConstraintsTestCase(DatastoreConstraintsTests, unittest.T class ChainedDatastoreConstraintsNativeTestCase(PosixDatastoreConstraintsTestCase): """ChainedDatastore specialization using a POSIXDatastore and constraints - at the ChainedDatstore""" + at the ChainedDatstore. 
+ """ configFile = os.path.join(TESTDIR, "config/basic/chainedDatastorePa.yaml") class ChainedDatastoreConstraintsTestCase(PosixDatastoreConstraintsTestCase): - """ChainedDatastore specialization using a POSIXDatastore""" + """ChainedDatastore specialization using a POSIXDatastore.""" configFile = os.path.join(TESTDIR, "config/basic/chainedDatastoreP.yaml") class ChainedDatastoreMemoryConstraintsTestCase(InMemoryDatastoreConstraintsTestCase): - """ChainedDatastore specialization using all InMemoryDatastore""" + """ChainedDatastore specialization using all InMemoryDatastore.""" configFile = os.path.join(TESTDIR, "config/basic/chainedDatastore2P.yaml") canIngest = False @@ -1270,7 +1273,8 @@ class ChainedDatastoreMemoryConstraintsTestCase(InMemoryDatastoreConstraintsTest class ChainedDatastorePerStoreConstraintsTests(DatastoreTestsBase, unittest.TestCase): """Test that a chained datastore can control constraints per-datastore - even if child datastore would accept.""" + even if child datastore would accept. 
+ """ configFile = os.path.join(TESTDIR, "config/basic/chainedDatastorePb.yaml") diff --git a/tests/test_exprParserLex.py b/tests/test_exprParserLex.py index f8fbdb4d9d..adb4b75212 100644 --- a/tests/test_exprParserLex.py +++ b/tests/test_exprParserLex.py @@ -48,7 +48,6 @@ def tearDown(self): def testInstantiate(self): """Tests for making ParserLex instances""" - default_reflags = re.IGNORECASE | re.VERBOSE lexer = ParserLex.make_lexer() self.assertEqual(lexer.lexreflags, default_reflags) diff --git a/tests/test_exprParserYacc.py b/tests/test_exprParserYacc.py index 1a7a065026..a722508904 100644 --- a/tests/test_exprParserYacc.py +++ b/tests/test_exprParserYacc.py @@ -519,7 +519,6 @@ def testStr(self): def testVisit(self): """Test for visitor methods""" - # test should cover all visit* methods parser = ParserYacc() visitor = _Visitor() @@ -549,7 +548,6 @@ def testVisit(self): def testParseTimeStr(self): """Test for _parseTimeString method""" - # few expected failures bad_times = [ "", diff --git a/tests/test_expressions.py b/tests/test_expressions.py index 9dd538ac13..1a5ff8c9c8 100644 --- a/tests/test_expressions.py +++ b/tests/test_expressions.py @@ -102,7 +102,6 @@ def test_ingest_date(self): def test_bind(self): """Test with bind parameters""" - self.assertEqual( make_string_expression_predicate( "a > b OR t in (x, y, z)", @@ -126,7 +125,6 @@ def test_bind(self): def test_bind_list(self): """Test with bind parameter which is list/tuple/set inside IN rhs.""" - self.assertEqual( make_string_expression_predicate( "a > b OR t in (x)", @@ -231,7 +229,6 @@ class InspectionVisitorTestCase(unittest.TestCase): def test_simple(self): """Test for simple expressions""" - universe = DimensionUniverse() parser = ParserYacc() @@ -269,7 +266,6 @@ def test_simple(self): def test_bind(self): """Test for simple expressions with binds.""" - universe = DimensionUniverse() parser = ParserYacc() @@ -305,7 +301,6 @@ def test_bind(self): def test_in(self): """Test for IN 
expressions.""" - universe = DimensionUniverse() parser = ParserYacc() @@ -352,7 +347,6 @@ class CheckVisitorTestCase(unittest.TestCase): def test_governor(self): """Test with governor dimension in expression""" - parser = ParserYacc() universe = DimensionUniverse() diff --git a/tests/test_formatter.py b/tests/test_formatter.py index 929e644f2d..53f5ed727c 100644 --- a/tests/test_formatter.py +++ b/tests/test_formatter.py @@ -64,8 +64,8 @@ def setUp(self): def assertIsFormatter(self, formatter): """Check that the supplied parameter is either a Formatter instance - or Formatter class.""" - + or Formatter class. + """ if inspect.isclass(formatter): self.assertTrue(issubclass(formatter, Formatter), f"Is {formatter} a Formatter") else: @@ -95,7 +95,6 @@ def testFormatter(self): def testExtensionValidation(self): """Test extension validation""" - for file, single_ok, multi_ok in ( ("e.fits", True, True), ("e.fit", False, True), diff --git a/tests/test_logFormatter.py b/tests/test_logFormatter.py index 487aeb4848..254043e9d0 100644 --- a/tests/test_logFormatter.py +++ b/tests/test_logFormatter.py @@ -70,7 +70,6 @@ def testButlerLogRecordsFormatter(self): def testJsonLogRecordsFormatter(self): """Test that externally created JSON format stream files work.""" - log = logging.getLogger(self.id()) log.setLevel(logging.INFO) diff --git a/tests/test_logging.py b/tests/test_logging.py index 1d06c2d70a..397a563061 100644 --- a/tests/test_logging.py +++ b/tests/test_logging.py @@ -52,7 +52,6 @@ def tearDown(self): def testRecordCapture(self): """Test basic log capture and serialization.""" - self.log.setLevel(VERBOSE) test_messages = ( @@ -130,7 +129,6 @@ def testRecordsFormatting(self): def testButlerLogRecords(self): """Test the list-like methods of ButlerLogRecords.""" - self.log.setLevel(logging.INFO) n_messages = 10 diff --git a/tests/test_obscore.py b/tests/test_obscore.py index 2ee0ba65a0..d50b5b620d 100644 --- a/tests/test_obscore.py +++ b/tests/test_obscore.py @@ -81,7 
+81,6 @@ def make_registry_config( def initialize_registry(self, registry: Registry) -> None: """Populate Registry with the things that we need for tests.""" - registry.insertDimensionData("instrument", {"name": "DummyCam"}) registry.insertDimensionData( "physical_filter", {"instrument": "DummyCam", "name": "d-r", "band": "r"} @@ -250,7 +249,6 @@ def _insert_datasets(self, registry: Registry, do_import: bool = False) -> list[ def test_config_errors(self): """Test for handling various configuration problems.""" - # This raises pydantic ValidationError, which wraps ValueError exception_re = "'collections' must have one element" with self.assertRaisesRegex(ValueError, exception_re): @@ -268,7 +266,6 @@ def test_config_errors(self): def test_schema(self): """Check how obscore schema is constructed""" - config = ObsCoreConfig(obs_collection="", dataset_types=[], facility_name="FACILITY") schema = ObsCoreSchema(config, []) table_spec = schema.table_spec @@ -351,7 +348,6 @@ def test_insert_existing_collection(self): """Test insert and import registry methods, with various restrictions on collection names. """ - # First item is collections, second item is expected record count. 
test_data = ( (None, 6), @@ -378,7 +374,6 @@ def test_insert_existing_collection(self): def test_drop_datasets(self): """Test for dropping datasets after obscore insert.""" - collections = None registry = self.make_registry(collections) obscore = registry.obsCoreTableManager @@ -403,7 +398,6 @@ def test_drop_datasets(self): def test_associate(self): """Test for associating datasets to TAGGED collection.""" - collections = ["tagged"] registry = self.make_registry(collections, "TAGGED") obscore = registry.obsCoreTableManager @@ -445,7 +439,6 @@ def test_associate(self): def test_region_type_warning(self) -> None: """Test that non-polygon region generates one or more warnings.""" - collections = None registry = self.make_registry(collections) @@ -460,7 +453,6 @@ def test_region_type_warning(self) -> None: def test_update_exposure_region(self) -> None: """Test for update_exposure_regions method.""" - registry = self.make_registry(["run1"]) obscore = registry.obsCoreTableManager assert obscore is not None @@ -604,7 +596,6 @@ def make_obscore_config( def test_spatial(self): """Test that pgsphere plugin fills spatial columns.""" - collections = None registry = self.make_registry(collections) obscore = registry.obsCoreTableManager diff --git a/tests/test_postgresql.py b/tests/test_postgresql.py index 2dd35668c8..d40fbec1c8 100644 --- a/tests/test_postgresql.py +++ b/tests/test_postgresql.py @@ -210,11 +210,11 @@ def subquery(alias: str) -> sqlalchemy.sql.FromClause: class PostgresqlRegistryTests(RegistryTests): """Tests for `Registry` backed by a PostgreSQL database. - Note - ---- + Notes + ----- This is not a subclass of `unittest.TestCase` but to avoid repetition it defines methods that override `unittest.TestCase` methods. To make this - work sublasses have to have this class first in the bases list. + work subclasses have to have this class first in the bases list. 
""" @classmethod diff --git a/tests/test_quantumBackedButler.py b/tests/test_quantumBackedButler.py index 882c148007..f2382d50b9 100644 --- a/tests/test_quantumBackedButler.py +++ b/tests/test_quantumBackedButler.py @@ -103,7 +103,6 @@ def tearDown(self) -> None: def make_quantum(self, step: int = 1) -> Quantum: """Make a Quantum which includes datastore records.""" - if step == 1: datastore_records = self.butler.datastore.export_records(self.all_input_refs) predictedInputs = {self.datasetTypeInput: self.input_refs} @@ -128,7 +127,6 @@ def make_quantum(self, step: int = 1) -> Quantum: def test_initialize(self) -> None: """Test for initialize factory method""" - quantum = self.make_quantum() qbb = QuantumBackedButler.initialize( config=self.config, quantum=quantum, dimensions=self.universe, dataset_types=self.dataset_types @@ -137,7 +135,6 @@ def test_initialize(self) -> None: def test_initialize_repo_index(self) -> None: """Test for initialize using config file and repo index.""" - # Store config to a file. 
self.config.dumpToUri(self.root) @@ -158,7 +155,6 @@ def test_initialize_repo_index(self) -> None: def test_from_predicted(self) -> None: """Test for from_predicted factory method""" - datastore_records = self.butler.datastore.export_records(self.all_input_refs) qbb = QuantumBackedButler.from_predicted( config=self.config, @@ -172,7 +168,6 @@ def test_from_predicted(self) -> None: def _test_factory(self, qbb: QuantumBackedButler) -> None: """Test state immediately after construction.""" - self.assertTrue(qbb.isWriteable()) self.assertEqual(qbb._predicted_inputs, {ref.id for ref in self.all_input_refs}) self.assertEqual(qbb._predicted_outputs, {ref.id for ref in self.output_refs}) @@ -183,7 +178,6 @@ def _test_factory(self, qbb: QuantumBackedButler) -> None: def test_getput(self) -> None: """Test for getDirect/putDirect methods""" - quantum = self.make_quantum() qbb = QuantumBackedButler.initialize( config=self.config, quantum=quantum, dimensions=self.universe, dataset_types=self.dataset_types @@ -217,7 +211,6 @@ def test_getput(self) -> None: def test_getDeferred(self) -> None: """Test for getDirectDeferred method""" - quantum = self.make_quantum() qbb = QuantumBackedButler.initialize( config=self.config, quantum=quantum, dimensions=self.universe, dataset_types=self.dataset_types @@ -243,7 +236,6 @@ def test_getDeferred(self) -> None: def test_datasetExistsDirect(self) -> None: """Test for dataset existence method""" - quantum = self.make_quantum() qbb = QuantumBackedButler.initialize( config=self.config, quantum=quantum, dimensions=self.universe, dataset_types=self.dataset_types @@ -278,7 +270,6 @@ def test_datasetExistsDirect(self) -> None: def test_markInputUnused(self) -> None: """Test for markInputUnused method""" - quantum = self.make_quantum() qbb = QuantumBackedButler.initialize( config=self.config, quantum=quantum, dimensions=self.universe, dataset_types=self.dataset_types @@ -300,7 +291,6 @@ def test_markInputUnused(self) -> None: def 
test_pruneDatasets(self) -> None: """Test for pruneDatasets methods""" - quantum = self.make_quantum() qbb = QuantumBackedButler.initialize( config=self.config, quantum=quantum, dimensions=self.universe, dataset_types=self.dataset_types @@ -345,7 +335,6 @@ def test_pruneDatasets(self) -> None: def test_extract_provenance_data(self) -> None: """Test for extract_provenance_data method""" - quantum = self.make_quantum() qbb = QuantumBackedButler.initialize( config=self.config, quantum=quantum, dimensions=self.universe, dataset_types=self.dataset_types @@ -385,7 +374,6 @@ def test_extract_provenance_data(self) -> None: def test_collect_and_transfer(self) -> None: """Test for collect_and_transfer method""" - quantum1 = self.make_quantum(1) qbb1 = QuantumBackedButler.initialize( config=self.config, quantum=quantum1, dimensions=self.universe, dataset_types=self.dataset_types diff --git a/tests/test_simpleButler.py b/tests/test_simpleButler.py index ba0b117320..2a25317f05 100644 --- a/tests/test_simpleButler.py +++ b/tests/test_simpleButler.py @@ -273,7 +273,6 @@ def testCollectionTransfers(self): def testButlerGet(self): """Test that butler.get can work with different variants.""" - # Import data to play with. butler = self.makeButler(writeable=True) butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml")) @@ -633,7 +632,6 @@ def testJsonDimensionRecordsAndHtmlRepresentation(self): def testWildcardQueries(self): """Test that different collection type queries work.""" - # Import data to play with. butler = self.makeButler(writeable=True) butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml")) diff --git a/tests/test_sqlite.py b/tests/test_sqlite.py index aa3011ced6..0b9d71bc75 100644 --- a/tests/test_sqlite.py +++ b/tests/test_sqlite.py @@ -178,8 +178,8 @@ def testTransactionLocking(self): class SqliteFileRegistryTests(RegistryTests): """Tests for `Registry` backed by a SQLite file-based database. 
- Note - ---- + Notes + ----- This is not a subclass of `unittest.TestCase` but to avoid repetition it defines methods that override `unittest.TestCase` methods. To make this work sublasses have to have this class first in the bases list. diff --git a/tests/test_storageClass.py b/tests/test_storageClass.py index 1205e1e2a1..343f9e9803 100644 --- a/tests/test_storageClass.py +++ b/tests/test_storageClass.py @@ -63,7 +63,8 @@ class StorageClassFactoryTestCase(unittest.TestCase): def testCreation(self): """Test that we can dynamically create storage class subclasses. - This is critical for testing the factory functions.""" + This is critical for testing the factory functions. + """ className = "TestImage" sc = StorageClass(className, pytype=dict) self.assertIsInstance(sc, StorageClass) @@ -180,7 +181,8 @@ def testTypeEquality(self): def testRegistry(self): """Check that storage classes can be created on the fly and stored - in a registry.""" + in a registry. + """ className = "TestImage" factory = StorageClassFactory() newclass = StorageClass(className, pytype=PythonType) @@ -315,7 +317,6 @@ def _convert_type(cls, data): def testConverters(self): """Test conversion maps.""" - className = "TestConverters" converters = { "lsst.daf.butler.tests.MetricsExample": "lsst.daf.butler.tests.MetricsExample.exportAsDict", diff --git a/tests/test_templates.py b/tests/test_templates.py index 48260a9bb8..dab247d01f 100644 --- a/tests/test_templates.py +++ b/tests/test_templates.py @@ -49,7 +49,6 @@ def makeDatasetRef( self, datasetTypeName, dataId=None, storageClassName="DefaultStorageClass", run="run2", conform=True ): """Make a simple DatasetRef""" - if dataId is None: dataId = self.dataId if "physical_filter" in dataId and "band" not in dataId: diff --git a/tests/test_timespan.py b/tests/test_timespan.py index 1dcd7f86da..270d03b08a 100644 --- a/tests/test_timespan.py +++ b/tests/test_timespan.py @@ -225,7 +225,6 @@ def testTimescales(self): def testFuture(self): """Check that we 
do not get warnings from future dates.""" - # Astropy will give "dubious year" for UTC five years in the future # so hide these expected warnings from the test output with warnings.catch_warnings(): diff --git a/tests/test_versioning.py b/tests/test_versioning.py index 06824c2f68..44d073c022 100644 --- a/tests/test_versioning.py +++ b/tests/test_versioning.py @@ -100,7 +100,6 @@ def makeEmptyDatabase(self, origin: int = 0) -> Database: def test_new_schema(self) -> None: """Test for creating new database schema.""" - # Check that managers know what schema versions they can make. Manager1.checkNewSchemaVersion(V_1_0_0) Manager2.checkNewSchemaVersion(V_1_0_0) @@ -149,7 +148,6 @@ def test_new_schema(self) -> None: def test_existing_schema(self) -> None: """Test for reading manager versions from existing database.""" - manager_versions = ( ((None, V_1_0_0), (None, V_1_0_0)), ((V_1_0_0, V_1_0_0), (V_1_0_0, V_1_0_0)), @@ -194,7 +192,6 @@ def test_existing_schema(self) -> None: def test_compatibility(self) -> None: """Test for version compatibility rules.""" - # Manager, version, update, compatible compat_matrix = ( (Manager0, V_1_0_0, False, True), From aaaac6e99f061d49294b8acd1aa2bb20e6179832 Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 14:10:41 -0700 Subject: [PATCH 04/16] Fix D401 doc problems --- python/lsst/daf/butler/_butler.py | 2 +- .../lsst/daf/butler/_deferredDatasetHandle.py | 2 +- python/lsst/daf/butler/_quantum_backed.py | 4 ++- python/lsst/daf/butler/cli/butler.py | 8 +++-- python/lsst/daf/butler/cli/progress.py | 4 +-- python/lsst/daf/butler/cli/utils.py | 35 +++++++++++-------- .../daf/butler/datastores/fileDatastore.py | 4 ++- python/lsst/daf/butler/formatters/file.py | 2 +- python/lsst/daf/butler/registry/_config.py | 4 +-- .../daf/butler/registry/connectionString.py | 2 +- .../datasets/byDimensions/_storage.py | 6 ++-- .../daf/butler/registry/dimensions/table.py | 2 +- .../registry/queries/expressions/check.py | 2 +- 
.../queries/expressions/normalForm.py | 3 +- .../queries/expressions/parser/exprTree.py | 2 +- .../queries/expressions/parser/parserLex.py | 2 +- .../daf/butler/registry/tests/_database.py | 2 +- .../daf/butler/registry/tests/_registry.py | 4 +-- python/lsst/daf/butler/tests/_testRepo.py | 4 +-- tests/data/registry/spatial.py | 2 +- tests/test_butler.py | 12 ++++--- tests/test_cliCmdQueryDataIds.py | 4 +-- tests/test_cliPluginLoader.py | 2 +- tests/test_cliUtils.py | 9 ++--- tests/test_config.py | 4 +-- tests/test_query_relations.py | 2 +- 26 files changed, 70 insertions(+), 59 deletions(-) diff --git a/python/lsst/daf/butler/_butler.py b/python/lsst/daf/butler/_butler.py index e51c7f0d4d..9b9fe8d8a3 100644 --- a/python/lsst/daf/butler/_butler.py +++ b/python/lsst/daf/butler/_butler.py @@ -1409,7 +1409,7 @@ def getURIs( run: str | None = None, **kwargs: Any, ) -> DatasetRefURIs: - """Returns the URIs associated with the dataset. + """Return the URIs associated with the dataset. Parameters ---------- diff --git a/python/lsst/daf/butler/_deferredDatasetHandle.py b/python/lsst/daf/butler/_deferredDatasetHandle.py index 799f61dadf..3b6cf63d75 100644 --- a/python/lsst/daf/butler/_deferredDatasetHandle.py +++ b/python/lsst/daf/butler/_deferredDatasetHandle.py @@ -44,7 +44,7 @@ def get( parameters: dict | None = None, storageClass: str | StorageClass | None = None, ) -> Any: - """Retrieves the dataset pointed to by this handle. + """Retrieve the dataset pointed to by this handle. This handle may be used multiple times, possibly with different parameters. 
diff --git a/python/lsst/daf/butler/_quantum_backed.py b/python/lsst/daf/butler/_quantum_backed.py index a9995806f0..9f44492758 100644 --- a/python/lsst/daf/butler/_quantum_backed.py +++ b/python/lsst/daf/butler/_quantum_backed.py @@ -304,7 +304,9 @@ def _initialize( search_paths: list[str] | None = None, dataset_types: Mapping[str, DatasetType] | None = None, ) -> QuantumBackedButler: - """Internal method with common implementation used by `initialize` and + """Initialize quantum-backed butler. + + Internal method with common implementation used by `initialize` and `for_output`. Parameters diff --git a/python/lsst/daf/butler/cli/butler.py b/python/lsst/daf/butler/cli/butler.py index 38f33b0302..1de9788d54 100755 --- a/python/lsst/daf/butler/cli/butler.py +++ b/python/lsst/daf/butler/cli/butler.py @@ -126,9 +126,11 @@ def getLocalCommands(self) -> defaultdict[str, list[str]]: ) def list_commands(self, ctx: click.Context) -> list[str]: - """Used by Click to get all the commands that can be called by the + """Get all the commands that can be called by the butler command, it is used to generate the --help output. + Used by Click. + Parameters ---------- ctx : `click.Context` @@ -145,7 +147,9 @@ def list_commands(self, ctx: click.Context) -> list[str]: return sorted(commands) def get_command(self, ctx: click.Context, name: str) -> click.Command | None: - """Used by Click to get a single command for execution. + """Get a single command for execution. + + Used by Click.
Parameters ---------- diff --git a/python/lsst/daf/butler/cli/progress.py b/python/lsst/daf/butler/cli/progress.py index 6133ebc903..f4ec73420a 100644 --- a/python/lsst/daf/butler/cli/progress.py +++ b/python/lsst/daf/butler/cli/progress.py @@ -50,7 +50,7 @@ def __init__(self, **kwargs: Any): @classmethod def callback(cls, ctx: click.Context, params: click.Parameter, value: Any) -> None: - """A `click` callback that installs this handler as the global handler + """`click` callback that installs this handler as the global handler for progress bars. Should usually be called only by the `option` method. @@ -62,7 +62,7 @@ def callback(cls, ctx: click.Context, params: click.Parameter, value: Any) -> No @classmethod def option(cls, cmd: Any) -> Any: - """A `click` command decorator that adds a ``--progress`` option + """`click` command decorator that adds a ``--progress`` option that installs a default-constructed instance of this progress handler. """ return click.option( diff --git a/python/lsst/daf/butler/cli/utils.py b/python/lsst/daf/butler/cli/utils.py index a05e2af721..8be1094bd2 100644 --- a/python/lsst/daf/butler/cli/utils.py +++ b/python/lsst/daf/butler/cli/utils.py @@ -174,7 +174,7 @@ def clickResultMsg(result: click.testing.Result) -> str: @contextmanager def command_test_env(runner: click.testing.CliRunner, commandModule: str, commandName: str) -> Iterator[None]: - """A context manager that creates (and then cleans up) an environment that + """Context manager that creates (and then cleans up) an environment that provides a CLI plugin command with the given name. Parameters @@ -604,7 +604,9 @@ def __init__( ) def convert(self, value: str, param: click.Parameter | None, ctx: click.Context | None) -> Any: - """Called by click.ParamType to "convert values through types". + """Convert values through types. + + Called by `click.ParamType` to "convert values through types". `click.Path` uses this step to verify Path conditions. 
""" if self.mustNotExist and os.path.exists(value): @@ -616,14 +618,16 @@ class MWOption(click.Option): """Overrides click.Option with desired behaviors.""" def make_metavar(self) -> str: - """Overrides `click.Option.make_metavar`. Makes the metavar for the - help menu. Adds a space and an elipsis after the metavar name if + """Make the metavar for the help menu. + + Overrides `click.Option.make_metavar`. + Adds a space and an elipsis after the metavar name if the option accepts multiple inputs, otherwise defers to the base implementation. - By default click does not add an elipsis when multiple is True and - nargs is 1. And when nargs does not equal 1 click adds an elipsis - without a space between the metavar and the elipsis, but we prefer a + By default click does not add an ellipsis when multiple is True and + nargs is 1. And when nargs does not equal 1 click adds an ellipsis + without a space between the metavar and the ellipsis, but we prefer a space between. Does not get called for some option types (e.g. flag) so metavar @@ -642,12 +646,14 @@ class MWArgument(click.Argument): """Overrides click.Argument with desired behaviors.""" def make_metavar(self) -> str: - """Overrides `click.Option.make_metavar`. Makes the metavar for the - help menu. Always adds a space and an elipsis (' ...') after the + """Make the metavar for the help menu. + + Overrides `click.Option.make_metavar`. + Always adds a space and an ellipsis (' ...') after the metavar name if the option accepts multiple inputs. - By default click adds an elipsis without a space between the metavar - and the elipsis, but we prefer a space between. + By default click adds an ellipsis without a space between the metavar + and the ellipsis, but we prefer a space between. Returns ------- @@ -961,8 +967,7 @@ def getFrom(ctx: click.Context) -> Any: def yaml_presets(ctx: click.Context, param: str, value: Any) -> None: - """Click callback that reads additional values from the supplied - YAML file. 
+ """Read additional values from the supplied YAML file. Parameters ---------- @@ -1097,8 +1102,10 @@ def sortAstropyTable(table: Table, dimensions: list[Dimension], sort_first: list def catch_and_exit(func: Callable) -> Callable: - """Decorator which catches all exceptions, prints an exception traceback + """Catch all exceptions, prints an exception traceback and signals click to exit. + + Use as decorator. """ @wraps(func) diff --git a/python/lsst/daf/butler/datastores/fileDatastore.py b/python/lsst/daf/butler/datastores/fileDatastore.py index 48286a3210..42d047feb8 100644 --- a/python/lsst/daf/butler/datastores/fileDatastore.py +++ b/python/lsst/daf/butler/datastores/fileDatastore.py @@ -1387,7 +1387,9 @@ def _process_mexists_records( all_required: bool, artifact_existence: dict[ResourcePath, bool] | None = None, ) -> dict[DatasetRef, bool]: - """Helper function for mexists that checks the given records. + """Check given records for existence. + + Helper function for `mexists()`. Parameters ---------- diff --git a/python/lsst/daf/butler/formatters/file.py b/python/lsst/daf/butler/formatters/file.py index 392a90447b..1753f3c98e 100644 --- a/python/lsst/daf/butler/formatters/file.py +++ b/python/lsst/daf/butler/formatters/file.py @@ -262,7 +262,7 @@ def read(self, component: str | None = None) -> Any: return data def fromBytes(self, serializedDataset: bytes, component: str | None = None) -> Any: - """Reads serialized data into a Dataset or its component. + """Read serialized data into a Dataset or its component. Parameters ---------- diff --git a/python/lsst/daf/butler/registry/_config.py b/python/lsst/daf/butler/registry/_config.py index 4ccac70e76..67eaa33654 100644 --- a/python/lsst/daf/butler/registry/_config.py +++ b/python/lsst/daf/butler/registry/_config.py @@ -43,7 +43,7 @@ class RegistryConfig(ConfigSubset): defaultConfigFile = "registry.yaml" def getDialect(self) -> str: - """Parses the `db` key of the config and returns the database dialect. 
+ """Parse the `db` key of the config and returns the database dialect. Returns ------- @@ -54,7 +54,7 @@ def getDialect(self) -> str: return conStr.get_backend_name() def getDatabaseClass(self) -> type[Database]: - """Returns the `Database` class targeted by configuration values. + """Return the `Database` class targeted by configuration values. The appropriate class is determined by parsing the `db` key to extract the dialect, and then looking that up under the `engines` key of the diff --git a/python/lsst/daf/butler/registry/connectionString.py b/python/lsst/daf/butler/registry/connectionString.py index 4e0f4ad63a..99a8a09d28 100644 --- a/python/lsst/daf/butler/registry/connectionString.py +++ b/python/lsst/daf/butler/registry/connectionString.py @@ -55,7 +55,7 @@ class ConnectionStringFactory: @classmethod def fromConfig(cls, registryConfig: RegistryConfig) -> url.URL: - """Parses the `db`, and, if they exist, username, password, host, port + """Parse the `db`, and, if they exist, username, password, host, port and database keys from the given config. If no username and password are found in the connection string, or in diff --git a/python/lsst/daf/butler/registry/datasets/byDimensions/_storage.py b/python/lsst/daf/butler/registry/datasets/byDimensions/_storage.py index 9aeae32c53..53ae026c57 100644 --- a/python/lsst/daf/butler/registry/datasets/byDimensions/_storage.py +++ b/python/lsst/daf/butler/registry/datasets/byDimensions/_storage.py @@ -419,11 +419,11 @@ def _finish_single_relation( collections: Sequence[tuple[CollectionRecord, int]], context: SqlQueryContext, ) -> Relation: - """Helper method for `make_relation`. - - This handles adding columns and WHERE terms that are not specific to + """Handle adding columns and WHERE terms that are not specific to either the tags or calibs tables. + Helper method for `make_relation`. 
+ Parameters ---------- payload : `lsst.daf.relation.sql.Payload` diff --git a/python/lsst/daf/butler/registry/dimensions/table.py b/python/lsst/daf/butler/registry/dimensions/table.py index 8ceffd0bce..4969eede78 100644 --- a/python/lsst/daf/butler/registry/dimensions/table.py +++ b/python/lsst/daf/butler/registry/dimensions/table.py @@ -238,7 +238,7 @@ def make_spatial_join_relation( raise TypeError(f"Unexpected dimension element type for spatial join: {other}.") def _on_governor_insert(self, record: DimensionRecord) -> None: - """A `GovernorDimensionRecordStorage.registerInsertionListener` + """`GovernorDimensionRecordStorage.registerInsertionListener` callback for this element. Parameters diff --git a/python/lsst/daf/butler/registry/queries/expressions/check.py b/python/lsst/daf/butler/registry/queries/expressions/check.py index 5fffee9c1c..04379069d5 100644 --- a/python/lsst/daf/butler/registry/queries/expressions/check.py +++ b/python/lsst/daf/butler/registry/queries/expressions/check.py @@ -88,7 +88,7 @@ def update(self, other: InspectionSummary) -> None: """ def make_column_tag_set(self, dataset_type_name: str | None) -> set[ColumnTag]: - """Transforms the columns captured here into a set of `ColumnTag` + """Transform the columns captured here into a set of `ColumnTag` objects. Parameters diff --git a/python/lsst/daf/butler/registry/queries/expressions/normalForm.py b/python/lsst/daf/butler/registry/queries/expressions/normalForm.py index 830684c577..b55ddb6c46 100644 --- a/python/lsst/daf/butler/registry/queries/expressions/normalForm.py +++ b/python/lsst/daf/butler/registry/queries/expressions/normalForm.py @@ -1096,7 +1096,8 @@ def visitBranch(self, node: Node) -> Node: return node def _visitSequence(self, branches: Sequence[Node], operator: LogicalBinaryOperator) -> Node: - """Common recursive implementation for `visitInner` and `visitOuter`. + """Implement common recursive implementation for `visitInner` and + `visitOuter`. 
Parameters ---------- diff --git a/python/lsst/daf/butler/registry/queries/expressions/parser/exprTree.py b/python/lsst/daf/butler/registry/queries/expressions/parser/exprTree.py index 0eec0c8322..45e2741cc3 100644 --- a/python/lsst/daf/butler/registry/queries/expressions/parser/exprTree.py +++ b/python/lsst/daf/butler/registry/queries/expressions/parser/exprTree.py @@ -418,7 +418,7 @@ def __str__(self) -> str: def function_call(function: str, args: list[Node]) -> Node: - """Factory method for nodes representing function calls. + """Return node representing function calls. Attributes ---------- diff --git a/python/lsst/daf/butler/registry/queries/expressions/parser/parserLex.py b/python/lsst/daf/butler/registry/queries/expressions/parser/parserLex.py index 853ecb93b8..0ac1a9a122 100644 --- a/python/lsst/daf/butler/registry/queries/expressions/parser/parserLex.py +++ b/python/lsst/daf/butler/registry/queries/expressions/parser/parserLex.py @@ -79,7 +79,7 @@ class ParserLex: @classmethod def make_lexer(cls, reflags=0, **kwargs): - """Factory for lexers. + """Return lexer. Returns ------- diff --git a/python/lsst/daf/butler/registry/tests/_database.py b/python/lsst/daf/butler/registry/tests/_database.py index 1b1a8f1659..2e4eac2b78 100644 --- a/python/lsst/daf/butler/registry/tests/_database.py +++ b/python/lsst/daf/butler/registry/tests/_database.py @@ -751,7 +751,7 @@ async def side1(lock: Iterable[str] = ()) -> tuple[set[str], set[str]]: return names1, names2 async def side2() -> None: - """The other side of the concurrent locking test. + """Other side of the concurrent locking test. This side just waits a bit and then tries to insert a row into the table that the other side is trying to lock. 
Hopefully that diff --git a/python/lsst/daf/butler/registry/tests/_registry.py b/python/lsst/daf/butler/registry/tests/_registry.py index e52b0f9c0c..435f295690 100644 --- a/python/lsst/daf/butler/registry/tests/_registry.py +++ b/python/lsst/daf/butler/registry/tests/_registry.py @@ -2443,9 +2443,7 @@ def testTimespanQueries(self): } def query(where): - """Helper function that queries for visit data IDs and returns - results as a sorted, deduplicated list of visit IDs. - """ + """Return results as a sorted, deduplicated list of visit IDs.""" return sorted( { dataId["visit"] diff --git a/python/lsst/daf/butler/tests/_testRepo.py b/python/lsst/daf/butler/tests/_testRepo.py index 1903bffd7b..0f69c20f84 100644 --- a/python/lsst/daf/butler/tests/_testRepo.py +++ b/python/lsst/daf/butler/tests/_testRepo.py @@ -549,7 +549,7 @@ def apply(butler: Butler) -> None: def _mock_export( refs: Iterable[DatasetRef], *, directory: str | None = None, transfer: str | None = None ) -> Iterable[FileDataset]: - """A mock of `Datastore.export` that satisfies the requirement that + """Mock of `Datastore.export` that satisfies the requirement that the refs passed in are included in the `FileDataset` objects returned. @@ -571,7 +571,7 @@ def _mock_get( parameters: Mapping[str, Any] | None = None, storageClass: StorageClass | str | None = None, ) -> tuple[DatasetId, Mapping[str, Any] | None]: - """A mock of `Datastore.get` that just returns the integer dataset ID + """Mock of `Datastore.get` that just returns the integer dataset ID value and parameters it was given. 
""" return (ref.id, parameters) diff --git a/tests/data/registry/spatial.py b/tests/data/registry/spatial.py index fc0df27f00..818c8204f8 100644 --- a/tests/data/registry/spatial.py +++ b/tests/data/registry/spatial.py @@ -160,7 +160,7 @@ def main(): - """Main entry point for the script.""" + """Run script.""" parser = argparse.ArgumentParser(description="Create and examine spatial-topology registry test data.") default_filename = os.path.join(os.path.dirname(__file__), "spatial.yaml") parser.add_argument( diff --git a/tests/test_butler.py b/tests/test_butler.py index a9bbcf3d71..e7210b213f 100644 --- a/tests/test_butler.py +++ b/tests/test_butler.py @@ -47,7 +47,7 @@ boto3 = None def mock_s3(cls): # type: ignore[no-untyped-def] - """A no-op decorator in case moto mock_s3 can not be imported.""" + """No-op decorator in case moto mock_s3 can not be imported.""" return cls @@ -1223,7 +1223,7 @@ class FileDatastoreButlerTests(ButlerTests): """ def checkFileExists(self, root: str | ResourcePath, relpath: str | ResourcePath) -> bool: - """Checks if file exists at a given path (relative to root). + """Check if file exists at a given path (relative to root). Test testPutTemplates verifies actual physical existance of the files in the requested location. @@ -1315,8 +1315,10 @@ def testImportExportVirtualComposite(self) -> None: self.runImportExportTest(storageClass) def runImportExportTest(self, storageClass: StorageClass) -> None: - """This test does an export to a temp directory and an import back - into a new temp directory repo. It does not assume a posix datastore + """Test exporting and importing. + + This test does an export to a temp directory and an import back + into a new temp directory repo. It does not assume a posix datastore. 
""" exportButler = self.runPutGetTest(storageClass, "test_metric") @@ -1933,7 +1935,7 @@ class S3DatastoreButlerTestCase(FileDatastoreButlerTests, unittest.TestCase): """The mocked s3 interface from moto.""" def genRoot(self) -> str: - """Returns a random string of len 20 to serve as a root + """Return a random string of len 20 to serve as a root name for the temporary bucket repo. This is equivalent to tempfile.mkdtemp as this is what self.root diff --git a/tests/test_cliCmdQueryDataIds.py b/tests/test_cliCmdQueryDataIds.py index 6005fe1eda..530a641e05 100644 --- a/tests/test_cliCmdQueryDataIds.py +++ b/tests/test_cliCmdQueryDataIds.py @@ -39,9 +39,7 @@ class QueryDataIdsTest(unittest.TestCase, ButlerTestHelper): @staticmethod def _queryDataIds(repo, dimensions=(), collections=(), datasets=None, where=""): - """Helper to populate the call to script.queryDataIds with default - values. - """ + """Call script.queryDataIds, allowing for default values.""" return script.queryDataIds( repo=repo, dimensions=dimensions, diff --git a/tests/test_cliPluginLoader.py b/tests/test_cliPluginLoader.py index 83277730b8..281c63ef71 100644 --- a/tests/test_cliPluginLoader.py +++ b/tests/test_cliPluginLoader.py @@ -41,7 +41,7 @@ def command_test(): @contextmanager def duplicate_command_test_env(runner): - """A context manager that creates (and then cleans up) an environment that + """Context manager that creates (and then cleans up) an environment that declares a plugin command named 'create', which will conflict with the daf_butler 'create' command. 
diff --git a/tests/test_cliUtils.py b/tests/test_cliUtils.py index a5a27ea352..7a56ff971a 100644 --- a/tests/test_cliUtils.py +++ b/tests/test_cliUtils.py @@ -49,7 +49,7 @@ def testHelp(self): @repo_argument(help="repo help text") @directory_argument(help="directory help text") def cli(): - """The cli help message.""" + """Return the cli help message.""" pass self.runTest(cli) @@ -61,16 +61,13 @@ def testHelpWrapped(self): @repo_argument(help="repo help text") @directory_argument(help="directory help text") def cli(): - """The cli - help - message. - """ + """Return the cli help message.""" pass self.runTest(cli) def runTest(self, cli): - """Tests `utils.addArgumentHelp` and its use in repo_argument and + """Test `utils.addArgumentHelp` and its use in repo_argument and directory_argument; verifies that the argument help gets added to the command function help, and that it's added in the correct order. See addArgumentHelp for more details. diff --git a/tests/test_config.py b/tests/test_config.py index aa1e9fe646..729e5f84d9 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -186,7 +186,7 @@ def testBasics(self): self.assertFalse(c) def testDict(self): - """Test toDict()""" + """Test toDict().""" c1 = Config({"a": {"b": 1}, "c": 2}) self.assertIsInstance(c1["a"], Config) d1 = c1.toDict() @@ -198,7 +198,7 @@ def testDict(self): self.assertNotEqual(d1["a"], c1["a"]) def assertSplit(self, answer, *args): - """Helper function to compare string splitting""" + """Assert that string splitting was correct.""" for s in (answer, *args): split = Config._splitIntoKeys(s) self.assertEqual(split, answer) diff --git a/tests/test_query_relations.py b/tests/test_query_relations.py index 60f63a1ff1..ffead5de84 100644 --- a/tests/test_query_relations.py +++ b/tests/test_query_relations.py @@ -95,7 +95,7 @@ def assert_relation_str( | queries.DimensionRecordQueryResults | queries.ParentDatasetQueryResults, ) -> None: - """A specialized test assert that checks that one or 
more registry + """Assert that checks that one or more registry queries have relation trees that match the given string. Parameters From ae554392658bf9b5ecd2b53e9bd6b86a09a9f4f7 Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 14:19:01 -0700 Subject: [PATCH 05/16] Remove PositiveInt class that is no longer used --- python/lsst/daf/butler/core/datasets/ref.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/python/lsst/daf/butler/core/datasets/ref.py b/python/lsst/daf/butler/core/datasets/ref.py index 99141c5140..005fd5b1ff 100644 --- a/python/lsst/daf/butler/core/datasets/ref.py +++ b/python/lsst/daf/butler/core/datasets/ref.py @@ -35,7 +35,7 @@ from typing import TYPE_CHECKING, Any, ClassVar from lsst.utils.classes import immutable -from pydantic import BaseModel, ConstrainedInt, StrictStr, validator +from pydantic import BaseModel, StrictStr, validator from ..configSupport import LookupKey from ..dimensions import DataCoordinate, DimensionGraph, DimensionUniverse, SerializedDataCoordinate @@ -56,11 +56,6 @@ class AmbiguousDatasetError(Exception): """ -class PositiveInt(ConstrainedInt): - ge = 0 - strict = True - - class DatasetIdGenEnum(enum.Enum): """Enum used to specify dataset ID generation options.""" From f9e867469b0ae883bd81b9ef7225410be718b878 Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 14:47:41 -0700 Subject: [PATCH 06/16] Fix myriad docstyle issues --- python/lsst/daf/butler/cli/butler.py | 12 +++++++++--- python/lsst/daf/butler/cli/opt/optionGroups.py | 2 ++ python/lsst/daf/butler/cli/opt/options.py | 2 ++ python/lsst/daf/butler/core/datastore.py | 2 ++ python/lsst/daf/butler/core/json.py | 4 ++++ .../lsst/daf/butler/delegates/arrowastropy.py | 2 ++ python/lsst/daf/butler/delegates/arrownumpy.py | 2 ++ .../lsst/daf/butler/delegates/arrownumpydict.py | 2 ++ python/lsst/daf/butler/delegates/arrowtable.py | 2 ++ python/lsst/daf/butler/delegates/dataframe.py | 2 ++ 
python/lsst/daf/butler/script/ingest_files.py | 5 +++-- python/lsst/daf/butler/script/queryDataIds.py | 5 ++++- .../daf/butler/script/queryDimensionRecords.py | 8 +++++--- python/lsst/daf/butler/server.py | 17 +++++++++++++++++ python/lsst/daf/butler/tests/cliLogTestBase.py | 1 + 15 files changed, 59 insertions(+), 9 deletions(-) diff --git a/python/lsst/daf/butler/cli/butler.py b/python/lsst/daf/butler/cli/butler.py index 1de9788d54..61d72a3754 100755 --- a/python/lsst/daf/butler/cli/butler.py +++ b/python/lsst/daf/butler/cli/butler.py @@ -326,6 +326,8 @@ def _raiseIfDuplicateCommands(commands: defaultdict[str, list[str]]) -> None: class ButlerCLI(LoaderCLI): + """Specialized command loader implementing the ``butler`` command.""" + localCmdPkg = "lsst.daf.butler.cli.cmd" pluginEnvVar = "DAF_BUTLER_PLUGINS" @@ -364,11 +366,15 @@ def _cmdNameToFuncName(cls, commandName: str) -> str: @log_label_option() @ClickProgressHandler.option def cli(log_level: str, long_log: bool, log_file: str, log_tty: bool, log_label: str, progress: bool) -> None: - # log_level is handled by get_command and list_commands, and is called in - # one of those functions before this is called. long_log is handled by - # setup_logging. + """Command line interface for butler. + + log_level is handled by get_command and list_commands, and is called in + one of those functions before this is called. long_log is handled by + setup_logging. 
+ """ pass def main() -> click.Command: + """Return main entry point for command-line.""" return cli() diff --git a/python/lsst/daf/butler/cli/opt/optionGroups.py b/python/lsst/daf/butler/cli/opt/optionGroups.py index 97312be88f..39c9573333 100644 --- a/python/lsst/daf/butler/cli/opt/optionGroups.py +++ b/python/lsst/daf/butler/cli/opt/optionGroups.py @@ -31,6 +31,8 @@ class query_datasets_options(OptionGroup): # noqa: N801 + """A collection of options common to querying datasets.""" + def __init__(self, repo: bool = True, showUri: bool = True, useArguments: bool = True) -> None: self.decorators = [] if repo: diff --git a/python/lsst/daf/butler/cli/opt/options.py b/python/lsst/daf/butler/cli/opt/options.py index 6fd5d0f63c..68d3ac5836 100644 --- a/python/lsst/daf/butler/cli/opt/options.py +++ b/python/lsst/daf/butler/cli/opt/options.py @@ -60,6 +60,8 @@ class CollectionTypeCallback: + """Helper class for handling different collection types.""" + collectionTypes = tuple(collectionType.name for collectionType in CollectionType.all()) @staticmethod diff --git a/python/lsst/daf/butler/core/datastore.py b/python/lsst/daf/butler/core/datastore.py index 81ba9b1117..6b06be1220 100644 --- a/python/lsst/daf/butler/core/datastore.py +++ b/python/lsst/daf/butler/core/datastore.py @@ -67,6 +67,8 @@ class DatastoreValidationError(ValidationError): @dataclasses.dataclass(frozen=True) class Event: + """Representation of an event that can be rolled back.""" + __slots__ = {"name", "undoFunc", "args", "kwargs"} name: str undoFunc: Callable diff --git a/python/lsst/daf/butler/core/json.py b/python/lsst/daf/butler/core/json.py index 1540c654e8..69d0f26e4f 100644 --- a/python/lsst/daf/butler/core/json.py +++ b/python/lsst/daf/butler/core/json.py @@ -32,6 +32,10 @@ class SupportsSimple(Protocol): + """Protocol defining the methods required to support the standard + serialization using "simple" methods names. 
+ """ + _serializedType: Type def to_simple(self, minimal: bool) -> Any: diff --git a/python/lsst/daf/butler/delegates/arrowastropy.py b/python/lsst/daf/butler/delegates/arrowastropy.py index d98c1253b2..c488583ca4 100644 --- a/python/lsst/daf/butler/delegates/arrowastropy.py +++ b/python/lsst/daf/butler/delegates/arrowastropy.py @@ -34,6 +34,8 @@ class ArrowAstropyDelegate(ArrowTableDelegate): + """Delegate that understands the ``ArrowAstropy`` storage class.""" + _datasetType = atable.Table def getComponent(self, composite: atable.Table, componentName: str) -> Any: diff --git a/python/lsst/daf/butler/delegates/arrownumpy.py b/python/lsst/daf/butler/delegates/arrownumpy.py index 6af3f91837..ebe970a202 100644 --- a/python/lsst/daf/butler/delegates/arrownumpy.py +++ b/python/lsst/daf/butler/delegates/arrownumpy.py @@ -36,6 +36,8 @@ class ArrowNumpyDelegate(ArrowTableDelegate): + """Delegate that understands the ``ArrowNumpy`` storage class.""" + _datasetType = np.ndarray def getComponent(self, composite: np.ndarray, componentName: str) -> Any: diff --git a/python/lsst/daf/butler/delegates/arrownumpydict.py b/python/lsst/daf/butler/delegates/arrownumpydict.py index 24456ccab0..0b8df01001 100644 --- a/python/lsst/daf/butler/delegates/arrownumpydict.py +++ b/python/lsst/daf/butler/delegates/arrownumpydict.py @@ -36,6 +36,8 @@ class ArrowNumpyDictDelegate(ArrowTableDelegate): + """Delegate that understands the ``ArrowNumpyDict`` storage class.""" + _datasetType = dict def getComponent(self, composite: dict[str, np.ndarray], componentName: str) -> Any: diff --git a/python/lsst/daf/butler/delegates/arrowtable.py b/python/lsst/daf/butler/delegates/arrowtable.py index 59af12c64f..eab6fdb20a 100644 --- a/python/lsst/daf/butler/delegates/arrowtable.py +++ b/python/lsst/daf/butler/delegates/arrowtable.py @@ -34,6 +34,8 @@ class ArrowTableDelegate(StorageClassDelegate): + """Delegate that understands the ``ArrowTable`` storage class.""" + _datasetType = pa.Table def 
getComponent(self, composite: pa.Table, componentName: str) -> Any: diff --git a/python/lsst/daf/butler/delegates/dataframe.py b/python/lsst/daf/butler/delegates/dataframe.py index 770229f052..62cb03fd51 100644 --- a/python/lsst/daf/butler/delegates/dataframe.py +++ b/python/lsst/daf/butler/delegates/dataframe.py @@ -38,6 +38,8 @@ class DataFrameDelegate(StorageClassDelegate): + """Delegate that understands the ``DataFrame`` storage class.""" + def getComponent(self, composite: pandas.DataFrame, componentName: str) -> Any: """Get a component from a DataFrame. diff --git a/python/lsst/daf/butler/script/ingest_files.py b/python/lsst/daf/butler/script/ingest_files.py index 8784423a66..78b99da3b4 100644 --- a/python/lsst/daf/butler/script/ingest_files.py +++ b/python/lsst/daf/butler/script/ingest_files.py @@ -201,8 +201,9 @@ def extract_datasets_from_table( def parse_data_id_tuple(data_ids: tuple[str, ...], universe: DimensionUniverse) -> dict[str, Any]: - # Convert any additional k=v strings in the dataId tuple to dict - # form. + """Convert any additional k=v strings in the dataId tuple to dict + form. + """ data_id: dict[str, Any] = {} for id_str in data_ids: dimension_str, value = id_str.split("=") diff --git a/python/lsst/daf/butler/script/queryDataIds.py b/python/lsst/daf/butler/script/queryDataIds.py index 16f67fd4c1..44ad689bef 100644 --- a/python/lsst/daf/butler/script/queryDataIds.py +++ b/python/lsst/daf/butler/script/queryDataIds.py @@ -98,8 +98,11 @@ def queryDataIds( limit: int, offset: int, ) -> tuple[AstropyTable | None, str | None]: - # Docstring for supported parameters is the same as Registry.queryDataIds + """Query for data IDs. + Docstring for supported parameters is the same as + `~lsst.daf.butler.Registry.queryDataIds`. 
+ """ butler = Butler(repo) if datasets and collections and not dimensions: diff --git a/python/lsst/daf/butler/script/queryDimensionRecords.py b/python/lsst/daf/butler/script/queryDimensionRecords.py index f8c3544961..868986e21a 100644 --- a/python/lsst/daf/butler/script/queryDimensionRecords.py +++ b/python/lsst/daf/butler/script/queryDimensionRecords.py @@ -42,10 +42,12 @@ def queryDimensionRecords( limit: int, offset: int, ) -> Table | None: - # Docstring for supported parameters is the same as - # Registry.queryDimensionRecords except for ``no_check``, which is the - # inverse of ``check``. + """Query dimension records. + Docstring for supported parameters is the same as + `~lsst.daf.butler.Registry.queryDimensionRecords` except for ``no_check``, + which is the inverse of ``check``. + """ butler = Butler(repo) query_collections: Iterable[str] | EllipsisType | None = None diff --git a/python/lsst/daf/butler/server.py b/python/lsst/daf/butler/server.py index edb52b3e62..57e00b7576 100644 --- a/python/lsst/daf/butler/server.py +++ b/python/lsst/daf/butler/server.py @@ -85,11 +85,13 @@ def _make_global_butler() -> None: def butler_readonly_dependency() -> Butler: + """Return global read-only butler.""" _make_global_butler() return Butler(butler=GLOBAL_READONLY_BUTLER) def butler_readwrite_dependency() -> Butler: + """Return read-write butler.""" _make_global_butler() return Butler(butler=GLOBAL_READWRITE_BUTLER) @@ -116,6 +118,7 @@ def unpack_dataId(butler: Butler, data_id: SerializedDataCoordinate | None) -> D @app.get("/butler/") def read_root() -> str: + """Return message when accessing the root URL.""" return "Welcome to Excalibur... aka your Butler Server" @@ -154,6 +157,7 @@ def get_uri(id: DatasetId, butler: Butler = Depends(butler_readonly_dependency)) @app.put("/butler/v1/registry/refresh") def refresh(butler: Butler = Depends(butler_readonly_dependency)) -> None: + """Refresh the registry cache.""" # Unclear whether this should exist. 
Which butler is really being # refreshed? How do we know the server we are refreshing is used later? # For testing at the moment it is important if a test adds a dataset type @@ -172,6 +176,7 @@ def refresh(butler: Butler = Depends(butler_readonly_dependency)) -> None: def get_dataset_type( datasetTypeName: str, butler: Butler = Depends(butler_readonly_dependency) ) -> SerializedDatasetType: + """Return the dataset type.""" datasetType = butler.registry.getDatasetType(datasetTypeName) return datasetType.to_simple() @@ -187,6 +192,7 @@ def get_dataset_type( def query_all_dataset_types( components: bool | None = Query(None), butler: Butler = Depends(butler_readonly_dependency) ) -> list[SerializedDatasetType]: + """Return all dataset types.""" datasetTypes = butler.registry.queryDatasetTypes(..., components=components) return [d.to_simple() for d in datasetTypes] @@ -205,6 +211,7 @@ def query_dataset_types_re( components: bool | None = Query(None), butler: Butler = Depends(butler_readonly_dependency), ) -> list[SerializedDatasetType]: + """Return all dataset types matching a regular expression.""" expression_params = ExpressionQueryParameter(regex=regex, glob=glob) datasetTypes = butler.registry.queryDatasetTypes(expression_params.expression(), components=components) @@ -213,6 +220,7 @@ def query_dataset_types_re( @app.get("/butler/v1/registry/collection/chain/{parent:path}", response_model=list[str]) def get_collection_chain(parent: str, butler: Butler = Depends(butler_readonly_dependency)) -> list[str]: + """Return the collection chain members.""" chain = butler.registry.getCollectionChain(parent) return list(chain) @@ -227,6 +235,7 @@ def query_collections( includeChains: bool | None = Query(None), butler: Butler = Depends(butler_readonly_dependency), ) -> list[str]: + """Return collections matching query.""" expression_params = ExpressionQueryParameter(regex=regex, glob=glob) collectionTypes = CollectionType.from_names(collectionType) dataset_type = 
butler.registry.getDatasetType(datasetType) if datasetType else None @@ -243,6 +252,7 @@ def query_collections( @app.get("/butler/v1/registry/collection/type/{name:path}", response_model=str) def get_collection_type(name: str, butler: Butler = Depends(butler_readonly_dependency)) -> str: + """Return type for named collection.""" collectionType = butler.registry.getCollectionType(name) return collectionType.name @@ -254,6 +264,7 @@ def register_collection( doc: str | None = Query(None), butler: Butler = Depends(butler_readwrite_dependency), ) -> str: + """Register a collection.""" collectionType = CollectionType.from_name(collectionTypeName) butler.registry.registerCollection(name, collectionType, doc) @@ -276,6 +287,7 @@ def register_collection( def get_dataset( id: DatasetId, butler: Butler = Depends(butler_readonly_dependency) ) -> SerializedDatasetRef | None: + """Return a single dataset reference.""" ref = butler.registry.getDataset(id) if ref is not None: return ref.to_simple() @@ -286,6 +298,7 @@ def get_dataset( @app.get("/butler/v1/registry/datasetLocations/{id}", response_model=list[str]) def get_dataset_locations(id: DatasetId, butler: Butler = Depends(butler_readonly_dependency)) -> list[str]: + """Return locations of datasets.""" # Takes an ID so need to convert to a real DatasetRef fake_ref = SerializedDatasetRef(id=id) @@ -317,6 +330,7 @@ def find_dataset( collections: list[str] | None = Query(None), butler: Butler = Depends(butler_readonly_dependency), ) -> SerializedDatasetRef | None: + """Return a single dataset reference matching query.""" collection_query = collections if collections else None ref = butler.registry.findDataset( @@ -337,6 +351,7 @@ def find_dataset( def query_datasets( query: QueryDatasetsModel, butler: Butler = Depends(butler_readonly_dependency) ) -> list[SerializedDatasetRef]: + """Return datasets matching query.""" # This method might return a lot of results if query.collections: @@ -371,6 +386,7 @@ def query_datasets( def 
query_data_ids( query: QueryDataIdsModel, butler: Butler = Depends(butler_readonly_dependency) ) -> list[SerializedDataCoordinate]: + """Return data IDs matching query.""" if query.datasets: datasets = query.datasets.expression() else: @@ -406,6 +422,7 @@ def query_data_ids( def query_dimension_records( element: str, query: QueryDimensionRecordsModel, butler: Butler = Depends(butler_readonly_dependency) ) -> list[SerializedDimensionRecord]: + """Return dimension records matching query.""" if query.datasets: datasets = query.datasets.expression() else: diff --git a/python/lsst/daf/butler/tests/cliLogTestBase.py b/python/lsst/daf/butler/tests/cliLogTestBase.py index affcff232a..7844160794 100644 --- a/python/lsst/daf/butler/tests/cliLogTestBase.py +++ b/python/lsst/daf/butler/tests/cliLogTestBase.py @@ -75,6 +75,7 @@ def command_log_settings_test( expected_lsstbutler_level: str, expected_lsstx_level: str, ) -> None: + """Test command-line log settings.""" LogLevel = namedtuple("LogLevel", ("expected", "actual", "name")) logLevels = [ From 4989c418fa1c9254e2295d947f417f3e9c0e6dac Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 15:44:07 -0700 Subject: [PATCH 07/16] More doc fixes --- python/lsst/daf/butler/registry/_config.py | 2 ++ python/lsst/daf/butler/registry/_registry.py | 2 +- .../daf/butler/registry/queries/_readers.py | 2 ++ .../daf/butler/registry/tests/_database.py | 4 +++- .../lsst/daf/butler/script/_pruneDatasets.py | 2 ++ .../lsst/daf/butler/tests/_datasetsHelper.py | 2 ++ tests/test_butler.py | 4 ++++ tests/test_cliCmdConfigDump.py | 2 ++ tests/test_cliCmdConfigValidate.py | 2 ++ tests/test_cliCmdCreate.py | 2 ++ tests/test_cliCmdImport.py | 2 ++ tests/test_cliCmdIngestFiles.py | 2 ++ tests/test_cliCmdPruneDatasets.py | 3 +++ tests/test_cliCmdQueryCollections.py | 6 +++++ tests/test_cliCmdQueryDataIds.py | 2 ++ tests/test_cliCmdQueryDatasetTypes.py | 4 ++++ tests/test_cliCmdQueryDatasets.py | 3 +++ 
tests/test_cliCmdQueryDimensionRecords.py | 2 ++ tests/test_cliCmdRetrieveArtifacts.py | 2 ++ tests/test_cliLog.py | 2 ++ tests/test_cliPluginLoader.py | 5 ++++ tests/test_cliUtilSplitCommas.py | 3 +++ tests/test_cliUtilToUpper.py | 3 +++ tests/test_cliUtils.py | 24 +++++++++++++------ tests/test_composites.py | 2 ++ tests/test_config.py | 23 ++++++++++++++++-- tests/test_constraints.py | 2 ++ tests/test_datastore.py | 5 ++++ tests/test_dimensions.py | 2 ++ tests/test_expressions.py | 2 ++ tests/test_logging.py | 2 ++ tests/test_postgresql.py | 2 ++ tests/test_quantum.py | 2 ++ tests/test_sqlite.py | 1 + tests/test_storageClass.py | 2 ++ tests/test_testRepo.py | 2 ++ tests/test_utils.py | 6 +++++ 37 files changed, 129 insertions(+), 11 deletions(-) diff --git a/python/lsst/daf/butler/registry/_config.py b/python/lsst/daf/butler/registry/_config.py index 67eaa33654..4212b2f11d 100644 --- a/python/lsst/daf/butler/registry/_config.py +++ b/python/lsst/daf/butler/registry/_config.py @@ -38,6 +38,8 @@ class RegistryConfig(ConfigSubset): + """Configuration specific to a butler Registry.""" + component = "registry" requiredKeys = ("db",) defaultConfigFile = "registry.yaml" diff --git a/python/lsst/daf/butler/registry/_registry.py b/python/lsst/daf/butler/registry/_registry.py index 29ab5a3453..8ca9cdadcf 100644 --- a/python/lsst/daf/butler/registry/_registry.py +++ b/python/lsst/daf/butler/registry/_registry.py @@ -1666,7 +1666,7 @@ def queryDatasetAssociations( @property def obsCoreTableManager(self) -> ObsCoreTableManager | None: - """ObsCore manager instance for this registry + """The ObsCore manager instance for this registry (`~.interfaces.ObsCoreTableManager` or `None`). 
diff --git a/python/lsst/daf/butler/registry/queries/_readers.py b/python/lsst/daf/butler/registry/queries/_readers.py index e112e2cd10..6b61cbb82d 100644 --- a/python/lsst/daf/butler/registry/queries/_readers.py +++ b/python/lsst/daf/butler/registry/queries/_readers.py @@ -321,6 +321,8 @@ def columns_required(self) -> Set[ColumnTag]: class DimensionRecordReader: + """Read dimension records.""" + def __init__(self, element: DimensionElement): self._cls = element.RecordClass self._tags = element.RecordClass.fields.columns diff --git a/python/lsst/daf/butler/registry/tests/_database.py b/python/lsst/daf/butler/registry/tests/_database.py index 2e4eac2b78..e3bd8608dd 100644 --- a/python/lsst/daf/butler/registry/tests/_database.py +++ b/python/lsst/daf/butler/registry/tests/_database.py @@ -763,7 +763,9 @@ async def side2() -> None: """ def toRunInThread(): - """SQLite locking isn't asyncio-friendly unless we actually + """Create new SQLite connection for use in thread. + + SQLite locking isn't asyncio-friendly unless we actually run it in another thread. 
And SQLite gets very unhappy if we try to use a connection from multiple threads, so we have to create the new connection here instead of out in the main diff --git a/python/lsst/daf/butler/script/_pruneDatasets.py b/python/lsst/daf/butler/script/_pruneDatasets.py index b62548a2d3..b66e93b9e2 100644 --- a/python/lsst/daf/butler/script/_pruneDatasets.py +++ b/python/lsst/daf/butler/script/_pruneDatasets.py @@ -61,6 +61,8 @@ class PruneDatasetsResult: onConfirmation: Callable | None class State(Enum): + """State associated with dataset pruning request.""" + INIT = auto() DRY_RUN_COMPLETE = auto() AWAITING_CONFIRMATION = auto() diff --git a/python/lsst/daf/butler/tests/_datasetsHelper.py b/python/lsst/daf/butler/tests/_datasetsHelper.py index a644a9d3f7..ff7e7f0b6a 100644 --- a/python/lsst/daf/butler/tests/_datasetsHelper.py +++ b/python/lsst/daf/butler/tests/_datasetsHelper.py @@ -160,6 +160,8 @@ def _writeFile(self, inMemoryDataset: Any) -> None: class MultiDetectorFormatter(YamlFormatter): + """A formatter that requires a detector to be specified in the dataID.""" + def _writeFile(self, inMemoryDataset: Any) -> None: raise NotImplementedError("Can not write") diff --git a/tests/test_butler.py b/tests/test_butler.py index e7210b213f..7e94633b8c 100644 --- a/tests/test_butler.py +++ b/tests/test_butler.py @@ -118,6 +118,7 @@ def clean_environment() -> None: def makeExampleMetrics() -> MetricsExample: + """Return example dataset suitable for tests.""" return MetricsExample( {"AM1": 5.2, "AM2": 30.6}, {"a": [1, 2, 3], "b": {"blue": 5, "red": "green"}}, @@ -2325,10 +2326,13 @@ def assertButlerTransfers(self, purge: bool = False, storageClassName: str = "St class ChainedDatastoreTransfers(PosixDatastoreTransfers): + """Test transfers using a chained datastore.""" + configFile = os.path.join(TESTDIR, "config/basic/butler-chained.yaml") def setup_module(module: types.ModuleType) -> None: + """Set up the module for pytest.""" clean_environment() diff --git 
a/tests/test_cliCmdConfigDump.py b/tests/test_cliCmdConfigDump.py index 6d4d2d86c8..924fbfac30 100644 --- a/tests/test_cliCmdConfigDump.py +++ b/tests/test_cliCmdConfigDump.py @@ -38,6 +38,8 @@ class ConfigDumpTest(CliCmdTestBase, unittest.TestCase): + """Test the butler config-dump command line.""" + mockFuncName = "lsst.daf.butler.cli.cmd.commands.script.configDump" @staticmethod diff --git a/tests/test_cliCmdConfigValidate.py b/tests/test_cliCmdConfigValidate.py index 66e6eba0a2..de582753aa 100644 --- a/tests/test_cliCmdConfigValidate.py +++ b/tests/test_cliCmdConfigValidate.py @@ -31,6 +31,8 @@ class ValidateTest(CliCmdTestBase, unittest.TestCase): + """Test the config validation command-line.""" + mockFuncName = "lsst.daf.butler.cli.cmd.commands.script.configValidate" @staticmethod diff --git a/tests/test_cliCmdCreate.py b/tests/test_cliCmdCreate.py index 68372bcea1..1052b42d1f 100644 --- a/tests/test_cliCmdCreate.py +++ b/tests/test_cliCmdCreate.py @@ -26,6 +26,8 @@ class CreateTest(CliCmdTestBase, unittest.TestCase): + """Test the butler create command-line.""" + mockFuncName = "lsst.daf.butler.cli.cmd.commands.script.createRepo" @staticmethod diff --git a/tests/test_cliCmdImport.py b/tests/test_cliCmdImport.py index cd111c1671..116400cf40 100644 --- a/tests/test_cliCmdImport.py +++ b/tests/test_cliCmdImport.py @@ -30,6 +30,8 @@ class ImportTestCase(CliCmdTestBase, unittest.TestCase): + """Test the butler import command-line.""" + mockFuncName = "lsst.daf.butler.cli.cmd.commands.script.butlerImport" @staticmethod diff --git a/tests/test_cliCmdIngestFiles.py b/tests/test_cliCmdIngestFiles.py index bd0bed20a9..eaddf402bc 100644 --- a/tests/test_cliCmdIngestFiles.py +++ b/tests/test_cliCmdIngestFiles.py @@ -37,6 +37,8 @@ class CliIngestFilesTest(unittest.TestCase, ButlerTestHelper): + """Test ingest-files command line.""" + configFile = os.path.join(TESTDIR, "config/basic/butler.yaml") def setUp(self): diff --git a/tests/test_cliCmdPruneDatasets.py 
b/tests/test_cliCmdPruneDatasets.py index e4dbe8c1eb..304f19ed5f 100644 --- a/tests/test_cliCmdPruneDatasets.py +++ b/tests/test_cliCmdPruneDatasets.py @@ -55,16 +55,19 @@ def getTables(): + """Return test table.""" if doFindTables: return (Table(((1, 2, 3),), names=("foo",)),) return tuple() def getDatasets(): + """Return the datasets string.""" return "datasets" def makeQueryDatasets(*args, **kwargs): + """Return a query datasets object.""" return QueryDatasets(*args, **kwargs) diff --git a/tests/test_cliCmdQueryCollections.py b/tests/test_cliCmdQueryCollections.py index 60abf1d86c..c66f7ff093 100644 --- a/tests/test_cliCmdQueryCollections.py +++ b/tests/test_cliCmdQueryCollections.py @@ -39,6 +39,8 @@ class QueryCollectionsCmdTest(CliCmdTestBase, unittest.TestCase): + """Test the query-collections command-line.""" + mockFuncName = "lsst.daf.butler.cli.cmd.commands.script.queryCollections" @staticmethod @@ -79,6 +81,8 @@ def test_all(self): class QueryCollectionsScriptTest(ButlerTestHelper, unittest.TestCase): + """Test the query-collections script interface.""" + def setUp(self): self.runner = LogCliRunner() @@ -114,6 +118,8 @@ def testGetCollections(self): class ChainedCollectionsTest(ButlerTestHelper, unittest.TestCase): + """Test the collection-chain command-line interface.""" + def setUp(self): self.runner = LogCliRunner() diff --git a/tests/test_cliCmdQueryDataIds.py b/tests/test_cliCmdQueryDataIds.py index 530a641e05..bc8a97ce54 100644 --- a/tests/test_cliCmdQueryDataIds.py +++ b/tests/test_cliCmdQueryDataIds.py @@ -35,6 +35,8 @@ class QueryDataIdsTest(unittest.TestCase, ButlerTestHelper): + """Test the query-data-ids command-line.""" + mockFuncName = "lsst.daf.butler.cli.cmd.commands.script.queryDataIds" @staticmethod diff --git a/tests/test_cliCmdQueryDatasetTypes.py b/tests/test_cliCmdQueryDatasetTypes.py index 7356550946..4491b4c020 100644 --- a/tests/test_cliCmdQueryDatasetTypes.py +++ b/tests/test_cliCmdQueryDatasetTypes.py @@ -34,6 +34,8 @@ class 
QueryDatasetTypesCmdTest(CliCmdTestBase, unittest.TestCase): + """Test the query-dataset-types command line.""" + mockFuncName = "lsst.daf.butler.cli.cmd.commands.script.queryDatasetTypes" @staticmethod @@ -65,6 +67,8 @@ def test_all(self): class QueryDatasetTypesScriptTest(ButlerTestHelper, unittest.TestCase): + """Test the query-dataset-types script interface.""" + def testQueryDatasetTypes(self): self.maxDiff = None datasetName = "test" diff --git a/tests/test_cliCmdQueryDatasets.py b/tests/test_cliCmdQueryDatasets.py index 80dbca1b9c..948a6c63de 100644 --- a/tests/test_cliCmdQueryDatasets.py +++ b/tests/test_cliCmdQueryDatasets.py @@ -36,6 +36,7 @@ def expectedFilesystemDatastoreTables(root: ResourcePath): + """Return the expected table contents.""" return ( AstropyTable( array( @@ -134,6 +135,8 @@ def expectedFilesystemDatastoreTables(root: ResourcePath): class QueryDatasetsTest(unittest.TestCase, ButlerTestHelper): + """Test the query-datasets command-line.""" + configFile = os.path.join(TESTDIR, "config/basic/butler.yaml") storageClassFactory = StorageClassFactory() diff --git a/tests/test_cliCmdQueryDimensionRecords.py b/tests/test_cliCmdQueryDimensionRecords.py index f571b4a8c8..a1716eef65 100644 --- a/tests/test_cliCmdQueryDimensionRecords.py +++ b/tests/test_cliCmdQueryDimensionRecords.py @@ -48,6 +48,8 @@ class QueryDimensionRecordsTest(unittest.TestCase, ButlerTestHelper): + """Test the query-dimension-records command-line.""" + mockFuncName = "lsst.daf.butler.cli.cmd.commands.script.queryDimensionRecords" configFile = os.path.join(TESTDIR, "config/basic/butler.yaml") diff --git a/tests/test_cliCmdRetrieveArtifacts.py b/tests/test_cliCmdRetrieveArtifacts.py index bde3300995..4762283f6a 100644 --- a/tests/test_cliCmdRetrieveArtifacts.py +++ b/tests/test_cliCmdRetrieveArtifacts.py @@ -35,6 +35,8 @@ class CliRetrieveArtifactsTest(unittest.TestCase, ButlerTestHelper): + """Test the retrieve-artifacts command-line.""" + configFile = os.path.join(TESTDIR, 
"config/basic/butler.yaml") storageClassFactory = StorageClassFactory() diff --git a/tests/test_cliLog.py b/tests/test_cliLog.py index c9472b79f2..9e321b3bcc 100644 --- a/tests/test_cliLog.py +++ b/tests/test_cliLog.py @@ -49,6 +49,8 @@ class CliLogTestCase(CliLogTestBase, unittest.TestCase): class ConvertPyLogLevelTestCase(unittest.TestCase): + """Test python command-line log levels.""" + def test_convertToPyLogLevel(self): self.assertEqual(logging.CRITICAL, CliLog._getPyLogLevel("CRITICAL")) self.assertEqual(logging.ERROR, CliLog._getPyLogLevel("ERROR")) diff --git a/tests/test_cliPluginLoader.py b/tests/test_cliPluginLoader.py index 281c63ef71..4cc62ce956 100644 --- a/tests/test_cliPluginLoader.py +++ b/tests/test_cliPluginLoader.py @@ -36,6 +36,7 @@ @click.command() def command_test(): + """Run command test.""" click.echo(message="test command") @@ -58,6 +59,8 @@ def duplicate_command_test_env(runner): class FailedLoadTest(unittest.TestCase): + """Test failed plugin loading.""" + def setUp(self): self.runner = LogCliRunner() @@ -87,6 +90,8 @@ def cli(): class PluginLoaderTest(unittest.TestCase): + """Test the command-line plugin loader.""" + def setUp(self): self.runner = LogCliRunner() diff --git a/tests/test_cliUtilSplitCommas.py b/tests/test_cliUtilSplitCommas.py index 89863b9183..1b9ca50f05 100644 --- a/tests/test_cliUtilSplitCommas.py +++ b/tests/test_cliUtilSplitCommas.py @@ -34,10 +34,13 @@ @click.command() @click.option("--list-of-values", "-l", multiple=True, callback=split_commas) def cli(list_of_values): + """Run mocked command line.""" mock(list_of_values) class SplitCommasTestCase(unittest.TestCase): + """Test the split commas utility.""" + def setUp(self): self.runner = LogCliRunner() diff --git a/tests/test_cliUtilToUpper.py b/tests/test_cliUtilToUpper.py index 1669ea40de..e6ed2c049d 100644 --- a/tests/test_cliUtilToUpper.py +++ b/tests/test_cliUtilToUpper.py @@ -31,10 +31,13 @@ @click.command() @click.option("--value", callback=to_upper) def 
cli(value): + """Run mock command.""" click.echo(value) class ToUpperTestCase(unittest.TestCase): + """Test the to_upper function.""" + def setUp(self): self.runner = LogCliRunner() diff --git a/tests/test_cliUtils.py b/tests/test_cliUtils.py index 7a56ff971a..498d9260f2 100644 --- a/tests/test_cliUtils.py +++ b/tests/test_cliUtils.py @@ -42,6 +42,8 @@ class ArgumentHelpGeneratorTestCase(unittest.TestCase): + """Test the help system.""" + def testHelp(self): @click.command() # Use custom help in the arguments so that any changes to default help @@ -89,6 +91,8 @@ def runTest(self, cli): class UnwrapStringTestCase(unittest.TestCase): + """Test string unwrapping.""" + def test_leadingNewline(self): testStr = """ foo bar @@ -132,14 +136,16 @@ def test_lineBreaks(self): class MWOptionTest(unittest.TestCase): + """Test MWOption.""" + def setUp(self): self.runner = LogCliRunner() - def test_addElipsisToMultiple(self): - """Verify that MWOption adds elipsis to the option metavar when + def test_addEllipsisToMultiple(self): + """Verify that MWOption adds ellipsis to the option metavar when `multiple=True` - The default behavior of click is to not add elipsis to options that + The default behavior of click is to not add ellipsis to options that have `multiple=True`. """ @@ -154,13 +160,13 @@ def cmd(things): --things TEXT ...""" self.assertIn(expectedOutput, result.output) - def test_addElipsisToNargs(self): + def test_addEllipsisToNargs(self): """Verify that MWOption adds " ..." after the option metavar when `nargs` is set to more than 1 and less than 1. - The default behavior of click is to add elipsis when nargs does not - equal 1, but it does not put a space before the elipsis and we prefer - a space between the metavar and the elipsis. + The default behavior of click is to add ellipsis when nargs does not + equal 1, but it does not put a space before the ellipsis and we prefer + a space between the metavar and the ellipsis. 
""" for numberOfArgs in (0, 1, 2): # nargs must be >= 0 for an option @@ -330,6 +336,8 @@ def test_section_function(self): class MWPathTest(unittest.TestCase): + """Test MWPath.""" + def getCmd(self, exists): @click.command() @click.option("--name", type=MWPath(exists=exists)) @@ -377,6 +385,8 @@ def test_exist(self): class MWCommandTest(unittest.TestCase): + """Test MWCommand.""" + def setUp(self): self.runner = click.testing.CliRunner() self.ctx = None diff --git a/tests/test_composites.py b/tests/test_composites.py index b07c1666f6..0ae2a09130 100644 --- a/tests/test_composites.py +++ b/tests/test_composites.py @@ -28,6 +28,8 @@ class TestCompositesConfig(unittest.TestCase): + """Test composites configuration.""" + @classmethod def setUpClass(cls): cls.configFile = os.path.join(TESTDIR, "config", "basic", "composites.yaml") diff --git a/tests/test_config.py b/tests/test_config.py index 729e5f84d9..8291ac7374 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -34,8 +34,7 @@ @contextlib.contextmanager def modified_environment(**environ): - """ - Temporarily set environment variables. + """Temporarily set environment variables. >>> with modified_environment(DAF_BUTLER_CONFIG_PATHS="/somewhere"): ... 
os.environ["DAF_BUTLER_CONFIG_PATHS"] == "/somewhere" @@ -59,43 +58,61 @@ def modified_environment(**environ): class ExampleWithConfigFileReference: + """Example class referenced from config file.""" + defaultConfigFile = "viacls.yaml" class ExampleWithConfigFileReference2: + """Example class referenced from config file.""" + defaultConfigFile = "viacls2.yaml" class ConfigTest(ConfigSubset): + """Default config class for testing.""" + component = "comp" requiredKeys = ("item1", "item2") defaultConfigFile = "testconfig.yaml" class ConfigTestPathlib(ConfigTest): + """Config with default using `pathlib.Path`.""" + defaultConfigFile = Path("testconfig.yaml") class ConfigTestEmpty(ConfigTest): + """Config pointing to empty file.""" + defaultConfigFile = "testconfig_empty.yaml" requiredKeys = () class ConfigTestButlerDir(ConfigTest): + """Simple config.""" + defaultConfigFile = "testConfigs/testconfig.yaml" class ConfigTestNoDefaults(ConfigTest): + """Test config with no defaults.""" + defaultConfigFile = None requiredKeys = () class ConfigTestAbsPath(ConfigTest): + """Test config with absolute paths.""" + defaultConfigFile = None requiredKeys = () class ConfigTestCls(ConfigTest): + """Test config.""" + defaultConfigFile = "withcls.yaml" @@ -668,6 +685,8 @@ def testResource(self): class FileWriteConfigTestCase(unittest.TestCase): + """Test writing of configs.""" + def setUp(self): self.tmpdir = makeTestTempDir(TESTDIR) diff --git a/tests/test_constraints.py b/tests/test_constraints.py index 12ef007d85..2feec0a5e1 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -28,6 +28,8 @@ class ConstraintsTestCase(unittest.TestCase, DatasetTestHelper): + """Test constraints system.""" + def setUp(self): self.id = 0 diff --git a/tests/test_datastore.py b/tests/test_datastore.py index f86facb61c..6120b0087c 100644 --- a/tests/test_datastore.py +++ b/tests/test_datastore.py @@ -73,6 +73,7 @@ def makeExampleMetrics(use_none: bool = False) -> MetricsExample: + 
"""Make example dataset that can be stored in butler.""" if use_none: array = None else: @@ -1097,6 +1098,8 @@ def testTrash(self) -> None: class CleanupPosixDatastoreTestCase(DatastoreTestsBase, unittest.TestCase): + """Test datastore cleans up on failure.""" + configFile = os.path.join(TESTDIR, "config/basic/butler.yaml") def setUp(self) -> None: @@ -1773,6 +1776,8 @@ def testRepr(self) -> None: class StoredFileInfoTestCase(DatasetTestHelper, unittest.TestCase): + """Test the StoredFileInfo class.""" + storageClassFactory = StorageClassFactory() def test_StoredFileInfo(self) -> None: diff --git a/tests/test_dimensions.py b/tests/test_dimensions.py index ceb9ef6a99..14767065d0 100644 --- a/tests/test_dimensions.py +++ b/tests/test_dimensions.py @@ -433,6 +433,8 @@ def chain(self, n: int | None = None) -> Iterator: class DataCoordinateTestCase(unittest.TestCase): + """Test `DataCoordinate`.""" + RANDOM_SEED = 10 @classmethod diff --git a/tests/test_expressions.py b/tests/test_expressions.py index 1a5ff8c9c8..224c461061 100644 --- a/tests/test_expressions.py +++ b/tests/test_expressions.py @@ -42,6 +42,8 @@ class FakeDatasetRecordStorageManager: + """Fake class for representing dataset record storage.""" + ingestDate = Column("ingest_date") diff --git a/tests/test_logging.py b/tests/test_logging.py index 397a563061..ed8c283fef 100644 --- a/tests/test_logging.py +++ b/tests/test_logging.py @@ -265,6 +265,8 @@ def testMDC(self): class TestJsonLogging(unittest.TestCase): + """Test logging using JSON.""" + def testJsonLogStream(self): log = logging.getLogger(self.id()) log.setLevel(logging.INFO) diff --git a/tests/test_postgresql.py b/tests/test_postgresql.py index d40fbec1c8..78dc6c2308 100644 --- a/tests/test_postgresql.py +++ b/tests/test_postgresql.py @@ -61,6 +61,8 @@ def _startServer(root): @unittest.skipUnless(testing is not None, "testing.postgresql module not found") class PostgresqlDatabaseTestCase(unittest.TestCase, DatabaseTests): + """Test a postgres 
Registry.""" + @classmethod def setUpClass(cls): cls.root = makeTestTempDir(TESTDIR) diff --git a/tests/test_quantum.py b/tests/test_quantum.py index 9267fd85e4..168018c5cb 100644 --- a/tests/test_quantum.py +++ b/tests/test_quantum.py @@ -41,6 +41,8 @@ class MockTask: + """Mock task for testing.""" + pass diff --git a/tests/test_sqlite.py b/tests/test_sqlite.py index 0b9d71bc75..bf76af233e 100644 --- a/tests/test_sqlite.py +++ b/tests/test_sqlite.py @@ -38,6 +38,7 @@ @contextmanager def removeWritePermission(filename): + """Remove the write permission on a file.""" mode = os.stat(filename).st_mode try: os.chmod(filename, stat.S_IREAD) diff --git a/tests/test_storageClass.py b/tests/test_storageClass.py index 343f9e9803..6478c7353f 100644 --- a/tests/test_storageClass.py +++ b/tests/test_storageClass.py @@ -53,6 +53,8 @@ class PythonType3: class NotCopyable: + """Class with deep copying disabled.""" + def __deepcopy__(self, memo=None): raise RuntimeError("Can not be copied.") diff --git a/tests/test_testRepo.py b/tests/test_testRepo.py index cb2e907ae8..e0912326f4 100644 --- a/tests/test_testRepo.py +++ b/tests/test_testRepo.py @@ -66,6 +66,8 @@ def testMakeTestRepo(self): class ButlerUtilsTestSuite(unittest.TestCase): + """Test the butler test utilities.""" + @classmethod def setUpClass(cls): # Repository should be re-created for each test case, but diff --git a/tests/test_utils.py b/tests/test_utils.py index 74ea4f255f..98ddd95410 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -31,6 +31,8 @@ class NamedKeyDictTest(unittest.TestCase): + """Tests for NamedKeyDict.""" + def setUp(self): self.TestTuple = namedtuple("TestTuple", ("name", "id")) self.a = self.TestTuple(name="a", id=1) @@ -103,6 +105,8 @@ def testEquality(self): class NamedValueSetTest(unittest.TestCase): + """Tests for NamedValueSet.""" + def setUp(self): self.TestTuple = namedtuple("TestTuple", ("name", "id")) self.a = self.TestTuple(name="a", id=1) @@ -171,6 +175,8 @@ def 
testRemove(self): class GlobToRegexTestCase(unittest.TestCase): + """Tests for glob to regex.""" + def testStarInList(self): """Test that if a one of the items in the expression list is a star (stand-alone) then ``...`` is returned (which implies no restrictions) From 9e08698308241570ade652f0f0b8dee3d045378c Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 15:59:11 -0700 Subject: [PATCH 08/16] Minor fixes to parser files These files have lots of issues in docstrings and it is better to globally ignore many of the problems. --- .../queries/expressions/parser/parserLex.py | 13 ++++++------- .../queries/expressions/parser/parserYacc.py | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/python/lsst/daf/butler/registry/queries/expressions/parser/parserLex.py b/python/lsst/daf/butler/registry/queries/expressions/parser/parserLex.py index 0ac1a9a122..0deff8baf3 100644 --- a/python/lsst/daf/butler/registry/queries/expressions/parser/parserLex.py +++ b/python/lsst/daf/butler/registry/queries/expressions/parser/parserLex.py @@ -85,7 +85,6 @@ def make_lexer(cls, reflags=0, **kwargs): ------- `ply.lex.Lexer` instance.
""" - # make sure that flags that we need are there kw = dict(reflags=reflags | re.IGNORECASE | re.VERBOSE) kw.update(kwargs) @@ -157,19 +156,19 @@ def make_lexer(cls, reflags=0, **kwargs): # Define a rule so we can track line numbers def t_newline(self, t): - r"\n+" + r"""\n+""" t.lexer.lineno += len(t.value) # quoted string prefixed with 'T' def t_TIME_LITERAL(self, t): - r"T'.*?'" + """T'.*?'""" # strip quotes t.value = t.value[2:-1] return t # quoted string def t_STRING_LITERAL(self, t): - r"'.*?'" + """'.*?'""" # strip quotes t.value = t.value[1:-1] return t @@ -196,13 +195,13 @@ def t_NUMERIC_LITERAL(self, t): # qualified identifiers have one or two dots def t_QUALIFIED_IDENTIFIER(self, t): - r"[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z_][a-zA-Z0-9_]*){1,2}" + r"""[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z_][a-zA-Z0-9_]*){1,2}""" t.type = "QUALIFIED_IDENTIFIER" return t # we only support ASCII in identifier names def t_SIMPLE_IDENTIFIER(self, t): - r"[a-zA-Z_][a-zA-Z0-9_]*" + """[a-zA-Z_][a-zA-Z0-9_]*""" # Check for reserved words and make sure they are upper case reserved = self.reserved.get(t.value.upper()) if reserved is not None: @@ -213,6 +212,6 @@ def t_SIMPLE_IDENTIFIER(self, t): return t def t_error(self, t): - "Error handling rule" + """Error handling rule""" lexer = t.lexer raise ParserLexError(lexer.lexdata, t.value, lexer.lexpos, lexer.lineno) diff --git a/python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py b/python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py index 43c7f361c4..af3963713a 100644 --- a/python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py +++ b/python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py @@ -328,7 +328,7 @@ def p_predicate(self, p): else: p[0] = p[1] - def p_identifier(self, p): + def p_identifier(self, p): # noqa: D401, D403 """identifier : SIMPLE_IDENTIFIER | QUALIFIED_IDENTIFIER """ From 685d61e84b3b4d3cbb67d55c7838edebb76b5b33 Mon Sep 17 00:00:00 2001 From: 
Tim Jenness Date: Fri, 23 Jun 2023 15:59:52 -0700 Subject: [PATCH 09/16] Add D104 to pydocstyle ignore --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3e7a514dd2..217a637558 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -139,8 +139,8 @@ convention = "numpy" # We allow methods to inherit docstrings and this is not compatible with D102. # Docstring at the very first line is not required # D200, D205 and D400 all complain if the first sentence of the docstring does -# not fit on one line. -add-ignore = ["D107", "D105", "D102", "D100", "D200", "D205", "D400"] +# not fit on one line. We do not require docstrings in __init__ files (D104). +add-ignore = ["D107", "D105", "D102", "D100", "D200", "D205", "D400", "D104"] [tool.coverage.report] show_missing = true From 828c719c6459336e0b04f342f319d31c6d220916 Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 16:00:02 -0700 Subject: [PATCH 10/16] Add a ruff configuration This runs clean with "ruff ." on tests and python directory. 
--- pyproject.toml | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 217a637558..cd81f4bffa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -151,3 +151,46 @@ exclude_lines = [ "if __name__ == .__main__.:", "if TYPE_CHECKING:", ] + +[tool.ruff] +exclude = [ + "__init__.py", + "lex.py", + "yacc.py", +] +ignore = [ + "N802", + "N803", + "N806", + "N812", + "N815", + "N816", + "N999", + "D107", + "D105", + "D102", + "D104", + "D100", + "D200", + "D205", + "D400", + "E402", # Module level import not at top of file (against Rubin style) +] +line-length = 110 +select = [ + "E", # pycodestyle + "F", # pycodestyle + "N", # pep8-naming + "W", # pycodestyle + "D", # pydocstyle +] +target-version = "py310" + +[tool.ruff.per-file-ignores] +"python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py" = ["D401", "D403"] + +[tool.ruff.pycodestyle] +max-doc-length = 79 + +[tool.ruff.pydocstyle] +convention = "numpy" From c8e7f066704673da2dcc7bb38d54e5e38370a7ac Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 16:04:37 -0700 Subject: [PATCH 11/16] Add ruff linter workflow --- .github/workflows/lint.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 796ef92f91..6c463eed53 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -9,3 +9,8 @@ on: jobs: call-workflow: uses: lsst/rubin_workflows/.github/workflows/lint.yaml@main + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: chartboost/ruff-action@v1 From c1e2469267e914b58de82c26dcd1119b1396df6b Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 16:52:24 -0700 Subject: [PATCH 12/16] Fix utils test since docstring was used in test itself Ignore the D401 error instead. 
--- tests/test_cliUtils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_cliUtils.py b/tests/test_cliUtils.py index 498d9260f2..1f7faff4d6 100644 --- a/tests/test_cliUtils.py +++ b/tests/test_cliUtils.py @@ -51,7 +51,7 @@ def testHelp(self): @repo_argument(help="repo help text") @directory_argument(help="directory help text") def cli(): - """Return the cli help message.""" + """The cli help message.""" # noqa: D401 pass self.runTest(cli) @@ -63,7 +63,7 @@ def testHelpWrapped(self): @repo_argument(help="repo help text") @directory_argument(help="directory help text") def cli(): - """Return the cli help message.""" + """The cli help message.""" # noqa: D401 pass self.runTest(cli) @@ -199,9 +199,9 @@ def test_help(self): arguments are declared. Verify that MWArgument adds " ..." after the option metavar when - `nargs` != 1. The default behavior of click is to add elipsis when - nargs does not equal 1, but it does not put a space before the elipsis - and we prefer a space between the metavar and the elipsis. + `nargs` != 1. The default behavior of click is to add ellipsis when + nargs does not equal 1, but it does not put a space before the ellipsis + and we prefer a space between the metavar and the ellipsis. """ # nargs can be -1 for any number of args, or >= 1 for a specified # number of arguments. 
From c88211114cf91f958af538c71c8b6a5f07060960 Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 16:57:35 -0700 Subject: [PATCH 13/16] Enable RUF100 check (#noqa) and fix issues --- pyproject.toml | 3 +++ .../daf/butler/core/dimensions/_dataCoordinateIterable.py | 6 +++--- .../registry/queries/expressions/parser/parserYacc.py | 2 +- tests/test_cliUtilSplitCommas.py | 4 +--- tests/test_matplotlibFormatter.py | 2 +- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index cd81f4bffa..05acb4f6a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -185,6 +185,9 @@ select = [ "D", # pydocstyle ] target-version = "py310" +extend-select = [ + "RUF100", # Warn about unused noqa +] [tool.ruff.per-file-ignores] "python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py" = ["D401", "D403"] diff --git a/python/lsst/daf/butler/core/dimensions/_dataCoordinateIterable.py b/python/lsst/daf/butler/core/dimensions/_dataCoordinateIterable.py index a03cd850f8..4b834adcca 100644 --- a/python/lsst/daf/butler/core/dimensions/_dataCoordinateIterable.py +++ b/python/lsst/daf/butler/core/dimensions/_dataCoordinateIterable.py @@ -712,11 +712,11 @@ def __eq__(self, other: Any) -> bool: def __getitem__(self, index: int) -> DataCoordinate: pass - @overload # noqa: F811 (FIXME: remove for py 3.8+) - def __getitem__(self, index: slice) -> DataCoordinateSequence: # noqa: F811 + @overload + def __getitem__(self, index: slice) -> DataCoordinateSequence: pass - def __getitem__(self, index: Any) -> Any: # noqa: F811 + def __getitem__(self, index: Any) -> Any: r = self._dataIds[index] if isinstance(index, slice): return DataCoordinateSequence( diff --git a/python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py b/python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py index af3963713a..43c7f361c4 100644 --- a/python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py +++ 
b/python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py @@ -328,7 +328,7 @@ def p_predicate(self, p): else: p[0] = p[1] - def p_identifier(self, p): # noqa: D401, D403 + def p_identifier(self, p): """identifier : SIMPLE_IDENTIFIER | QUALIFIED_IDENTIFIER """ diff --git a/tests/test_cliUtilSplitCommas.py b/tests/test_cliUtilSplitCommas.py index 1b9ca50f05..5f18d531a7 100644 --- a/tests/test_cliUtilSplitCommas.py +++ b/tests/test_cliUtilSplitCommas.py @@ -48,9 +48,7 @@ def test_separate(self): """Test the split_commas callback by itself.""" ctx = "unused" param = "unused" - self.assertEqual( - split_commas(ctx, param, ("one,two", "three,four")), ("one", "two", "three", "four") # noqa E231 - ) + self.assertEqual(split_commas(ctx, param, ("one,two", "three,four")), ("one", "two", "three", "four")) self.assertEqual(split_commas(ctx, param, None), tuple()) def test_single(self): diff --git a/tests/test_matplotlibFormatter.py b/tests/test_matplotlibFormatter.py index 81cbe096c3..1b5d5dca35 100644 --- a/tests/test_matplotlibFormatter.py +++ b/tests/test_matplotlibFormatter.py @@ -30,7 +30,7 @@ try: import matplotlib - matplotlib.use("Agg") # noqa:E402 + matplotlib.use("Agg") from matplotlib import pyplot except ImportError: pyplot = None From d8fd1ff21dd6e71d4539ad39bf6d17cda3552b02 Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 16:58:09 -0700 Subject: [PATCH 14/16] Fix typo in docstring --- python/lsst/daf/butler/cli/butler.py | 2 +- python/lsst/daf/butler/cli/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/lsst/daf/butler/cli/butler.py b/python/lsst/daf/butler/cli/butler.py index 61d72a3754..c6a2b22868 100755 --- a/python/lsst/daf/butler/cli/butler.py +++ b/python/lsst/daf/butler/cli/butler.py @@ -126,7 +126,7 @@ def getLocalCommands(self) -> defaultdict[str, list[str]]: ) def list_commands(self, ctx: click.Context) -> list[str]: - """Gget all the commands that can be called by the + """Get all the 
commands that can be called by the butler command, it is used to generate the --help output. Used by Click. diff --git a/python/lsst/daf/butler/cli/utils.py b/python/lsst/daf/butler/cli/utils.py index 8be1094bd2..080b30a797 100644 --- a/python/lsst/daf/butler/cli/utils.py +++ b/python/lsst/daf/butler/cli/utils.py @@ -621,7 +621,7 @@ def make_metavar(self) -> str: """Make the metavar for the help menu. Overrides `click.Option.make_metavar`. - Adds a space and an elipsis after the metavar name if + Adds a space and an ellipsis after the metavar name if the option accepts multiple inputs, otherwise defers to the base implementation. From 60970a23b4085df82851683437eb20160245db6c Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 21:58:24 -0700 Subject: [PATCH 15/16] Replace flake8 pre-commit with ruff --- .pre-commit-config.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d2dca4fe07..7a43eaa695 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,8 @@ repos: hooks: - id: isort name: isort (python) - - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.0.275 hooks: - - id: flake8 + - id: ruff From 8714926a35feeef6bc38ff04cdb30056851f9562 Mon Sep 17 00:00:00 2001 From: Tim Jenness Date: Fri, 23 Jun 2023 22:02:20 -0700 Subject: [PATCH 16/16] Add note indicating why parserYacc.py can't be fixed --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 05acb4f6a1..c407482b55 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -190,6 +190,7 @@ extend-select = [ ] [tool.ruff.per-file-ignores] +# parserYacc docstrings can not be fixed. Docstrings are used to define grammar. "python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py" = ["D401", "D403"] [tool.ruff.pycodestyle]