Commit bfed269: Doc fixes following review
timj committed Jun 10, 2023 (parent: a774f2d)
Showing 7 changed files with 41 additions and 39 deletions.
python/lsst/pipe/base/connections.py: 5 additions & 5 deletions
@@ -60,16 +60,16 @@ class ScalarError(TypeError):
 
 
 class PipelineTaskConnectionDict(UserDict):
-    """A special dict class used by PipelineTaskConnectionMetaclass
+    """A special dict class used by `PipelineTaskConnectionMetaclass`.
 
-    This dict is used in PipelineTaskConnection class creation, as the
-    dictionary that is initially used as __dict__. It exists to
-    intercept connection fields declared in a PipelineTaskConnection, and
+    This dict is used in `PipelineTaskConnection` class creation, as the
+    dictionary that is initially used as ``__dict__``. It exists to
+    intercept connection fields declared in a `PipelineTaskConnection`, and
     what name is used to identify them. The names are then added to class
     level list according to the connection type of the class attribute. The
     names are also used as keys in a class level dictionary associated with
     the corresponding class attribute. This information is a duplicate of
-    what exists in __dict__, but provides a simple place to lookup and
+    what exists in ``__dict__``, but provides a simple place to lookup and
     iterate on only these variables.
     """

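The interception mechanism this docstring describes is easiest to see in a stripped-down sketch. The following illustrates the same pattern (a `UserDict` used as a class namespace that records specially typed attributes); it is not the actual `PipelineTaskConnectionDict`, and the `Input` class here is a hypothetical placeholder for a real connection field type.

```python
from collections import UserDict


class Input:
    """Placeholder standing in for a real connection field type."""


class ConnectionTrackingDict(UserDict):
    """Class-creation namespace that records which attributes are Inputs."""

    def __init__(self) -> None:
        super().__init__()
        # Duplicates what lands in __dict__, but gives a simple place to
        # look up and iterate over only the connection fields.
        self.data["inputs"] = []
        self.data["allConnections"] = {}

    def __setitem__(self, name: str, value: object) -> None:
        # Intercept connection declarations; everything else passes through.
        if isinstance(value, Input):
            self.data["inputs"].append(name)
            self.data["allConnections"][name] = value
        super().__setitem__(name, value)


# A metaclass would return such a dict from __prepare__; assigning an
# Input records its name as well as storing the attribute normally.
ns = ConnectionTrackingDict()
ns["calexp"] = Input()
assert ns["inputs"] == ["calexp"]
```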
python/lsst/pipe/base/executionButlerBuilder.py: 2 additions & 2 deletions
@@ -48,8 +48,8 @@ def _validate_dataset_type(
     ----------
     candidate : `lsst.daf.butler.DatasetType`
         The candidate dataset type.
-    previous : `dict` [Union[`str`, `~lsst.daf.butler.DatasetType``], \
-        `~lsst.daf.butler.DatasetType``]
+    previous : `dict` [ `str` | `~lsst.daf.butler.DatasetType`, \
+        `~lsst.daf.butler.DatasetType`]
         Previous dataset types found, indexed by name and also by
         dataset type. The latter provides a quick way of returning a
         previously checked dataset type.
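A minimal sketch of the dual-keyed `previous` cache this parameter describes; the `remember` helper is hypothetical, and only the indexing scheme is taken from the docstring:

```python
# Each dataset type is registered under its string name and under the
# object itself, so a later candidate can be checked against either
# key in constant time.
previous: dict = {}


def remember(dataset_type) -> None:
    previous[dataset_type.name] = dataset_type  # lookup by name
    previous[dataset_type] = dataset_type       # lookup by the type itself
```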
python/lsst/pipe/base/graph/graph.py: 8 additions & 8 deletions
@@ -437,14 +437,14 @@ def getQuantaForTask(self, taskDef: TaskDef) -> frozenset[Quantum]:
 
         Returns
         -------
-        frozenset of `~lsst.daf.butler.Quantum`
+        quanta : `frozenset` of `~lsst.daf.butler.Quantum`
             The `set` of `~lsst.daf.butler.Quantum` that is associated with the
             specified `TaskDef`.
         """
         return frozenset(node.quantum for node in self._taskToQuantumNode.get(taskDef, ()))
 
     def getNumberOfQuantaForTask(self, taskDef: TaskDef) -> int:
-        """Return all the number of `~lsst.daf.butler.Quantum` associated with
+        """Return the number of `~lsst.daf.butler.Quantum` associated with
         a `TaskDef`.
 
         Parameters
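A usage sketch for the two accessors touched by this hunk, assuming `qgraph` is a `QuantumGraph` and `task_def` is one of its `TaskDef` objects, both obtained elsewhere:

```python
# Count the quanta for one task, then fetch them as a frozenset.
n_quanta = qgraph.getNumberOfQuantaForTask(task_def)
quanta = qgraph.getQuantaForTask(task_def)
assert n_quanta == len(quanta)
```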
@@ -517,7 +517,7 @@ def findTaskWithOutput(self, datasetTypeName: DatasetTypeName) -> TaskDef | None
         Returns
         -------
         result : `TaskDef` or `None`
-            `TaskDef` that outputs `DatasetTypeName` as an output or None if
+            `TaskDef` that outputs `DatasetTypeName` as an output or `None` if
             none of the tasks produce this `DatasetTypeName`.
 
         Raises
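A sketch of the `None`-returning contract documented here; the dataset type name is illustrative and `qgraph` is assumed as above:

```python
producer = qgraph.findTaskWithOutput("deepCoadd")
if producer is None:
    print("no task in this graph produces 'deepCoadd'")
else:
    print(f"'deepCoadd' is produced by task {producer.label}")
```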
@@ -841,7 +841,7 @@ def initInputRefs(self, taskDef: TaskDef) -> list[DatasetRef] | None:
 
         Returns
         -------
-        refs : `list` [ `lsst.daf.butler.DatasetRef` ] or None
+        refs : `list` [ `~lsst.daf.butler.DatasetRef` ] or `None`
             DatasetRef for the task InitInput, can be `None`. This can return
             either resolved or non-resolved reference.
         """
@@ -857,7 +857,7 @@ def initOutputRefs(self, taskDef: TaskDef) -> list[DatasetRef] | None:
 
         Returns
         -------
-        refs : `list` [ `~lsst.daf.butler.DatasetRef` ] or None
+        refs : `list` [ `~lsst.daf.butler.DatasetRef` ] or `None`
             DatasetRefs for the task InitOutput, can be `None`. This can return
             either resolved or non-resolved reference. Resolved reference will
             match Quantum's initInputs if this is an intermediate dataset type.
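Since both accessors may return `None`, callers typically normalize to empty lists. A sketch with the same assumed `qgraph` and `task_def`:

```python
init_inputs = qgraph.initInputRefs(task_def) or []
init_outputs = qgraph.initOutputRefs(task_def) or []
for ref in (*init_inputs, *init_outputs):
    # A resolved DatasetRef carries a dataset id; an unresolved one does not.
    print(ref.datasetType.name, ref.id)
```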
@@ -900,14 +900,14 @@ def loadUri(
         ----------
         uri : convertible to `~lsst.resources.ResourcePath`
             URI from where to load the graph.
-        universe : `~lsst.daf.butler.DimensionUniverse` optional
+        universe : `~lsst.daf.butler.DimensionUniverse`, optional
             `~lsst.daf.butler.DimensionUniverse` instance, not used by the
             method itself but needed to ensure that registry data structures
             are initialized. If `None` it is loaded from the `QuantumGraph`
             saved structure. If supplied, the
             `~lsst.daf.butler.DimensionUniverse` from the loaded `QuantumGraph`
             will be validated against the supplied argument for compatibility.
-        nodes : iterable of `uuid.UUID` or None
+        nodes : iterable of `uuid.UUID` or `None`
             UUIDs that correspond to nodes in the graph. If specified, only
             these nodes will be loaded. Defaults to None, in which case all
             nodes will be loaded.
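A usage sketch for `loadUri`; the file name and UUID are placeholders, and omitting `universe` means it is read from the saved structure, as documented above:

```python
import uuid

from lsst.pipe.base import QuantumGraph

# Full load; the dimension universe comes from the saved file.
qgraph = QuantumGraph.loadUri("pipeline.qgraph")

# Partial load of selected nodes by UUID.
wanted = [uuid.UUID("00000000-0000-0000-0000-000000000000")]
subset = QuantumGraph.loadUri("pipeline.qgraph", nodes=wanted)
```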
@@ -1201,7 +1201,7 @@ def load(
             saved structure. If supplied, the
             `~lsst.daf.butler.DimensionUniverse` from the loaded `QuantumGraph`
             will be validated against the supplied argument for compatibility.
-        nodes : iterable of `uuid.UUID` or None
+        nodes : iterable of `uuid.UUID` or `None`
             UUIDs that correspond to nodes in the graph. If specified, only
             these nodes will be loaded. Defaults to None, in which case all
             nodes will be loaded.
python/lsst/pipe/base/graphBuilder.py: 6 additions & 6 deletions
@@ -266,7 +266,7 @@ def unpackSingleRefs(self, storage_classes: dict[str, str]) -> NamedKeyDict[Data
 
         Returns
         -------
-        dictionary : `NamedKeyDict`
+        dictionary : `~lsst.daf.butler.NamedKeyDict`
             Dictionary mapping `~lsst.daf.butler.DatasetType` to
             `~lsst.daf.butler.DatasetRef`, with both
             `~lsst.daf.butler.DatasetType` instances and string names usable
@@ -291,7 +291,7 @@ def unpackMultiRefs(self, storage_classes: dict[str, str]) -> NamedKeyDict[Datas
 
         Returns
         -------
-        dictionary : `NamedKeyDict`
+        dictionary : `~lsst.daf.butler.NamedKeyDict`
             Dictionary mapping `~lsst.daf.butler.DatasetType` to `list` of
             `~lsst.daf.butler.DatasetRef`, with both
             `~lsst.daf.butler.DatasetType` instances and string names usable
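The dual-key access promised by the returned `NamedKeyDict` can be shown directly; in this sketch `dataset_type` and `ref` are an assumed, pre-existing `DatasetType` and matching `DatasetRef` (e.g. obtained from a registry):

```python
from lsst.daf.butler import NamedKeyDict

refs = NamedKeyDict()
refs[dataset_type] = ref
# Both the DatasetType instance and its string name work as keys.
assert refs[dataset_type] is refs[dataset_type.name]
```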
@@ -419,7 +419,7 @@ def makeQuantum(self, datastore_records: Mapping[str, DatastoreRecordData] | Non
 
         Parameters
         ----------
-        datastore_records : `dict` [ `str`, \
+        datastore_records : `~collections.abc.Mapping` [ `str`, \
             `~lsst.daf.butler.DatastoreRecordData` ], optional
             If not `None` then fill datastore records in each generated Quantum
             using the records from this structure.
@@ -784,13 +784,13 @@ def __repr__(self) -> str:
 
     defaultDatasetQueryConstraints: NamedValueSet[DatasetType]
     """Datasets that should be used as constraints in the initial query,
-    according to tasks (`NamedValueSet`).
+    according to tasks (`~lsst.daf.butler.NamedValueSet`).
     """
 
     dimensions: DimensionGraph
     """All dimensions used by any regular input, intermediate, or output
     (not prerequisite) dataset; the set of dimension used in the "Big Join
-    Query" (`DimensionGraph`).
+    Query" (`~lsst.daf.butler.DimensionGraph`).
 
     This is required to be a superset of all task quantum dimensions.
     """
@@ -1393,7 +1393,7 @@ def makeQuantumGraph(
         ----------
         registry : `lsst.daf.butler.Registry`
             Registry for the data repository; used for all data ID queries.
-        metadata : Optional Mapping of `str` to primitives
+        metadata : `~collections.abc.Mapping` of `str` to primitives, optional
             This is an optional parameter of extra data to carry with the
             graph. Entries in this mapping should be able to be serialized in
             JSON.
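The `metadata` mapping only needs to survive a JSON round-trip. A sketch with illustrative keys and values:

```python
import json

metadata = {
    "output_run": "u/someone/run",
    "data_query": "tract = 9813",
}
# Anything that fails this round-trip is not a valid metadata entry.
json.loads(json.dumps(metadata))
```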
python/lsst/pipe/base/pipeTools.py: 6 additions & 5 deletions
@@ -92,11 +92,12 @@ def isPipelineOrdered(pipeline: Pipeline | Iterable[TaskDef], taskFactory: TaskF
 
     Raises
     ------
-    `ImportError` is raised when task class cannot be imported.
-    `DuplicateOutputError` is raised when there is more than one producer for a
-    dataset type.
-    `MissingTaskFactoryError` is raised when TaskFactory is needed but not
-    provided.
+    ImportError
+        Raised when task class cannot be imported.
+    DuplicateOutputError
+        Raised when there is more than one producer for a dataset type.
+    MissingTaskFactoryError
+        Raised when TaskFactory is needed but not provided.
     """
     # Build a map of DatasetType name to producer's index in a pipeline
     producerIndex = {}
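With the Raises section now in standard numpydoc form, a caller-side sketch of handling these exceptions; `pipeline` is assumed to be built elsewhere:

```python
from lsst.pipe.base.pipeTools import DuplicateOutputError, isPipelineOrdered

try:
    if not isPipelineOrdered(pipeline):
        print("pipeline tasks are not in dependency order")
except DuplicateOutputError:
    print("more than one task produces the same dataset type")
```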
python/lsst/pipe/base/pipeline.py: 2 additions & 2 deletions
@@ -735,7 +735,7 @@ def write_to_uri(self, uri: ResourcePathExpression) -> None:
         uri : convertible to `~lsst.resources.ResourcePath`
             URI to write to; may have any scheme with
             `~lsst.resources.ResourcePath` write support or no scheme for a
-            local file/directory. Should have a ``.yaml``.
+            local file/directory. Should have a ``.yaml`` extension.
         """
         self._pipelineIR.write_to_uri(uri)

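A usage sketch for `write_to_uri`; the file names are illustrative:

```python
from lsst.pipe.base import Pipeline

pipeline = Pipeline.fromFile("my_pipeline.yaml")
# No scheme means a local file; the extension should be .yaml.
pipeline.write_to_uri("exported_pipeline.yaml")
```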
@@ -932,7 +932,7 @@ def makeDatasetTypesSet(
            is_input: bool,
            freeze: bool = True,
        ) -> NamedValueSet[DatasetType]:
-           """Construct a set of true `~lsst.daf.butler.DatasetType` objects
+           """Construct a set of true `~lsst.daf.butler.DatasetType` objects.
 
            Parameters
            ----------
python/lsst/pipe/base/pipelineIR.py: 12 additions & 11 deletions
@@ -188,25 +188,26 @@ def to_primitives(self) -> dict[str, list[str] | str]:
 
 class ParametersIR:
     """Intermediate representation of parameters that are global to a pipeline
 
-    These parameters are specified under a top level key named `parameters`
+    These parameters are specified under a top level key named ``parameters``
     and are declared as a yaml mapping. These entries can then be used inside
     task configuration blocks to specify configuration values. They may not be
     used in the special ``file`` or ``python`` blocks.
 
     Examples
     --------
     .. code-block:: yaml
 
         parameters:
           shared_value: 14
         tasks:
           taskA:
             class: modA
             config:
               field1: parameters.shared_value
           taskB:
             class: modB
             config:
               field2: parameters.shared_value
     """
 
     mapping: MutableMapping[str, str]
