diff --git a/python/lsst/daf/butler/direct_butler/_direct_butler_collections.py b/python/lsst/daf/butler/direct_butler/_direct_butler_collections.py
index a84cebecde..0d2d549972 100644
--- a/python/lsst/daf/butler/direct_butler/_direct_butler_collections.py
+++ b/python/lsst/daf/butler/direct_butler/_direct_butler_collections.py
@@ -117,52 +117,51 @@ def query_info(
         summary_datasets: Iterable[DatasetType] | Iterable[str] | None = None,
     ) -> Sequence[CollectionInfo]:
         info = []
-        with self._registry.caching_context():
-            if collection_types is None:
-                collection_types = CollectionType.all()
-            elif isinstance(collection_types, CollectionType):
-                collection_types = {collection_types}
-
-            records = self._registry._managers.collections.resolve_wildcard(
-                CollectionWildcard.from_expression(expression),
-                collection_types=collection_types,
-                flatten_chains=flatten_chains,
-                include_chains=include_chains,
-            )
+        if collection_types is None:
+            collection_types = CollectionType.all()
+        elif isinstance(collection_types, CollectionType):
+            collection_types = {collection_types}
+
+        records = self._registry._managers.collections.resolve_wildcard(
+            CollectionWildcard.from_expression(expression),
+            collection_types=collection_types,
+            flatten_chains=flatten_chains,
+            include_chains=include_chains,
+        )
 
-            summaries: Mapping[Any, CollectionSummary] = {}
-            if include_summary:
-                summaries = self._registry._managers.datasets.fetch_summaries(records, summary_datasets)
-
-            docs: Mapping[Any, str] = {}
-            if include_doc:
-                docs = self._registry._managers.collections.get_docs(record.key for record in records)
-
-            for record in records:
-                doc = docs.get(record.key, "")
-                children: tuple[str, ...] = tuple()
-                if record.type == CollectionType.CHAINED:
-                    assert isinstance(record, ChainedCollectionRecord)
-                    children = tuple(record.children)
-                parents: frozenset[str] | None = None
-                if include_parents:
-                    # TODO: This is non-vectorized, so expensive to do in a
-                    # loop.
-                    parents = frozenset(self._registry.getCollectionParentChains(record.name))
-                dataset_types: Set[str] | None = None
-                if summary := summaries.get(record.key):
-                    dataset_types = frozenset([dt.name for dt in summary.dataset_types])
-
-                info.append(
-                    CollectionInfo(
-                        name=record.name,
-                        type=record.type,
-                        doc=doc,
-                        parents=parents,
-                        children=children,
-                        dataset_types=dataset_types,
-                    )
+        summaries: Mapping[Any, CollectionSummary] = {}
+        if include_summary:
+            summaries = self._registry._managers.datasets.fetch_summaries(records, summary_datasets)
+
+        docs: Mapping[Any, str] = {}
+        if include_doc:
+            docs = self._registry._managers.collections.get_docs(record.key for record in records)
+
+        for record in records:
+            doc = docs.get(record.key, "")
+            children: tuple[str, ...] = tuple()
+            if record.type == CollectionType.CHAINED:
+                assert isinstance(record, ChainedCollectionRecord)
+                children = tuple(record.children)
+            parents: frozenset[str] | None = None
+            if include_parents:
+                # TODO: This is non-vectorized, so expensive to do in a
+                # loop.
+                parents = frozenset(self._registry.getCollectionParentChains(record.name))
+            dataset_types: Set[str] | None = None
+            if summary := summaries.get(record.key):
+                dataset_types = frozenset([dt.name for dt in summary.dataset_types])
+
+            info.append(
+                CollectionInfo(
+                    name=record.name,
+                    type=record.type,
+                    doc=doc,
+                    parents=parents,
+                    children=children,
+                    dataset_types=dataset_types,
                 )
+            )
 
         return info
 
diff --git a/python/lsst/daf/butler/tests/butler_queries.py b/python/lsst/daf/butler/tests/butler_queries.py
index 34ae03be95..1165e4e054 100644
--- a/python/lsst/daf/butler/tests/butler_queries.py
+++ b/python/lsst/daf/butler/tests/butler_queries.py
@@ -1804,6 +1804,21 @@ def test_calibration_join_queries(self) -> None:
             ],
         )
 
+    def test_collection_query_info(self) -> None:
+        butler = self.make_butler("base.yaml", "datasets.yaml")
+
+        info = butler.collections.query_info("imported_g", include_summary=True)
+        self.assertEqual(len(info), 1)
+        dataset_types = info[0].dataset_types
+        assert dataset_types is not None
+        self.assertCountEqual(dataset_types, ["flat", "bias"])
+
+        info = butler.collections.query_info("imported_g", include_summary=True, summary_datasets=["flat"])
+        self.assertEqual(len(info), 1)
+        dataset_types = info[0].dataset_types
+        assert dataset_types is not None
+        self.assertCountEqual(dataset_types, ["flat"])
+
 
 def _get_exposure_ids_from_dimension_records(dimension_records: Iterable[DimensionRecord]) -> list[int]:
     output = []
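
Reviewer note (not part of the patch): a minimal usage sketch of the summary_datasets argument exercised by the new test. The repository path is a placeholder, and the collection name and dataset types are taken from the test data imported above.

    from lsst.daf.butler import Butler

    butler = Butler("/path/to/repo")  # placeholder repository path

    # Without summary_datasets, the summary covers every dataset type
    # associated with the collection.
    info = butler.collections.query_info("imported_g", include_summary=True)
    print(info[0].dataset_types)  # e.g. frozenset({"bias", "flat"})

    # Passing summary_datasets restricts the summary lookup to the named
    # dataset types, so only matching entries are reported.
    info = butler.collections.query_info(
        "imported_g", include_summary=True, summary_datasets=["flat"]
    )
    print(info[0].dataset_types)  # e.g. frozenset({"flat"})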