Remove 'x_' (experimental) prefix from Query.general.
TallJimbo committed Sep 10, 2024
1 parent ba6a6af commit 8b11755
Showing 5 changed files with 19 additions and 21 deletions.
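The commit is a pure rename: code that called the experimental Query.x_general now calls Query.general with the same arguments. A minimal usage sketch, assuming a Butler instance named butler and the instrument/detector dimensions exercised in the tests below (the repository contents are illustrative, not part of the commit):

# Illustrative sketch only -- "butler" and the repository contents are assumed.
with butler.query() as query:
    query = query.join_dimensions(["instrument", "detector"])
    # Before this commit the call was query.x_general(...); the arguments are unchanged.
    rows = list(
        query.general(["instrument", "detector"], "detector.full_name").order_by("detector")
    )
    for row in rows:
        # Rows behave like mappings keyed by dimension and field names.
        print(row["instrument"], row["detector"], row["detector.full_name"])
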
2 changes: 1 addition & 1 deletion python/lsst/daf/butler/queries/_query.py
@@ -309,7 +309,7 @@ def dimension_records(self, element: str) -> DimensionRecordQueryResults:
result_spec = DimensionRecordResultSpec(element=self._driver.universe[element])
return DimensionRecordQueryResults(self._driver, tree, result_spec)

- def x_general(
+ def general(
self,
dimensions: DimensionGroup | Iterable[str],
*names: str,
2 changes: 1 addition & 1 deletion python/lsst/daf/butler/registry/sql_registry.py
@@ -2428,7 +2428,7 @@ def queryDatasetAssociations(
)
with self._query() as query:
query = query.join_dataset_search(datasetType, resolved_collections)
- result = query.x_general(
+ result = query.general(
datasetType.dimensions,
dataset_fields={datasetType.name: {"dataset_id", "run", "collection", "timespan"}},
find_first=False,
2 changes: 1 addition & 1 deletion python/lsst/daf/butler/remote_butler/_registry.py
@@ -521,7 +521,7 @@ def queryDatasetAssociations(
)
with self._butler.query() as query:
query = query.join_dataset_search(datasetType, resolved_collections)
- result = query.x_general(
+ result = query.general(
datasetType.dimensions,
dataset_fields={datasetType.name: {"dataset_id", "run", "collection", "timespan"}},
find_first=False,
32 changes: 15 additions & 17 deletions python/lsst/daf/butler/tests/butler_queries.py
@@ -310,7 +310,7 @@ def test_general_query(self) -> None:
# Do simple dimension queries.
with butler.query() as query:
query = query.join_dimensions(dimensions)
- rows = list(query.x_general(dimensions).order_by("detector"))
+ rows = list(query.general(dimensions).order_by("detector"))
self.assertEqual(
rows,
[
@@ -321,7 +321,7 @@
],
)
rows = list(
- query.x_general(dimensions, "detector.full_name", "purpose").order_by(
+ query.general(dimensions, "detector.full_name", "purpose").order_by(
"-detector.purpose", "full_name"
)
)
@@ -355,7 +355,7 @@ def test_general_query(self) -> None:
],
)
rows = list(
- query.x_general(dimensions, "detector.full_name", "purpose").where(
+ query.general(dimensions, "detector.full_name", "purpose").where(
"instrument = 'Cam1' AND purpose = 'WAVEFRONT'"
)
)
@@ -370,7 +370,7 @@
},
],
)
- result = query.x_general(dimensions, dimension_fields={"detector": {"full_name"}})
+ result = query.general(dimensions, dimension_fields={"detector": {"full_name"}})
self.assertEqual(set(row["detector.full_name"] for row in result), {"Aa", "Ab", "Ba", "Bb"})

# Use "flat" whose dimension group includes implied dimension.
@@ -381,7 +381,7 @@
with butler.query() as query:
query = query.join_dataset_search("flat", "imported_g")
# This just returns data IDs.
- rows = list(query.x_general(dimensions).order_by("detector"))
+ rows = list(query.general(dimensions).order_by("detector"))
self.assertEqual(
rows,
[
@@ -391,7 +391,7 @@
],
)

- result = query.x_general(dimensions, dataset_fields={"flat": ...}, find_first=True).order_by(
+ result = query.general(dimensions, dataset_fields={"flat": ...}, find_first=True).order_by(
"detector"
)
ids = {row["flat.dataset_id"] for row in result}
@@ -437,7 +437,7 @@ def test_general_query(self) -> None:
with butler.query() as query:
query = query.join_dataset_search("flat", ["tagged"])

- result = query.x_general(
+ result = query.general(
dimensions, "flat.dataset_id", "flat.run", "flat.collection", find_first=False
)
row_tuples = list(result.iter_tuples(flat))
@@ -448,7 +448,7 @@
# Query calib collection.
with butler.query() as query:
query = query.join_dataset_search("flat", ["calib"])
- result = query.x_general(
+ result = query.general(
dimensions,
"flat.dataset_id",
"flat.run",
@@ -468,7 +468,7 @@
# Query both tagged and calib collection.
with butler.query() as query:
query = query.join_dataset_search("flat", ["tagged", "calib"])
- result = query.x_general(
+ result = query.general(
dimensions,
"flat.dataset_id",
"flat.run",
@@ -496,9 +496,7 @@ def test_query_ingest_date(self) -> None:
# for schema versions 1 and 2 of datasets manager.
with butler.query() as query:
query = query.join_dataset_search("flat", "imported_g")
- rows = list(
- query.x_general(dimensions, dataset_fields={"flat": {"ingest_date"}}, find_first=False)
- )
+ rows = list(query.general(dimensions, dataset_fields={"flat": {"ingest_date"}}, find_first=False))
self.assertEqual(len(rows), 3)
for row in rows:
self.assertIsInstance(row["flat.ingest_date"], astropy.time.Time)
@@ -507,14 +505,14 @@
with butler.query() as query:
query = query.join_dataset_search("flat", "imported_g")
query1 = query.where("flat.ingest_date < before_ingest", bind={"before_ingest": before_ingest})
- rows = list(query1.x_general(dimensions))
+ rows = list(query1.general(dimensions))
self.assertEqual(len(rows), 0)
query1 = query.where("flat.ingest_date >= before_ingest", bind={"before_ingest": before_ingest})
- rows = list(query1.x_general(dimensions))
+ rows = list(query1.general(dimensions))
self.assertEqual(len(rows), 3)
# Same with a time in string literal.
query1 = query.where(f"flat.ingest_date < T'mjd/{before_ingest.tai.mjd}'")
- rows = list(query1.x_general(dimensions))
+ rows = list(query1.general(dimensions))
self.assertEqual(len(rows), 0)

def test_implied_union_record_query(self) -> None:
@@ -1756,7 +1754,7 @@ def test_calibration_join_queries(self) -> None:
for data_id, refs, _ in q.where(
x["bias"].timespan.overlaps(x.exposure.timespan), base_data_id
)
- .x_general(
+ .general(
butler.dimensions.conform(["exposure", "detector"]),
dataset_fields={"bias": ...},
find_first=True,
@@ -1779,7 +1777,7 @@
[
(data_id, refs[0])
for data_id, refs, _ in q.where(base_data_id)
- .x_general(["exposure"], dataset_fields={"bias": ...}, find_first=True)
+ .general(["exposure"], dataset_fields={"bias": ...}, find_first=True)
.iter_tuples(bias)
],
[
2 changes: 1 addition & 1 deletion python/lsst/daf/butler/transfers/_context.py
@@ -365,7 +365,7 @@ def _computeDatasetAssociations(self) -> dict[str, list[DatasetAssociation]]:
)
with self._butler.query() as query:
query = query.join_dataset_search(datasetType, resolved_collections)
- result = query.x_general(
+ result = query.general(
datasetType.dimensions,
dataset_fields={datasetType.name: {"dataset_id", "run", "collection", "timespan"}},
find_first=False,
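
For context, the three non-test call sites touched above (sql_registry.py, remote_butler/_registry.py, and transfers/_context.py) share one pattern; a minimal sketch of it with the renamed method, assuming butler is a Butler, flat_type is a DatasetType resolved beforehand, and "imported_g" is a placeholder collection name:

# Illustrative sketch of the shared call-site pattern; not code from the commit.
with butler.query() as query:
    query = query.join_dataset_search(flat_type, ["imported_g"])
    result = query.general(
        flat_type.dimensions,
        dataset_fields={flat_type.name: {"dataset_id", "run", "collection", "timespan"}},
        find_first=False,
    )
    # As in the tests above, iter_tuples yields tuples whose first two items are
    # the data ID and the matching dataset refs.
    for data_id, refs, _ in result.iter_tuples(flat_type):
        ...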
