diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index e16214c8..d136e36d 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 5.11.0
+current_version = 5.12.1
 commit = True
 tag = True
diff --git a/README.rst b/README.rst
index 7f20499a..4397320d 100644
--- a/README.rst
+++ b/README.rst
@@ -9,3 +9,18 @@
 CAVEclient
 ###########################
 This repository supplies client side code to interact with microservices in the Connectome Annotation Versioning Engine (CAVE).
+
+Installation
+###########################
+The package can be installed from PyPI
+::
+
+    pip install caveclient
+
+Documentation
+#############
+You can find full documentation on ReadTheDocs (https://caveclient.readthedocs.io).
+
+Usage examples
+##############
+- Tutorial notebook for accessing the FlyWire Connectome dataset: https://github.com/seung-lab/FlyConnectome/blob/main/CAVE%20tutorial.ipynb
diff --git a/caveclient/__init__.py b/caveclient/__init__.py
index d986b75b..b6e36c22 100644
--- a/caveclient/__init__.py
+++ b/caveclient/__init__.py
@@ -1,3 +1,3 @@
-__version__ = "5.11.0"
+__version__ = "5.12.1"
 
 from .frameworkclient import CAVEclient
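For orientation, a minimal quickstart sketch using the `CAVEclient` entry point exported above; the datastack name is a placeholder, not something this patch defines:

```python
from caveclient import CAVEclient

# "my_datastack" is illustrative; substitute a datastack you have access to.
client = CAVEclient("my_datastack")
```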
diff --git a/caveclient/chunkedgraph.py b/caveclient/chunkedgraph.py
index 1fe4efdf..97baa118 100644
--- a/caveclient/chunkedgraph.py
+++ b/caveclient/chunkedgraph.py
@@ -1,6 +1,7 @@
 """PyChunkedgraph service python interface"""
 import datetime
 import json
+import logging
 from typing import Iterable
 from urllib.parse import urlencode
@@ -17,8 +18,11 @@
     default_global_server_address,
 )
 
+
 SERVER_KEY = "cg_server_address"
 
+logger = logging.getLogger(__name__)
+
 
 def package_bounds(bounds):
     if bounds.shape != (3, 2):
@@ -78,11 +82,7 @@ def root_id_int_list_check(
     root_id,
     make_unique=False,
 ):
-    if (
-        isinstance(root_id, int)
-        or isinstance(root_id, np.uint64)
-        or isinstance(root_id, np.int64)
-    ):
+    if isinstance(root_id, (int, np.uint64, np.int64)):
         root_id = [root_id]
     elif isinstance(root_id, str):
         try:
@@ -198,7 +198,7 @@ def _process_timestamp(self, timestamp):
         if self._default_timestamp is not None:
             return self._default_timestamp
         else:
-            return datetime.datetime.utcnow()
+            return datetime.datetime.now(datetime.timezone.utc)
     else:
         return timestamp
@@ -848,7 +848,13 @@ def get_operation_details(self, operation_ids: Iterable[int]):
         return r.json()
 
     def get_lineage_graph(
-        self, root_id, timestamp_past=None, timestamp_future=None, as_nx_graph=False
+        self,
+        root_id,
+        timestamp_past=None,
+        timestamp_future=None,
+        as_nx_graph=False,
+        exclude_links_to_future=False,
+        exclude_links_to_past=False,
     ):
         """
         Returns the lineage graph for a root ID, optionally cut off in the past or
@@ -869,6 +875,14 @@ def get_lineage_graph(
             Cutoff for the lineage graph going forwards in time. By default, None.
         as_nx_graph: bool
             If True, a NetworkX graph is returned.
+        exclude_links_to_future: bool
+            If True, links from nodes before `timestamp_future` to nodes after
+            `timestamp_future` are removed. If False, links that span
+            `timestamp_future` are kept. By default, False.
+        exclude_links_to_past: bool
+            If True, links from nodes before `timestamp_past` to nodes after
+            `timestamp_past` are removed. If False, links that span
+            `timestamp_past` are kept. By default, False.
 
         Returns
         -------
@@ -908,20 +922,53 @@ def get_lineage_graph(
         data = json.dumps({"root_ids": root_id}, cls=BaseEncoder)
         r = handle_response(self.session.post(url, data=data, params=params))
 
+        if exclude_links_to_future or exclude_links_to_past:
+            bad_ids = []
+            for node in r["nodes"]:
+                node_ts = datetime.datetime.fromtimestamp(node["timestamp"])
+                node_ts = node_ts.astimezone(datetime.timezone.utc)
+                if (
+                    exclude_links_to_past
+                    and timestamp_past is not None
+                    and node_ts < timestamp_past
+                ):
+                    bad_ids.append(node["id"])
+                if (
+                    exclude_links_to_future
+                    and timestamp_future is not None
+                    and node_ts > timestamp_future
+                ):
+                    bad_ids.append(node["id"])
+
+            r["nodes"] = [node for node in r["nodes"] if node["id"] not in bad_ids]
+            r["links"] = [
+                link
+                for link in r["links"]
+                if link["source"] not in bad_ids and link["target"] not in bad_ids
+            ]
+
         if as_nx_graph:
             return nx.node_link_graph(r)
         else:
             return r
 
-    def get_latest_roots(self, root_id, timestamp_future=None):
-        """Returns root IDs that are the latest successors of a given root ID.
+    def get_latest_roots(self, root_id, timestamp=None, timestamp_future=None):
+        """
+        Returns root IDs that are related to the given `root_id` at a given
+        timestamp. Can be used to find the "latest" root IDs associated with an
+        object.
 
         Parameters
         ----------
         root_id : int
             Object root ID.
+        timestamp : datetime.datetime or None, optional
+            Timestamp at which to query IDs. If None, the current time (now) is
+            assumed.
         timestamp_future : datetime.datetime or None, optional
-            Cutoff for the search going forwards in time. By default, None.
+            DEPRECATED name, use `timestamp` instead.
+            Timestamp to query IDs from (note that it can be in the past relative
+            to the root). By default, None.
 
         Returns
         -------
@@ -930,19 +977,42 @@ def get_latest_roots(self, root_id, timestamp=None, timestamp_future=None):
         """
         root_id = root_id_int_list_check(root_id, make_unique=True)
 
-        timestamp_past = self.get_root_timestamps(root_id).min()
-
-        lineage_graph = self.get_lineage_graph(
-            root_id,
-            timestamp_past=timestamp_past,
-            timestamp_future=timestamp_future,
-            as_nx_graph=True,
-        )
+        timestamp_root = self.get_root_timestamps(root_id).min()
+        if timestamp_future is not None:
+            logger.warning("timestamp_future is deprecated, use timestamp instead")
+            timestamp = timestamp_future
 
-        out_degree_dict = dict(lineage_graph.out_degree)
-        nodes = np.array(list(out_degree_dict.keys()))
-        out_degrees = np.array(list(out_degree_dict.values()))
-        return nodes[out_degrees == 0]
+        if timestamp is None:
+            timestamp = datetime.datetime.now(datetime.timezone.utc)
+        elif timestamp.tzinfo is None:
+            timestamp = timestamp.replace(tzinfo=datetime.timezone.utc)
+
+        # the timestamp is after the root was created, so look forward in time
+        if timestamp_root < timestamp:
+            lineage_graph = self.get_lineage_graph(
+                root_id,
+                timestamp_past=timestamp_root,
+                timestamp_future=timestamp,
+                exclude_links_to_future=True,
+                as_nx_graph=True,
+            )
+            # the latest roots are the leaves of the lineage tree
+            out_degree_dict = dict(lineage_graph.out_degree)
+            nodes = np.array(list(out_degree_dict.keys()))
+            out_degrees = np.array(list(out_degree_dict.values()))
+            return nodes[out_degrees == 0]
+        else:
+            # the timestamp is in the past relative to the root
+            lineage_graph = self.get_lineage_graph(
+                root_id,
+                timestamp_future=timestamp_root,
+                timestamp_past=timestamp,
+                as_nx_graph=True,
+            )
+            in_degree_dict = dict(lineage_graph.in_degree)
+            nodes = np.array(list(in_degree_dict.keys()))
+            in_degrees = np.array(list(in_degree_dict.values()))
+            return nodes[in_degrees == 0]
 
     def get_original_roots(self, root_id, timestamp_past=None):
         """Returns root IDs that are the latest successors of a given root ID.
@@ -1045,7 +1115,8 @@ def suggest_latest_roots(
             If True, return all fractions sorted by most overlap to least,
             by default False. If False, only the top value is returned.
         """
-        curr_ids = self.get_latest_roots(root_id, timestamp_future=timestamp)
+        curr_ids = self.get_latest_roots(root_id, timestamp=timestamp)
+
         if root_id in curr_ids:
             if return_all:
                 if return_fraction_overlap:
@@ -1067,6 +1138,14 @@ def suggest_latest_roots(
         stop_layer = max(1, stop_layer)
 
         chunks_orig = self.get_leaves(root_id, stop_layer=stop_layer)
+        while len(chunks_orig) == 0:
+            stop_layer -= 1
+            if stop_layer == 1:
+                raise ValueError(
+                    f"There were no children for root_id={root_id} at level 2, something is wrong with the chunkedgraph"
+                )
+            chunks_orig = self.get_leaves(root_id, stop_layer=stop_layer)
+
         chunk_list = np.array(
             [
                 len(
@@ -1228,7 +1307,9 @@ def get_past_ids(self, root_ids, timestamp_past=None, timestamp_future=None):
     def get_delta_roots(
         self,
         timestamp_past: datetime.datetime,
-        timestamp_future: datetime.datetime = datetime.datetime.utcnow(),
+        timestamp_future: datetime.datetime = datetime.datetime.now(
+            datetime.timezone.utc
+        ),
     ):
         """
         Get the list of roots that have changed between `timetamp_past` and
@@ -1240,7 +1321,8 @@ def get_delta_roots(
         timestamp_past : datetime.datetime
             Past timepoint to query
         timestamp_future : datetime.datetime, optional
-            Future timepoint to query. Default is now.
+            Future timepoint to query. Defaults to
+            ``datetime.datetime.now(datetime.timezone.utc)``.
 
         Returns
         -------
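A sketch of how the new chunkedgraph arguments compose, assuming a configured client; the root ID is borrowed from the tests below and the timestamp is illustrative:

```python
import datetime

ts = datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc)

# Roots associated with this object as of `ts`; per the new handling above,
# a naive datetime would be interpreted as UTC.
roots = client.chunkedgraph.get_latest_roots(864691135217871271, timestamp=ts)

# Lineage graph cut off at `ts`, dropping links that cross the cutoff.
g = client.chunkedgraph.get_lineage_graph(
    864691135217871271,
    timestamp_future=ts,
    exclude_links_to_future=True,
    as_nx_graph=True,
)
```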
diff --git a/caveclient/endpoints.py b/caveclient/endpoints.py
index e3e6bd0f..a5f3644b 100644
--- a/caveclient/endpoints.py
+++ b/caveclient/endpoints.py
@@ -61,14 +61,14 @@
     "join_query": mat_v3_api + "/datastack/{datastack_name}/version/{version}/query",
     "table_count": mat_v2_api
     + "/datastack/{datastack_name}/version/{version}/table/{table_name}/count",
-    "versions": mat_v2_api + "/datastack/{datastack_name}/versions",
-    "version_metadata": mat_v2_api + "/datastack/{datastack_name}/version/{version}",
+    "versions": mat_v3_api + "/datastack/{datastack_name}/versions",
+    "version_metadata": mat_v3_api + "/datastack/{datastack_name}/version/{version}",
     "tables": mat_v2_api + "/datastack/{datastack_name}/version/{version}/tables",
     "metadata": mat_v3_api
     + "/datastack/{datastack_name}/version/{version}/table/{table_name}/metadata",
     "all_tables_metadata": mat_v3_api
     + "/datastack/{datastack_name}/version/{version}/tables/metadata",
-    "versions_metadata": mat_v2_api + "/datastack/{datastack_name}/metadata",
+    "versions_metadata": mat_v3_api + "/datastack/{datastack_name}/metadata",
     "ingest_annotation_table": mat_v2_api
     + "/materialize/run/ingest_annotations/datastack/{datastack_name}/{table_name}",
     "segmentation_metadata": mat_v3_api
@@ -85,6 +85,8 @@
     + "/datastack/{datastack_name}/version/{version}/views/{view_name}/schema",
     "view_schemas": mat_v3_api
     + "/datastack/{datastack_name}/version/{version}/views/schemas",
+    "unique_string_values": mat_v3_api
+    + "/datastack/{datastack_name}/table/{table_name}/unique_string_values",
 }
 
 materialization_api_versions = {
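The new `unique_string_values` entry follows the same `format_map` convention as its neighbors; a sketch of the expansion, where the server prefix is a stand-in rather than the real `mat_v3_api` value:

```python
# Stand-in for mat_v3_api; the real prefix comes from the server configuration.
mat_v3_api = "https://example.com/materialize/api/v3"

template = mat_v3_api + "/datastack/{datastack_name}/table/{table_name}/unique_string_values"
url = template.format_map({"datastack_name": "my_datastack", "table_name": "cell_types"})
# -> .../datastack/my_datastack/table/cell_types/unique_string_values
```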
diff --git a/caveclient/materializationengine.py b/caveclient/materializationengine.py
index c4f53bbf..f9260bd6 100644
--- a/caveclient/materializationengine.py
+++ b/caveclient/materializationengine.py
@@ -29,6 +29,7 @@
 DEFAULT_COMPRESSION = "zstd"
 
 
+
 def deserialize_query_response(response):
     """Deserialize pyarrow responses"""
     content_type = response.headers.get("Content-Type")
@@ -110,7 +111,7 @@ def concatenate_position_columns(df, inplace=False):
 
 def convert_timestamp(ts: datetime):
     if ts == "now":
-        ts = datetime.utcnow()
+        ts = datetime.now(timezone.utc)
 
     if isinstance(ts, datetime):
         if ts.tzinfo is None:
@@ -291,7 +292,7 @@ def most_recent_version(self, datastack_name=None):
         versions = self.get_versions(datastack_name=datastack_name)
         return np.max(np.array(versions))
 
-    def get_versions(self, datastack_name=None):
+    def get_versions(self, datastack_name=None, expired=False):
         """get versions available
 
         Args:
@@ -302,7 +303,8 @@
         endpoint_mapping = self.default_url_mapping
         endpoint_mapping["datastack_name"] = datastack_name
         url = self._endpoints["versions"].format_map(endpoint_mapping)
-        response = self.session.get(url)
+        query_args = {"expired": expired}
+        response = self.session.get(url, params=query_args)
         self.raise_for_status(response)
         return response.json()
 
@@ -421,11 +423,12 @@ def get_timestamp(self, version: int = None, datastack_name: str = None):
         return convert_timestamp(meta["time_stamp"])
 
     @cached(cache=TTLCache(maxsize=100, ttl=60 * 60 * 12))
-    def get_versions_metadata(self, datastack_name=None):
+    def get_versions_metadata(self, datastack_name=None, expired=False):
         """get the metadata for all the versions that are presently available and valid
 
         Args:
             datastack_name (str, optional): datastack to query. If None, defaults to the value set in the client.
+            expired (bool, optional): whether to include expired versions. Defaults to False.
 
         Returns:
             list[dict]: a list of metadata dictionaries
@@ -435,7 +438,8 @@
         endpoint_mapping = self.default_url_mapping
         endpoint_mapping["datastack_name"] = datastack_name
         url = self._endpoints["versions_metadata"].format_map(endpoint_mapping)
-        response = self.session.get(url)
+        query_args = {"expired": expired}
+        response = self.session.get(url, params=query_args)
         d = handle_response(response)
         for md in d:
             md["time_stamp"] = convert_timestamp(md["time_stamp"])
@@ -1180,7 +1184,7 @@
             allow_missing_lookups (bool, optional): If there are annotations without supervoxels and rootids yet, allow results. Defaults to False.
             random_sample: (int, optional) : if given, will do a tablesample of the table to return that many annotations
         Example:
-            live_live_query("table_name",datetime.datetime.utcnow(),
+            live_live_query("table_name",datetime.datetime.now(datetime.timezone.utc),
                 joins=[[table_name, table_column, joined_table, joined_column],
                        [joined_table, joincol2, third_table, joincol_third]]
                 suffixes={
@@ -1232,7 +1236,7 @@
         data = {}
         query_args = {}
         query_args["return_pyarrow"] = True
-        query_args['arrow_format'] = True
+        query_args["arrow_format"] = True
         query_args["merge_reference"] = False
         query_args["allow_missing_lookups"] = allow_missing_lookups
         if random_sample:
@@ -1349,7 +1353,7 @@
         Args:
             table: 'str'
             timestamp (datetime.datetime): time to materialize (in utc)
-                pass datetime.datetime.utcnow() for present time
+                pass datetime.datetime.now(datetime.timezone.utc) for present time
             filter_in_dict (dict , optional):
                 keys are column names, values are allowed entries.
                 Defaults to None.
@@ -1822,6 +1826,7 @@ def live_live_query(
         desired_resolution: Iterable = None,
         allow_missing_lookups: bool = False,
         allow_invalid_root_ids: bool = False,
+        random_sample: int = None,
     ):
         """Beta method for querying cave annotation tables with rootIDs and annotations at a
         particular timestamp. Note: this method requires more explicit mapping of filters and selection to table
@@ -1846,8 +1851,9 @@ def live_live_query(
             desired_resolution (Iterable, optional): What resolution to convert position columns to. Defaults to None will use defaults.
             allow_missing_lookups (bool, optional): If there are annotations without supervoxels and rootids yet, allow results. Defaults to False.
             allow_invalid_root_ids (bool, optional): If True, ignore root ids not valid at the given timestamp, otherwise raise an Error. Defaults to False.
+            random_sample (int, optional): if given, performs a TABLESAMPLE of the table to return approximately that many annotations
         Example:
-            live_live_query("table_name",datetime.datetime.utcnow(),
+            live_live_query("table_name",datetime.datetime.now(datetime.timezone.utc),
                 joins=[[table_name, table_column, joined_table, joined_column],
                        [joined_table, joincol2, third_table, joincol_third]]
                 suffixes={
@@ -1899,10 +1905,12 @@ def live_live_query(
         data = {}
         query_args = {}
         query_args["return_pyarrow"] = True
-        query_args['arrow_format'] = True
+        query_args["arrow_format"] = True
         query_args["merge_reference"] = False
        query_args["allow_missing_lookups"] = allow_missing_lookups
         query_args["allow_invalid_root_ids"] = allow_invalid_root_ids
+        if random_sample:
+            query_args["random_sample"] = random_sample
 
         data["table"] = table
         data["timestamp"] = timestamp
@@ -2259,6 +2267,29 @@ def query_view(
         else:
             return response.json()
 
+    def get_unique_string_values(self, table: str, datastack_name: str = None):
+        """get unique string values for a table
+
+        Args:
+            table (str): table to query
+            datastack_name (str, optional): datastack to query.
+                If None, defaults to the one specified in the client.
+
+        Returns:
+            dict[str]: a dictionary of column names and their unique string values
+        """
+        if datastack_name is None:
+            datastack_name = self.datastack_name
+
+        endpoint_mapping = self.default_url_mapping
+        endpoint_mapping["datastack_name"] = datastack_name
+        endpoint_mapping["table_name"] = table
+
+        url = self._endpoints["unique_string_values"].format_map(endpoint_mapping)
+        response = self.session.get(url, verify=self.verify)
+        self.raise_for_status(response)
+        return response.json()
+
 
 client_mapping = {
     2: MaterializatonClientV2,
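Caller-side sketches of the additions above, assuming a configured client; the table name is illustrative:

```python
import datetime

# Versions: the new `expired` flag; the default (False) preserves old behavior.
versions = client.materialize.get_versions()
all_versions_md = client.materialize.get_versions_metadata(expired=True)

# New endpoint: unique string values per column, e.g. for building filters.
uniq = client.materialize.get_unique_string_values("cell_types")

# Beta live-live query with server-side sampling of roughly 1000 annotations.
df = client.materialize.live_live_query(
    "cell_types",
    datetime.datetime.now(datetime.timezone.utc),
    random_sample=1000,
)
```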
diff --git a/docs/guide/materialization.rst b/docs/guide/materialization.rst
index 92a85177..afe04fbd 100644
--- a/docs/guide/materialization.rst
+++ b/docs/guide/materialization.rst
@@ -212,7 +212,7 @@ that is more recent that the most recent version available. For convience, you
 to automatically update the results of your query to a time in the future, such as now.
 
-For example, to pass now, use ```datetime.datetime.utcnow```. Note all timestamps are in UTC
+For example, to pass now, use ``datetime.datetime.now(datetime.timezone.utc)``. Note all timestamps are in UTC
 throughout the codebase.
 
 .. code:: python
 
     import datetime
     synapse_table = client.info.get_datastack_info()['synapse_table']
     df=client.materialize.live_query(synapse_table,
-                                     datetime.datetime.utcnow(),
+                                     datetime.datetime.now(datetime.timezone.utc),
                                      filter_equal_dict = {'post_pt_root_id': MYID})
 
 This will raise an ValueError exception if the IDs passed in your filters are not valid at the timestamp given
@@ -232,11 +232,11 @@ You can also pass a timestamp directly to query_table and it will call live_query
 
     import datetime
     synapse_table = client.info.get_datastack_info()['synapse_table']
     df=client.materialize.query_table(synapse_table,
-                                      timestamp=datetime.datetime.utcnow(),
+                                      timestamp=datetime.datetime.now(datetime.timezone.utc),
                                       filter_equal_dict = {'post_pt_root_id': MYID})
 
 
-Also, keep in mind if you run multiple queries and at each time pass ``datetime.datetime.utcnow()``,
+Also, keep in mind that if you run multiple queries, each time passing ``datetime.datetime.now(datetime.timezone.utc)``,
 there is no gauruntee that the IDs will be consistent from query to query,
 as proofreading might be happening at any time. For larger scale analysis constraining
 oneself to a materialized version will ensure consistent results.
@@ -312,7 +312,7 @@ The one required argument for ``live_query`` is the timestamp.
 
     nuc_df = client.materialize.tables.nucleus_detection_v0(
         id=my_ids
     ).live_query(
-        timestamp=datetime.datetime.utcnow(),
+        timestamp=datetime.datetime.now(datetime.timezone.utc),
     )
 
 The live query functions have similar but slightly different arguments: ``timestamp`` (required), ``offset``, ``limit``, ``split_positions``,
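The docs now standardize on timezone-aware UTC timestamps; the pattern in brief:

```python
import datetime

# Deprecated in Python 3.12: utcnow() returns a *naive* datetime.
# now_naive = datetime.datetime.utcnow()

# Preferred: an aware datetime pinned to UTC.
now = datetime.datetime.now(datetime.timezone.utc)
assert now.tzinfo is not None
```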
diff --git a/tests/test_chunkedgraph.py b/tests/test_chunkedgraph.py
index c7e2ca86..d607f998 100644
--- a/tests/test_chunkedgraph.py
+++ b/tests/test_chunkedgraph.py
@@ -2,6 +2,7 @@
 from .conftest import test_info, TEST_LOCAL_SERVER, TEST_DATASTACK
 import pytest
 import responses
+from responses.matchers import json_params_matcher
 import pytz
 import numpy as np
 from caveclient.endpoints import (
@@ -35,7 +36,6 @@ def package_timestamp(timestamp, name="timestamp"):
 
 
 class TestChunkedgraph:
-
     _default_endpoint_map = {
         "cg_server_address": TEST_LOCAL_SERVER,
         "table_id": test_info["segmentation_source"].split("/")[-1],
@@ -47,7 +47,7 @@ def test_get_roots(self, myclient):
         url = chunkedgraph_endpoints_v1["get_roots"].format_map(endpoint_mapping)
         svids = np.array([97557743795364048, 75089979126506763], dtype=np.uint64)
         root_ids = np.array([864691135217871271, 864691135566275148], dtype=np.uint64)
-        now = datetime.datetime.utcnow()
+        now = datetime.datetime.now(datetime.timezone.utc)
         query_d = package_timestamp(now)
         qurl = url + "?" + urlencode(query_d)
 
         responses.add(
@@ -231,7 +231,7 @@ def test_delta_roots(self, myclient):
         endpoint_mapping = self._default_endpoint_map
         url = chunkedgraph_endpoints_v1["delta_roots"].format_map(endpoint_mapping)
-        now = datetime.datetime.utcnow()
+        now = datetime.datetime.now(datetime.timezone.utc)
         timestamp_past = now - datetime.timedelta(days=1)
         query_d = package_timestamp(timestamp_past, name="timestamp_past")
         query_d.update(package_timestamp(now, name="timestamp_future"))
@@ -382,7 +382,7 @@ def test_get_remeshing(self, myclient):
             responses.POST,
             status=200,
             url=url,
-            match=[responses.json_params_matcher({"new_lvl2_ids": chunkid_list})],
+            match=[json_params_matcher({"new_lvl2_ids": chunkid_list})],
         )
 
         myclient.chunkedgraph.remesh_level2_chunks(chunk_ids)
@@ -404,7 +404,7 @@ def test_is_latest_roots(self, myclient):
             status=200,
             url=url,
             json={"is_latest": is_latest_list},
-            match=[responses.json_params_matcher({"node_ids": root_id_list})],
+            match=[json_params_matcher({"node_ids": root_id_list})],
         )
 
         qis_latest = myclient.chunkedgraph.is_latest_roots(root_ids)
@@ -434,7 +434,7 @@ def test_past_ids(self, myclient):
                 "864691136577570580": [864691136721486702, 864691133958789149],
             },
         }
-        now = datetime.datetime.utcnow()
+        now = datetime.datetime.now(datetime.timezone.utc)
         timestamp_past = now - datetime.timedelta(days=7)
 
         query_d = package_timestamp(timestamp_past, name="timestamp_past")
@@ -446,7 +446,7 @@ def test_past_ids(self, myclient):
             status=200,
             url=qurl,
             json=id_map_str,
-            match=[responses.json_params_matcher({"root_ids": root_id_list})],
+            match=[json_params_matcher({"root_ids": root_id_list})],
         )
 
         qid_map = myclient.chunkedgraph.get_past_ids(
@@ -470,7 +470,7 @@ def test_lineage_graph(self, myclient):
         url = chunkedgraph_endpoints_v1["handle_lineage_graph"].format_map(
             endpoint_mapping
         )
-        now = datetime.datetime.utcnow()
+        now = datetime.datetime.now(datetime.timezone.utc)
         timestamp_past = now - datetime.timedelta(days=7)
 
         query_d = package_timestamp(timestamp_past, name="timestamp_past")
@@ -621,7 +621,7 @@ def test_preview_split(self, myclient):
             url=url,
             body=json.dumps(response_data),
             match=[
-                responses.json_params_matcher(
+                json_params_matcher(
                     {"sources": qdata_svid["sources"], "sinks": qdata_svid["sinks"]}
                 )
             ],
@@ -798,3 +798,35 @@ def test_get_info(self, myclient):
 
         base_resolution = myclient.chunkedgraph.base_resolution
         assert np.all(base_resolution == [8, 8, 40])
+
+    @responses.activate
+    def test_is_valid_nodes(self, myclient):
+
+        endpoint_mapping = self._default_endpoint_map
+        url = chunkedgraph_endpoints_v1["valid_nodes"].format_map(endpoint_mapping)
+        query_nodes = [91070075234304972, 91070075234296549]
+        data = {"node_ids": query_nodes}
+        return_data = {"valid_roots": query_nodes}
+        responses.add(
+            responses.GET,
+            status=200,
+            url=url,
+            json=return_data,
+            match=[json_params_matcher(data)],
+        )
+
+        out = myclient.chunkedgraph.is_valid_nodes(query_nodes)
+        assert np.all(out)
+
+        query_nodes = [0, -1]
+        data = {"node_ids": [0, 18446744073709551615]}
+        return_data = {"valid_roots": []}
+        responses.add(
+            responses.GET,
+            status=200,
+            url=url,
+            json=return_data,
+            match=[json_params_matcher(data)],
+        )
+        out = myclient.chunkedgraph.is_valid_nodes(query_nodes)
+        assert not np.any(out)
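These tests (and those below) migrate from the `responses.json_params_matcher` alias, which newer releases of the `responses` library deprecate, to the `responses.matchers` module; the pattern in brief, with an illustrative URL:

```python
import responses
from responses.matchers import json_params_matcher

# Registers a mock that only matches when the request's JSON body
# equals the given dict.
responses.add(
    responses.GET,
    url="https://example.com/valid_nodes",
    json={"valid_roots": [1, 2]},
    match=[json_params_matcher({"node_ids": [1, 2]})],
)
```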
diff --git a/tests/test_materialization.py b/tests/test_materialization.py
index 2186b481..9ea76e06 100644
--- a/tests/test_materialization.py
+++ b/tests/test_materialization.py
@@ -6,6 +6,7 @@
 )
 import pandas as pd
 import responses
+from responses.matchers import json_params_matcher
 import pyarrow as pa
 from urllib.parse import urlencode
 from .conftest import test_info, TEST_LOCAL_SERVER, TEST_DATASTACK
@@ -26,7 +27,7 @@ def match(request_body):
     return match
 
 
-class TestChunkedgraphException(Exception):
+class ChunkedgraphTestException(Exception):
     """Error to raise is bad values make it to chunkedgraph"""
@@ -140,7 +141,7 @@ def test_matclient(self, myclient, mocker):
             url=url,
             body=serialize_dataframe(df),
             content_type="data.arrow",
-            match=[responses.json_params_matcher(correct_query_data)],
+            match=[json_params_matcher(correct_query_data)],
         )
 
         responses.add(
@@ -151,11 +152,7 @@ def test_matclient(self, myclient, mocker):
             headers={
                 "dataframe_resolution": "1, 1, 1",
             },
-            match=[
-                responses.json_params_matcher(
-                    correct_query_data_with_desired_resolution
-                )
-            ],
+            match=[json_params_matcher(correct_query_data_with_desired_resolution)],
         )
 
         meta_url = self.endpoints["metadata"].format_map(endpoint_mapping)
@@ -221,7 +218,7 @@ def test_matclient(self, myclient, mocker):
         ### live query test
         def my_get_roots(self, supervoxel_ids, timestamp=None, stop_layer=None):
             if 0 in supervoxel_ids:
-                raise TestChunkedgraphException(
+                raise ChunkedgraphTestException(
                     ("should not call get roots on svid =0")
                 )
             if timestamp == good_time:
@@ -269,7 +266,7 @@ def mocked_get_past_ids(
             self, root_ids, timestamp_past=None, timestamp_future=None
         ):
             if 0 in root_ids:
-                raise TestChunkedgraphException(("should not past_ids on svid =0"))
+                raise ChunkedgraphTestException(("should not past_ids on svid =0"))
             id_map = {201: [100], 103: [103], 203: [101, 102]}
             return {
                 "future_id_map": {},
@@ -278,7 +275,7 @@ def mocked_get_past_ids(
 
         def mock_is_latest_roots(self, root_ids, timestamp=None):
             if 0 in root_ids:
-                raise TestChunkedgraphException(
+                raise ChunkedgraphTestException(
                     ("should not call is_latest on svid =0")
                 )
             if timestamp == good_time:
@@ -352,7 +349,7 @@ def mock_get_root_timestamps(self, root_ids):
             url=url,
             body=serialize_dataframe(df),
             content_type="data.arrow",
-            match=[responses.json_params_matcher(correct_query_data)],
+            match=[json_params_matcher(correct_query_data)],
         )
         correct_query_data = {
             "filter_in_dict": {
@@ -364,7 +361,7 @@ def mock_get_root_timestamps(self, root_ids):
             url=url,
             content_type="data.arrow",
             body=serialize_dataframe(df),
-            match=[responses.json_params_matcher(correct_query_data)],
+            match=[json_params_matcher(correct_query_data)],
         )
         correct_query_data = {
             "filter_in_dict": {
@@ -376,7 +373,7 @@ def mock_get_root_timestamps(self, root_ids):
             url=url,
             body=serialize_dataframe(df),
             content_type="data.arrow",
-            match=[responses.json_params_matcher(correct_query_data)],
+            match=[json_params_matcher(correct_query_data)],
         )
 
         dfq = myclient.materialize.live_query(
@@ -429,7 +426,7 @@ def mock_get_root_timestamps(self, root_ids):
             url=url,
             body=serialize_dataframe(df_ct),
             content_type="data.arrow",
-            match=[responses.json_params_matcher(correct_query_data)],
+            match=[json_params_matcher(correct_query_data)],
         )
         dfq = myclient.materialize.live_query(
             "cell_types", good_time, split_positions=True
@@ -444,7 +441,7 @@ def mock_get_root_timestamps(self, root_ids):
             url=url,
             body=serialize_dataframe(df_ct),
             content_type="data.arrow",
-            match=[responses.json_params_matcher(correct_query_data)],
+            match=[json_params_matcher(correct_query_data)],
         )
         dfq = myclient.materialize.live_query(
             "cell_types",