diff --git a/.editorconfig b/.editorconfig
index 6fc2bd76ca..23291bc39b 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -7,8 +7,8 @@ end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
-max_line_length = 100
[*.py]
indent_style = space
indent_size = 4
+max_line_length = 88
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index f07536782f..caefb97421 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -56,7 +56,6 @@ jobs:
strategy:
matrix:
python-version:
- - '3.8'
- '3.9'
- '3.10'
- '3.11'
diff --git a/cors_webserver.py b/cors_webserver.py
index bab992c32e..20f86d41c7 100755
--- a/cors_webserver.py
+++ b/cors_webserver.py
@@ -23,38 +23,33 @@
can connect to the web server.
"""
-from __future__ import print_function, absolute_import
import argparse
import os
import sys
-
-try:
- # Python3 and Python2 with future package.
- from http.server import SimpleHTTPRequestHandler, HTTPServer
-except ImportError:
- from BaseHTTPServer import HTTPServer
- from SimpleHTTPServer import SimpleHTTPRequestHandler
+from http.server import HTTPServer, SimpleHTTPRequestHandler
class RequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
- self.send_header('Access-Control-Allow-Origin', '*')
+ self.send_header("Access-Control-Allow-Origin", "*")
SimpleHTTPRequestHandler.end_headers(self)
class Server(HTTPServer):
- protocol_version = 'HTTP/1.1'
+ protocol_version = "HTTP/1.1"
def __init__(self, server_address):
HTTPServer.__init__(self, server_address, RequestHandler)
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
- ap.add_argument('-p', '--port', type=int, default=9000, help='TCP port to listen on')
- ap.add_argument('-a', '--bind', default='127.0.0.1', help='Bind address')
- ap.add_argument('-d', '--directory', default='.', help='Directory to serve')
+ ap.add_argument(
+ "-p", "--port", type=int, default=9000, help="TCP port to listen on"
+ )
+ ap.add_argument("-a", "--bind", default="127.0.0.1", help="Bind address")
+ ap.add_argument("-d", "--directory", default=".", help="Directory to serve")
args = ap.parse_args()
os.chdir(args.directory)
diff --git a/noxfile.py b/noxfile.py
new file mode 100644
index 0000000000..f033de152a
--- /dev/null
+++ b/noxfile.py
@@ -0,0 +1,22 @@
+import nox
+
+nox.options.reuse_existing_virtualenvs = True
+nox.options.error_on_external_run = True
+
+
+@nox.session
+def lint(session):
+ session.install("-r", "python/requirements-lint.txt")
+ session.run("ruff", "check", ".")
+
+
+@nox.session
+def format(session):
+ session.install("-r", "python/requirements-lint.txt")
+ session.run("ruff", "format", ".")
+
+
+@nox.session
+def mypy(session):
+ session.install("-r", "python/requirements-mypy.txt")
+ session.run("mypy", ".")
diff --git a/pyproject.toml b/pyproject.toml
index 622aecfff4..27ad2f930f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,2 +1,50 @@
[build-system]
-requires = ["setuptools>=64", "wheel", "setuptools_scm", "oldest-supported-numpy"]
+requires = [
+ "setuptools>=64",
+ "wheel",
+ "setuptools_scm",
+ "oldest-supported-numpy",
+]
+
+[tool.ruff]
+target-version = "py39"
+select = [
+ "E", # pycodestyle
+ "W", # pycodestyle
+ "F", # pyflakes
+ "I", # isort
+ "UP", # pyupgrade
+ "NPY", # numpy
+]
+ignore = [
+ "E501", # Line length regulated by ruff format
+]
+
+[tool.mypy]
+show_error_codes = true
+pretty = true
+exclude = [
+ "noxfile\\.py",
+ "^src/",
+ "/guide_video_recorder/",
+ "^docs/",
+]
+
+[[tool.mypy.overrides]]
+module = [
+ "apitools",
+ "apitools.*",
+ "numcodecs",
+ "google",
+ "google.*",
+ "zarr",
+ "zarrita",
+ "tensorstore",
+ "dask",
+ "dask.*",
+ "scipy",
+ "scipy.*",
+ "cloudvolume",
+ "trio",
+]
+ignore_missing_imports = true
diff --git a/python/build_tools/cibuildwheel.sh b/python/build_tools/cibuildwheel.sh
index fde16dfc03..9c528c9d97 100755
--- a/python/build_tools/cibuildwheel.sh
+++ b/python/build_tools/cibuildwheel.sh
@@ -1,7 +1,7 @@
#!/bin/bash
export CIBW_ARCHS_MACOS="x86_64 arm64"
-export CIBW_SKIP="cp27-* cp36-* pp* *_i686 *-win32"
+export CIBW_SKIP="cp27-* cp36-* cp37-* cp38-* pp* *_i686 *-win32"
export CIBW_TEST_EXTRAS="test"
export CIBW_TEST_COMMAND="python -m pytest {project}/python/tests -vv -s --skip-browser-tests"
export CIBW_MANYLINUX_X86_64_IMAGE=manylinux2014
diff --git a/python/copy_openmesh_deps.py b/python/copy_openmesh_deps.py
index dca2f6a1f6..6b32e8bfeb 100755
--- a/python/copy_openmesh_deps.py
+++ b/python/copy_openmesh_deps.py
@@ -7,27 +7,43 @@
import sys
script_dir = os.path.dirname(sys.argv[0])
-src_dir = os.path.join(script_dir, 'ext/src')
-dest_openmesh_dir = os.path.join(script_dir, 'ext/third_party/openmesh/OpenMesh')
+src_dir = os.path.join(script_dir, "ext/src")
+dest_openmesh_dir = os.path.join(script_dir, "ext/third_party/openmesh/OpenMesh")
ap = argparse.ArgumentParser()
-ap.add_argument('openmesh_directory', help='Path to OpenMesh root directory')
+ap.add_argument("openmesh_directory", help="Path to OpenMesh root directory")
args = ap.parse_args()
openmesh_dir = os.path.abspath(args.openmesh_directory)
deps = subprocess.check_output(
- ['gcc', '-pthread', '-I', os.path.join(openmesh_dir, 'src'), '-c', 'openmesh_dependencies.cc',
- 'on_demand_object_mesh_generator.cc', '-MM', '-MF', '/dev/stdout',
- '-fopenmp', '-std=c++11'], cwd=src_dir).split()
-deps = [x[len(openmesh_dir)+1:] for x in deps if x.startswith(openmesh_dir + '/')] + ['LICENSE', 'VERSION']
+ [
+ "gcc",
+ "-pthread",
+ "-I",
+ os.path.join(openmesh_dir, "src"),
+ "-c",
+ "openmesh_dependencies.cc",
+ "on_demand_object_mesh_generator.cc",
+ "-MM",
+ "-MF",
+ "/dev/stdout",
+ "-fopenmp",
+ "-std=c++11",
+ ],
+ text=True,
+ cwd=src_dir,
+).split()
+deps = [
+ x[len(openmesh_dir) + 1 :] for x in deps if x.startswith(openmesh_dir + "/")
+] + ["LICENSE", "VERSION"]
if os.path.exists(dest_openmesh_dir):
- shutil.rmtree(dest_openmesh_dir)
+ shutil.rmtree(dest_openmesh_dir)
for dep in deps:
- dest_path = os.path.join(dest_openmesh_dir, dep)
- dest_dir = os.path.dirname(dest_path)
- if not os.path.exists(dest_dir):
- os.makedirs(dest_dir)
- shutil.copyfile(os.path.join(openmesh_dir, dep), dest_path)
+ dest_path = os.path.join(dest_openmesh_dir, dep)
+ dest_dir = os.path.dirname(dest_path)
+ if not os.path.exists(dest_dir):
+ os.makedirs(dest_dir)
+ shutil.copyfile(os.path.join(openmesh_dir, dep), dest_path)
diff --git a/python/examples/agglomeration_split_tool_csv_to_sqlite.py b/python/examples/agglomeration_split_tool_csv_to_sqlite.py
index e5d78f595d..2aa18140da 100644
--- a/python/examples/agglomeration_split_tool_csv_to_sqlite.py
+++ b/python/examples/agglomeration_split_tool_csv_to_sqlite.py
@@ -1,56 +1,83 @@
-import pandas
+import argparse
import sqlite3
+
import neuroglancer.equivalence_map
-import argparse
import numpy as np
+import pandas
+
def load_edges2(path, include_agglo_id=False):
- edges = []
- dtype = {'segment_a': np.uint64, 'segment_b': np.uint64, 'score': np.float64, 'x': np.int64, 'y': np.int64, 'z': np.int64}
- if include_agglo_id:
- dtype['agglo_id'] = np.uint64
- df = pandas.read_csv(path, sep=',', dtype=dtype)
- return df
+ dtype = {
+ "segment_a": np.uint64,
+ "segment_b": np.uint64,
+ "score": np.float64,
+ "x": np.int64,
+ "y": np.int64,
+ "z": np.int64,
+ }
+ if include_agglo_id:
+ dtype["agglo_id"] = np.uint64
+ df = pandas.read_csv(path, sep=",", dtype=dtype)
+ return df
def write_db(edges_csv_path, output_path, include_agglo_id=False):
+ print("Loading edges")
+ edges = load_edges2(edges_csv_path, include_agglo_id=include_agglo_id)
+
+ all_eqs = neuroglancer.equivalence_map.EquivalenceMap()
+ print("Creating equivalence map for agglomeration")
+ for a, b in edges[["segment_a", "segment_b"]].values:
+ all_eqs.union(a, b)
+
+ conn = sqlite3.connect(output_path)
+ c = conn.cursor()
+
+ c.execute("CREATE TABLE supervoxels (supervoxel_id INTEGER, agglo_id INTEGER)")
+ c.execute(
+ "CREATE INDEX supervoxels_by_supervoxel_id_index ON supervoxels (supervoxel_id)"
+ )
+ c.execute("CREATE INDEX supervoxels_by_agglo_id_index ON supervoxels (agglo_id)")
+ c.execute(
+ "CREATE TABLE edges (agglo_id INTEGER, segment_a INTEGER, segment_b INTEGER, score REAL, x INTEGER, y INTEGER, z INTEGER)"
+ )
+ c.execute("CREATE INDEX edges_by_agglo_id_index ON edges (agglo_id)")
+
+ print("Writing supervoxels table")
+ c.executemany(
+ "INSERT INTO supervoxels VALUES (?,?)",
+ ((int(x), int(all_eqs[x])) for x in all_eqs),
+ )
+
+ print("Writing edges table")
+ c.executemany(
+ "INSERT INTO edges VALUES (?, ?, ?, ?, ?, ?, ?)",
+ (
+ (
+ int(all_eqs[segment_a]),
+ int(segment_a),
+ int(segment_b),
+ float(score),
+ int(x),
+ int(y),
+ int(z),
+ )
+ for (segment_a, segment_b), score, (x, y, z) in zip(
+ edges[["segment_a", "segment_b"]].values,
+ edges["score"].values,
+ edges[["x", "y", "z"]].values,
+ )
+ ),
+ )
+ print("Committing")
+ conn.commit()
+ conn.close()
+
+
+if __name__ == "__main__":
+ ap = argparse.ArgumentParser()
+ ap.add_argument("csv", help="Path to CSV file specifying edges.")
+ ap.add_argument("db", help="Output path to sqlite3 db.")
+ args = ap.parse_args()
- print('Loading edges')
- edges = load_edges2(edges_csv_path, include_agglo_id=include_agglo_id)
-
- all_eqs = neuroglancer.equivalence_map.EquivalenceMap()
- print('Creating equivalence map for agglomeration')
- for a, b in edges[['segment_a', 'segment_b']].values:
- all_eqs.union(a, b)
-
- conn = sqlite3.connect(output_path)
- c = conn.cursor()
-
- c.execute('CREATE TABLE supervoxels (supervoxel_id INTEGER, agglo_id INTEGER)')
- c.execute('CREATE INDEX supervoxels_by_supervoxel_id_index ON supervoxels (supervoxel_id)')
- c.execute('CREATE INDEX supervoxels_by_agglo_id_index ON supervoxels (agglo_id)')
- c.execute('CREATE TABLE edges (agglo_id INTEGER, segment_a INTEGER, segment_b INTEGER, score REAL, x INTEGER, y INTEGER, z INTEGER)')
- c.execute('CREATE INDEX edges_by_agglo_id_index ON edges (agglo_id)')
-
- print('Writing supervoxels table')
- c.executemany('INSERT INTO supervoxels VALUES (?,?)',
- ((int(x), int(all_eqs[x])) for x in all_eqs.keys()))
-
- print('Writing edges table')
- c.executemany(
- 'INSERT INTO edges VALUES (?, ?, ?, ?, ?, ?, ?)',
- ((int(all_eqs[segment_a]), int(segment_a), int(segment_b), float(score), int(x), int(y), int(z))
- for (segment_a, segment_b), score,
- (x, y, z) in zip(edges[['segment_a', 'segment_b']].values, edges['score']
- .values, edges[['x', 'y', 'z']].values)))
- print('Committing')
- conn.commit()
- conn.close()
-
-if __name__ == '__main__':
- ap = argparse.ArgumentParser()
- ap.add_argument('csv', help='Path to CSV file specifying edges.')
- ap.add_argument('db', help='Output path to sqlite3 db.')
- args = ap.parse_args()
-
- write_db(args.csv, args.db)
+ write_db(args.csv, args.db)
diff --git a/python/examples/example.py b/python/examples/example.py
index d55af17a66..ce558cbc52 100755
--- a/python/examples/example.py
+++ b/python/examples/example.py
@@ -1,37 +1,43 @@
-from __future__ import print_function
-
import argparse
-import numpy as np
import neuroglancer
import neuroglancer.cli
+import numpy as np
def add_example_layers(state):
a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
- ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
+ ix, iy, iz = np.meshgrid(
+ *[np.linspace(0, 1, n) for n in a.shape[1:]], indexing="ij"
+ )
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255
- b = np.cast[np.uint32](np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10))
- b = np.pad(b, 1, 'constant')
- dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
- units='nm',
- scales=[10, 10, 10])
+ b = np.cast[np.uint32](
+ np.floor(np.sqrt((ix - 0.5) ** 2 + (iy - 0.5) ** 2 + (iz - 0.5) ** 2) * 10)
+ )
+ b = np.pad(b, 1, "constant")
+ dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[10, 10, 10]
+ )
state.dimensions = dimensions
state.layers.append(
- name='a',
+ name="a",
layer=neuroglancer.LocalVolume(
data=a,
dimensions=neuroglancer.CoordinateSpace(
- names=['c^', 'x', 'y', 'z'],
- units=['', 'nm', 'nm', 'nm'],
+ names=["c^", "x", "y", "z"],
+ units=["", "nm", "nm", "nm"],
scales=[1, 10, 10, 10],
coordinate_arrays=[
- neuroglancer.CoordinateArray(labels=['red', 'green', 'blue']), None, None, None
- ]),
+ neuroglancer.CoordinateArray(labels=["red", "green", "blue"]),
+ None,
+ None,
+ None,
+ ],
+ ),
voxel_offset=(0, 20, 30, 15),
),
shader="""
@@ -43,7 +49,7 @@ def add_example_layers(state):
""",
)
state.layers.append(
- name='b',
+ name="b",
layer=neuroglancer.LocalVolume(
data=b,
dimensions=dimensions,
@@ -52,7 +58,7 @@ def add_example_layers(state):
return a, b
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/example_action.py b/python/examples/example_action.py
index 99cfd1763a..c4d6b8632d 100644
--- a/python/examples/example_action.py
+++ b/python/examples/example_action.py
@@ -1,12 +1,10 @@
-from __future__ import print_function
-
import argparse
import webbrowser
import neuroglancer
import neuroglancer.cli
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -14,19 +12,19 @@
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
)
def my_action(s):
- print('Got my-action')
- print(' Mouse position: %s' % (s.mouse_voxel_coordinates, ))
- print(' Layer selected values: %s' % (s.selected_values, ))
+ print("Got my-action")
+ print(f" Mouse position: {s.mouse_voxel_coordinates}")
+ print(f" Layer selected values: {s.selected_values}")
- viewer.actions.add('my-action', my_action)
+ viewer.actions.add("my-action", my_action)
with viewer.config_state.txn() as s:
- s.input_event_bindings.viewer['keyt'] = 'my-action'
- s.status_messages['hello'] = 'Welcome to this example'
+ s.input_event_bindings.viewer["keyt"] = "my-action"
+ s.status_messages["hello"] = "Welcome to this example"
print(viewer)
webbrowser.open_new(viewer.get_viewer_url())
diff --git a/python/examples/example_annotation_properties.py b/python/examples/example_annotation_properties.py
index 1d4dcca9bd..5ef7166ef5 100644
--- a/python/examples/example_annotation_properties.py
+++ b/python/examples/example_annotation_properties.py
@@ -1,17 +1,18 @@
-from __future__ import print_function
-
import argparse
+
import neuroglancer
import neuroglancer.cli
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y"], units="nm", scales=[1, 1]
+ )
s.position = [150, 150]
s.layers.append(
name="a",
@@ -19,47 +20,47 @@
dimensions=s.dimensions,
annotation_properties=[
neuroglancer.AnnotationPropertySpec(
- id='color',
- type='rgb',
- default='red',
+ id="color",
+ type="rgb",
+ default="red",
),
neuroglancer.AnnotationPropertySpec(
- id='size',
- type='float32',
+ id="size",
+ type="float32",
default=10,
),
neuroglancer.AnnotationPropertySpec(
- id='p_int8',
- type='int8',
+ id="p_int8",
+ type="int8",
default=10,
),
neuroglancer.AnnotationPropertySpec(
- id='p_uint8',
- type='uint8',
+ id="p_uint8",
+ type="uint8",
default=10,
),
],
annotations=[
neuroglancer.PointAnnotation(
- id='1',
+ id="1",
point=[150, 150],
- props=['#0f0', 5, 6, 7],
+ props=["#0f0", 5, 6, 7],
),
neuroglancer.PointAnnotation(
- id='2',
+ id="2",
point=[250, 100],
- props=['#ff0', 30, 7, 9],
+ props=["#ff0", 30, 7, 9],
),
],
- shader='''
+ shader="""
void main() {
setColor(prop_color());
setPointMarkerSize(prop_size());
}
-''',
+""",
),
)
- s.layout = 'xy'
- s.selected_layer.layer = 'a'
- print('Use `Control+right click` to display annotation details.')
+ s.layout = "xy"
+ s.selected_layer.layer = "a"
+ print("Use `Control+right click` to display annotation details.")
print(viewer)
diff --git a/python/examples/example_cdf.py b/python/examples/example_cdf.py
index f229c64438..2250f84ac2 100644
--- a/python/examples/example_cdf.py
+++ b/python/examples/example_cdf.py
@@ -15,17 +15,16 @@
# This example displays layers useful for testing the behavior of the invlerp UI
# control's CDF widget.
-from __future__ import print_function
import argparse
-import numpy as np
import neuroglancer
import neuroglancer.cli
+import numpy as np
def add_cdf_test_layer(state, dtype, min_value=None, max_value=None):
- dimensions = neuroglancer.CoordinateSpace(names=['x'], units='', scales=[1])
+ dimensions = neuroglancer.CoordinateSpace(names=["x"], units="", scales=[1])
state.dimensions = dimensions
if min_value is None or max_value is None:
info = np.iinfo(dtype)
@@ -33,14 +32,18 @@ def add_cdf_test_layer(state, dtype, min_value=None, max_value=None):
min_value = info.min
if max_value is None:
max_value = info.max
- data = np.linspace(start=min_value, stop=max_value, endpoint=True, dtype=dtype, num=256)
- state.layers[np.dtype(dtype).name] = neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(
- data=data,
- dimensions=dimensions,
- ))
-
-
-if __name__ == '__main__':
+ data = np.linspace(
+ start=min_value, stop=max_value, endpoint=True, dtype=dtype, num=256
+ )
+ state.layers[np.dtype(dtype).name] = neuroglancer.ImageLayer(
+ source=neuroglancer.LocalVolume(
+ data=data,
+ dimensions=dimensions,
+ )
+ )
+
+
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/example_coordinate_arrays.py b/python/examples/example_coordinate_arrays.py
index 6f005cf88a..f80e0adb2a 100755
--- a/python/examples/example_coordinate_arrays.py
+++ b/python/examples/example_coordinate_arrays.py
@@ -1,48 +1,52 @@
-from __future__ import print_function
-
import argparse
-import numpy as np
import neuroglancer
import neuroglancer.cli
+import numpy as np
def add_example_layers(state):
a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
- ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
+ ix, iy, iz = np.meshgrid(
+ *[np.linspace(0, 1, n) for n in a.shape[1:]], indexing="ij"
+ )
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255
- b = np.cast[np.uint32](np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10))
- b = np.pad(b, 1, 'constant')
+ b = np.cast[np.uint32](
+ np.floor(np.sqrt((ix - 0.5) ** 2 + (iy - 0.5) ** 2 + (iz - 0.5) ** 2) * 10)
+ )
+ b = np.pad(b, 1, "constant")
dimensions = neuroglancer.CoordinateSpace(
- names=['x', 'y', 'z', 'c'],
- units=['nm', 'nm', 'nm', ''],
+ names=["x", "y", "z", "c"],
+ units=["nm", "nm", "nm", ""],
scales=[10, 10, 10, 1],
coordinate_arrays=[
None,
None,
None,
- neuroglancer.CoordinateArray(labels=['red', 'green', 'blue']),
- ])
+ neuroglancer.CoordinateArray(labels=["red", "green", "blue"]),
+ ],
+ )
state.dimensions = dimensions
state.layers.append(
- name='a',
+ name="a",
layer=neuroglancer.LocalVolume(
data=a,
dimensions=neuroglancer.CoordinateSpace(
- names=['c', 'x', 'y', 'z'],
- units=['', 'nm', 'nm', 'nm'],
+ names=["c", "x", "y", "z"],
+ units=["", "nm", "nm", "nm"],
scales=[1, 10, 10, 10],
),
voxel_offset=(0, 20, 30, 15),
- ))
+ ),
+ )
return a, b
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/example_cross_section.py b/python/examples/example_cross_section.py
index 8ae04be93a..137b0ab1be 100644
--- a/python/examples/example_cross_section.py
+++ b/python/examples/example_cross_section.py
@@ -1,17 +1,12 @@
-from __future__ import print_function, division
-
import argparse
import copy
-import numpy as np
import time
import neuroglancer
import neuroglancer.cli
-
from example import add_example_layers
-if __name__ == '__main__':
-
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -20,29 +15,26 @@
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
a, b = add_example_layers(s)
- s.layout.type = '3d'
- s.layout.cross_sections['a'] = neuroglancer.CrossSection()
-
+ s.layout.type = "3d"
+ s.layout.cross_sections["a"] = neuroglancer.CrossSection()
def interpolate_to(final_state, frames_per_second=5, seconds=1):
total_frames = int(round(seconds * frames_per_second))
initial_state = viewer.state
for frame_i in range(total_frames):
t = frame_i / total_frames
- viewer.set_state(neuroglancer.ViewerState.interpolate(initial_state, final_state, t))
+ viewer.set_state(
+ neuroglancer.ViewerState.interpolate(initial_state, final_state, t)
+ )
time.sleep(1 / frames_per_second)
viewer.set_state(final_state)
-
-
def move_by(offset, **kwargs):
final_state = copy.deepcopy(viewer.state)
final_state.voxel_coordinates += offset
interpolate_to(final_state, **kwargs)
-
def do_move_by():
- move_by([100, 0, 0], seconds=1, frames_per_second=10)
-
+ move_by([100, 0, 0], seconds=1, frames_per_second=10)
print(viewer)
diff --git a/python/examples/example_dask.py b/python/examples/example_dask.py
index 0d60f64923..708c58d753 100644
--- a/python/examples/example_dask.py
+++ b/python/examples/example_dask.py
@@ -13,10 +13,10 @@
# limitations under the License.
import argparse
-import numpy as np
import neuroglancer
import neuroglancer.cli
+import numpy as np
def add_dask_layer(state):
@@ -26,21 +26,23 @@ def add_dask_layer(state):
import dask.array
def make_array(k):
- print('Computing k=%d' % (k, ))
+ print("Computing k=%d" % (k,))
return np.full(shape=(256, 256), fill_value=k, dtype=np.uint8)
lazy_make_array = dask.delayed(make_array, pure=True)
lazy_chunks = [lazy_make_array(k) for k in range(255)]
- sample = lazy_chunks[0].compute() # load the first chunk (assume rest are same shape/dtype)
+ sample = lazy_chunks[
+ 0
+ ].compute() # load the first chunk (assume rest are same shape/dtype)
arrays = [
dask.array.from_delayed(lazy_chunk, dtype=sample.dtype, shape=sample.shape)
for lazy_chunk in lazy_chunks
]
x = dask.array.concatenate(arrays)
- state.layers['dask'] = neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(x))
+ state.layers["dask"] = neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(x))
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/example_fixed_token.py b/python/examples/example_fixed_token.py
index c536036b15..a1ddb0814a 100644
--- a/python/examples/example_fixed_token.py
+++ b/python/examples/example_fixed_token.py
@@ -1,15 +1,10 @@
-from __future__ import print_function
-
import argparse
-import numpy as np
import neuroglancer
import neuroglancer.cli
-
from example import add_example_layers
-if __name__ == '__main__':
-
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -18,7 +13,7 @@
# Specifying a token disables credentials by default. Specify
# `allow_credentials=True` to allow credentials, but in that case you must
specify a secure/unguessable token to avoid exposing the credentials.
- viewer = neuroglancer.Viewer(token='mytoken')
+ viewer = neuroglancer.Viewer(token="mytoken")
with viewer.txn() as s:
a, b = add_example_layers(s)
diff --git a/python/examples/example_grid_layout.py b/python/examples/example_grid_layout.py
index d7f130fc97..6cdc4c3322 100644
--- a/python/examples/example_grid_layout.py
+++ b/python/examples/example_grid_layout.py
@@ -1,12 +1,10 @@
-from __future__ import print_function
-
import argparse
import webbrowser
import neuroglancer
import neuroglancer.cli
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -14,22 +12,28 @@
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
+ )
+ s.layers["ground_truth"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth",
)
- s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
+ s.layout = neuroglancer.row_layout(
+ [
+ neuroglancer.column_layout(
+ [
+ neuroglancer.LayerGroupViewer(layers=["image", "ground_truth"]),
+ neuroglancer.LayerGroupViewer(layers=["image", "ground_truth"]),
+ ]
+ ),
+ neuroglancer.column_layout(
+ [
+ neuroglancer.LayerGroupViewer(layers=["ground_truth"]),
+ neuroglancer.LayerGroupViewer(layers=["ground_truth"]),
+ ]
+ ),
+ ]
)
- s.layout = neuroglancer.row_layout([
- neuroglancer.column_layout([
- neuroglancer.LayerGroupViewer(layers=['image', 'ground_truth']),
- neuroglancer.LayerGroupViewer(layers=['image', 'ground_truth']),
- ]),
- neuroglancer.column_layout([
- neuroglancer.LayerGroupViewer(layers=['ground_truth']),
- neuroglancer.LayerGroupViewer(layers=['ground_truth']),
- ]),
- ])
print(viewer.state)
print(viewer)
webbrowser.open_new(viewer.get_viewer_url())
diff --git a/python/examples/example_layer_side_panels.py b/python/examples/example_layer_side_panels.py
index 73951535bf..a884481492 100644
--- a/python/examples/example_layer_side_panels.py
+++ b/python/examples/example_layer_side_panels.py
@@ -4,7 +4,7 @@
import neuroglancer
import neuroglancer.cli
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -12,27 +12,27 @@
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
panels=[
neuroglancer.LayerSidePanelState(
- side='left',
- col = 0,
- row = 0,
- tab='render',
- tabs=['source', 'rendering'],
+ side="left",
+ col=0,
+ row=0,
+ tab="render",
+ tabs=["source", "rendering"],
),
neuroglancer.LayerSidePanelState(
- side='left',
- col = 0,
+ side="left",
+ col=0,
row=1,
- tab='render',
- tabs=['annotations'],
+ tab="render",
+ tabs=["annotations"],
),
],
)
- s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
+ s.layers["ground_truth"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth",
)
print(viewer.state)
print(viewer)
diff --git a/python/examples/example_local_volume_coordinate_arrays.py b/python/examples/example_local_volume_coordinate_arrays.py
index 27225546df..2f646a2bac 100644
--- a/python/examples/example_local_volume_coordinate_arrays.py
+++ b/python/examples/example_local_volume_coordinate_arrays.py
@@ -1,41 +1,48 @@
-from __future__ import print_function
-
import argparse
-import numpy as np
import neuroglancer
import neuroglancer.cli
+import numpy as np
def add_example_layers(state):
a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
- ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
+ ix, iy, iz = np.meshgrid(
+ *[np.linspace(0, 1, n) for n in a.shape[1:]], indexing="ij"
+ )
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255
- b = np.cast[np.uint32](np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10))
- b = np.pad(b, 1, 'constant')
- dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
- units='nm',
- scales=[10, 10, 10])
+ b = np.cast[np.uint32](
+ np.floor(np.sqrt((ix - 0.5) ** 2 + (iy - 0.5) ** 2 + (iz - 0.5) ** 2) * 10)
+ )
+ b = np.pad(b, 1, "constant")
+ dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[10, 10, 10]
+ )
state.dimensions = dimensions
state.layers.append(
- name='a',
+ name="a",
layer=neuroglancer.LocalVolume(
data=a,
dimensions=neuroglancer.CoordinateSpace(
- names=['c', 'x', 'y', 'z'],
- units=['', 'nm', 'nm', 'nm'],
+ names=["c", "x", "y", "z"],
+ units=["", "nm", "nm", "nm"],
scales=[1, 10, 10, 10],
coordinate_arrays=[
- neuroglancer.CoordinateArray(labels=['red', 'green', 'blue']), None, None, None
- ]),
+ neuroglancer.CoordinateArray(labels=["red", "green", "blue"]),
+ None,
+ None,
+ None,
+ ],
+ ),
voxel_offset=(0, 20, 30, 15),
- ))
+ ),
+ )
state.layers.append(
- name='b',
+ name="b",
layer=neuroglancer.LocalVolume(
data=b,
dimensions=dimensions,
@@ -44,7 +51,7 @@ def add_example_layers(state):
return a, b
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/example_overlay.py b/python/examples/example_overlay.py
index 104fe931e9..57cab1f585 100644
--- a/python/examples/example_overlay.py
+++ b/python/examples/example_overlay.py
@@ -1,12 +1,9 @@
-from __future__ import print_function
-
import argparse
import webbrowser
-import numpy as np
-
import neuroglancer
import neuroglancer.cli
+import numpy as np
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
@@ -16,26 +13,28 @@
viewer = neuroglancer.Viewer()
a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
-ix, iy, iz = np.meshgrid(* [np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
+ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing="ij")
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
)
- s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
+ s.layers["ground_truth"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth",
)
- s.layers['overlay'] = neuroglancer.ImageLayer(
+ s.layers["overlay"] = neuroglancer.ImageLayer(
source=neuroglancer.LocalVolume(
a,
dimensions=neuroglancer.CoordinateSpace(
scales=[1, 8, 8, 8],
- units=['', 'nm', 'nm', 'nm'],
- names=['c^', 'x', 'y', 'z']),
- voxel_offset=[0, 3000, 3000, 3000]),
+ units=["", "nm", "nm", "nm"],
+ names=["c^", "x", "y", "z"],
+ ),
+ voxel_offset=[0, 3000, 3000, 3000],
+ ),
shader="""
void main() {
emitRGB(vec3(toNormalized(getDataValue(0)),
diff --git a/python/examples/example_partial_viewport.py b/python/examples/example_partial_viewport.py
index 61a6c0893b..c3a1143d53 100644
--- a/python/examples/example_partial_viewport.py
+++ b/python/examples/example_partial_viewport.py
@@ -1,12 +1,9 @@
-from __future__ import print_function
-
import argparse
-import numpy as np
import copy
import neuroglancer
import neuroglancer.cli
-
+import numpy as np
from example import add_example_layers
@@ -16,18 +13,19 @@ def handler(s):
for i, amount in adjustments:
s.partial_viewport[i] += amount
s.partial_viewport[:2] = np.clip(s.partial_viewport[:2], 0, 0.9)
- s.partial_viewport[2:] = np.clip(s.partial_viewport[2:], 0.1,
- 1 - s.partial_viewport[:2])
+ s.partial_viewport[2:] = np.clip(
+ s.partial_viewport[2:], 0.1, 1 - s.partial_viewport[:2]
+ )
partial_viewport = np.array(s.partial_viewport)
with viewer.config_state.txn() as s:
s.viewer_size = [256, 256]
- s.status_messages['note'] = 'Viewport: %r' % (partial_viewport, )
+ s.status_messages["note"] = f"Viewport: {partial_viewport!r}"
return handler
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -42,31 +40,48 @@ def _add_prefetch(s):
prefetch_state = copy.deepcopy(initial_state)
prefetch_state.partial_viewport = [0.5, 0, 0.5, 1]
s.prefetch = [neuroglancer.PrefetchState(state=prefetch_state, priority=0)]
+
def _remove_prefetch(s):
with viewer.config_state.txn() as s:
del s.prefetch[:]
- viewer.actions.add('viewport-translate-left', _make_viewport_adjust_command([(0, -0.1)]))
- viewer.actions.add('viewport-translate-right', _make_viewport_adjust_command([(0, 0.1)]))
- viewer.actions.add('viewport-translate-up', _make_viewport_adjust_command([(1, -0.1)]))
- viewer.actions.add('viewport-translate-down', _make_viewport_adjust_command([(1, 0.1)]))
- viewer.actions.add('viewport-shrink-width', _make_viewport_adjust_command([(2, -0.1)]))
- viewer.actions.add('viewport-enlarge-width', _make_viewport_adjust_command([(2, 0.1)]))
- viewer.actions.add('viewport-shrink-height', _make_viewport_adjust_command([(3, -0.1)]))
- viewer.actions.add('viewport-enlarge-height', _make_viewport_adjust_command([(3, 0.1)]))
- viewer.actions.add('viewport-add-prefetch', _add_prefetch)
- viewer.actions.add('viewport-remove-prefetch', _remove_prefetch)
+ viewer.actions.add(
+ "viewport-translate-left", _make_viewport_adjust_command([(0, -0.1)])
+ )
+ viewer.actions.add(
+ "viewport-translate-right", _make_viewport_adjust_command([(0, 0.1)])
+ )
+ viewer.actions.add(
+ "viewport-translate-up", _make_viewport_adjust_command([(1, -0.1)])
+ )
+ viewer.actions.add(
+ "viewport-translate-down", _make_viewport_adjust_command([(1, 0.1)])
+ )
+ viewer.actions.add(
+ "viewport-shrink-width", _make_viewport_adjust_command([(2, -0.1)])
+ )
+ viewer.actions.add(
+ "viewport-enlarge-width", _make_viewport_adjust_command([(2, 0.1)])
+ )
+ viewer.actions.add(
+ "viewport-shrink-height", _make_viewport_adjust_command([(3, -0.1)])
+ )
+ viewer.actions.add(
+ "viewport-enlarge-height", _make_viewport_adjust_command([(3, 0.1)])
+ )
+ viewer.actions.add("viewport-add-prefetch", _add_prefetch)
+ viewer.actions.add("viewport-remove-prefetch", _remove_prefetch)
with viewer.config_state.txn() as s:
- s.input_event_bindings.viewer['keyh'] = 'viewport-translate-left'
- s.input_event_bindings.viewer['keyl'] = 'viewport-translate-right'
- s.input_event_bindings.viewer['keyj'] = 'viewport-translate-down'
- s.input_event_bindings.viewer['keyk'] = 'viewport-translate-up'
- s.input_event_bindings.viewer['keyt'] = 'viewport-add-prefetch'
- s.input_event_bindings.viewer['keyg'] = 'viewport-remove-prefetch'
- s.input_event_bindings.viewer['shift+keyu'] = 'viewport-shrink-height'
- s.input_event_bindings.viewer['shift+keyi'] = 'viewport-enlarge-height'
- s.input_event_bindings.viewer['shift+keyy'] = 'viewport-shrink-width'
- s.input_event_bindings.viewer['shift+keyo'] = 'viewport-enlarge-width'
+ s.input_event_bindings.viewer["keyh"] = "viewport-translate-left"
+ s.input_event_bindings.viewer["keyl"] = "viewport-translate-right"
+ s.input_event_bindings.viewer["keyj"] = "viewport-translate-down"
+ s.input_event_bindings.viewer["keyk"] = "viewport-translate-up"
+ s.input_event_bindings.viewer["keyt"] = "viewport-add-prefetch"
+ s.input_event_bindings.viewer["keyg"] = "viewport-remove-prefetch"
+ s.input_event_bindings.viewer["shift+keyu"] = "viewport-shrink-height"
+ s.input_event_bindings.viewer["shift+keyi"] = "viewport-enlarge-height"
+ s.input_event_bindings.viewer["shift+keyy"] = "viewport-shrink-width"
+ s.input_event_bindings.viewer["shift+keyo"] = "viewport-enlarge-width"
print(viewer)
diff --git a/python/examples/example_precomputed_gcs.py b/python/examples/example_precomputed_gcs.py
index 70ec08aad2..a3b9a7b886 100644
--- a/python/examples/example_precomputed_gcs.py
+++ b/python/examples/example_precomputed_gcs.py
@@ -1,17 +1,16 @@
-from __future__ import print_function
-
import argparse
import neuroglancer
import neuroglancer.cli
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image')
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image"
+ )
print(viewer)
diff --git a/python/examples/example_row_layout.py b/python/examples/example_row_layout.py
index 62439f9d89..7c5831ceee 100644
--- a/python/examples/example_row_layout.py
+++ b/python/examples/example_row_layout.py
@@ -1,12 +1,10 @@
-from __future__ import print_function
-
import argparse
import webbrowser
import neuroglancer
import neuroglancer.cli
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -14,16 +12,18 @@
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
+ )
+ s.layers["ground_truth"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth",
)
- s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
+ s.layout = neuroglancer.row_layout(
+ [
+ neuroglancer.LayerGroupViewer(layers=["image", "ground_truth"]),
+ neuroglancer.LayerGroupViewer(layers=["ground_truth"]),
+ ]
)
- s.layout = neuroglancer.row_layout([
- neuroglancer.LayerGroupViewer(layers=['image', 'ground_truth']),
- neuroglancer.LayerGroupViewer(layers=['ground_truth']),
- ])
print(viewer.state)
print(viewer)
webbrowser.open_new(viewer.get_viewer_url())
diff --git a/python/examples/example_signed_int.py b/python/examples/example_signed_int.py
index 2a0c4fde24..c0f69ff2e1 100644
--- a/python/examples/example_signed_int.py
+++ b/python/examples/example_signed_int.py
@@ -1,30 +1,39 @@
import argparse
-import numpy as np
import neuroglancer
import neuroglancer.cli
+import numpy as np
def add_example_layer(state):
- ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in [100, 100, 100]], indexing='ij')
- b = np.cast[np.int32](np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10)) - 2
- b = np.pad(b, 1, 'constant')
- dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
- units='nm',
- scales=[10, 10, 10])
+ ix, iy, iz = np.meshgrid(
+ *[np.linspace(0, 1, n) for n in [100, 100, 100]], indexing="ij"
+ )
+ b = (
+ np.cast[np.int32](
+ np.floor(np.sqrt((ix - 0.5) ** 2 + (iy - 0.5) ** 2 + (iz - 0.5) ** 2) * 10)
+ )
+ - 2
+ )
+ b = np.pad(b, 1, "constant")
+ dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[10, 10, 10]
+ )
state.dimensions = dimensions
state.layers.append(
- name='b',
- layer=neuroglancer.SegmentationLayer(source=neuroglancer.LocalVolume(
- data=b,
- dimensions=dimensions,
- )),
+ name="b",
+ layer=neuroglancer.SegmentationLayer(
+ source=neuroglancer.LocalVolume(
+ data=b,
+ dimensions=dimensions,
+ )
+ ),
)
return b
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/example_single_mesh_layer.py b/python/examples/example_single_mesh_layer.py
index 377c4b8151..30bd69f06c 100644
--- a/python/examples/example_single_mesh_layer.py
+++ b/python/examples/example_single_mesh_layer.py
@@ -1,21 +1,16 @@
-from __future__ import print_function
-
import argparse
-import numpy as np
import neuroglancer
import neuroglancer.cli
-
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.layers['mesh'] = neuroglancer.SingleMeshLayer(
- source=
- 'vtk://https://storage.googleapis.com/neuroglancer-fafb-data/elmr-data/FAFB.surf.vtk.gz'
+ s.layers["mesh"] = neuroglancer.SingleMeshLayer(
+ source="vtk://https://storage.googleapis.com/neuroglancer-fafb-data/elmr-data/FAFB.surf.vtk.gz"
)
print(viewer)
diff --git a/python/examples/example_skeletons.py b/python/examples/example_skeletons.py
index 311b6ecb77..dad58773cd 100644
--- a/python/examples/example_skeletons.py
+++ b/python/examples/example_skeletons.py
@@ -1,11 +1,8 @@
-from __future__ import print_function
-
import argparse
-import numpy as np
-
import neuroglancer
import neuroglancer.cli
+import numpy as np
voxel_size = np.array([10, 10, 10])
@@ -16,35 +13,37 @@
class SkeletonSource(neuroglancer.skeleton.SkeletonSource):
def __init__(self, dimensions):
- super(SkeletonSource, self).__init__(dimensions)
- self.vertex_attributes['affinity'] = neuroglancer.skeleton.VertexAttributeInfo(
+ super().__init__(dimensions)
+ self.vertex_attributes["affinity"] = neuroglancer.skeleton.VertexAttributeInfo(
data_type=np.float32,
num_components=1,
)
- self.vertex_attributes['affinity2'] = neuroglancer.skeleton.VertexAttributeInfo(
+ self.vertex_attributes["affinity2"] = neuroglancer.skeleton.VertexAttributeInfo(
data_type=np.float32,
num_components=1,
)
def get_skeleton(self, i):
- pos = np.unravel_index(i, shape, order='C')
- vertex_positions = [pos, pos + np.random.randn(3) * 30]
+ pos = np.unravel_index(i, shape, order="C")
+ gen = np.random.default_rng()
+ vertex_positions = [pos, pos + gen.standard_normal(3) * 30]
edges = [[0, 1]]
return neuroglancer.skeleton.Skeleton(
vertex_positions=vertex_positions,
edges=edges,
- vertex_attributes=dict(affinity=np.random.rand(2), affinity2=np.random.rand(2)))
+ vertex_attributes=dict(affinity=gen.random(2), affinity2=gen.random(2)),
+ )
viewer = neuroglancer.Viewer()
dimensions = neuroglancer.CoordinateSpace(
- names=['x', 'y', 'z'],
- units='nm',
+ names=["x", "y", "z"],
+ units="nm",
scales=[10, 10, 10],
)
with viewer.txn() as s:
s.layers.append(
- name='a',
+ name="a",
layer=neuroglancer.SegmentationLayer(
source=[
neuroglancer.LocalVolume(
@@ -53,22 +52,23 @@ def get_skeleton(self, i):
),
SkeletonSource(dimensions),
],
- skeleton_shader='void main() { emitRGB(colormapJet(affinity)); }',
+ skeleton_shader="void main() { emitRGB(colormapJet(affinity)); }",
selected_alpha=0,
not_selected_alpha=0,
segments=[395750],
- ))
+ ),
+ )
# Can adjust the skeleton rendering options
- s.layers[0].skeleton_rendering.mode2d = 'lines'
+ s.layers[0].skeleton_rendering.mode2d = "lines"
s.layers[0].skeleton_rendering.line_width2d = 3
- s.layers[0].skeleton_rendering.mode3d = 'lines_and_points'
+ s.layers[0].skeleton_rendering.mode3d = "lines_and_points"
s.layers[0].skeleton_rendering.line_width3d = 10
# Can adjust visibility of layer side panel
- s.selected_layer.layer = 'a'
+ s.selected_layer.layer = "a"
s.selected_layer.visible = True
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/example_toggle_visibility.py b/python/examples/example_toggle_visibility.py
index e6eca9b8dc..5fb9970161 100644
--- a/python/examples/example_toggle_visibility.py
+++ b/python/examples/example_toggle_visibility.py
@@ -1,30 +1,32 @@
-from __future__ import print_function
-
import argparse
+
import neuroglancer
import neuroglancer.cli
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
viewer = neuroglancer.Viewer()
+
def toggle_visibility(s):
with viewer.txn() as s:
- if s.layers['a'].visible == True:
- s.layers['a'].visible = False
- print('Setting visibility to false')
+ if s.layers["a"].visible:
+ s.layers["a"].visible = False
+ print("Setting visibility to false")
else:
- s.layers['a'].visible = True
- print('Setting visibility to true')
+ s.layers["a"].visible = True
+ print("Setting visibility to true")
- viewer.actions.add('toggle-visibility', toggle_visibility)
+ viewer.actions.add("toggle-visibility", toggle_visibility)
with viewer.config_state.txn() as s:
- s.input_event_bindings.viewer['keys'] = 'toggle-visibility'
+ s.input_event_bindings.viewer["keys"] = "toggle-visibility"
with viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y"], units="nm", scales=[1, 1]
+ )
s.position = [150, 150]
s.layers.append(
name="a",
@@ -32,18 +34,18 @@ def toggle_visibility(s):
dimensions=s.dimensions,
annotations=[
neuroglancer.PointAnnotation(
- id='1',
+ id="1",
point=[150, 150],
),
],
- shader='''
+ shader="""
void main() {
setColor(prop_color());
setPointMarkerSize(prop_size());
}
-''',
+""",
),
)
- s.layout = 'xy'
- s.selected_layer.layer = 'a'
+ s.layout = "xy"
+ s.selected_layer.layer = "a"
print(viewer)
diff --git a/python/examples/example_tool.py b/python/examples/example_tool.py
index 1154c37a45..ecd34ec30f 100644
--- a/python/examples/example_tool.py
+++ b/python/examples/example_tool.py
@@ -4,7 +4,7 @@
import neuroglancer
import neuroglancer.cli
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -12,11 +12,11 @@
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
tool_bindings={
- 'A': neuroglancer.ShaderControlTool(control='normalized'),
- 'B': neuroglancer.OpacityTool(),
+ "A": neuroglancer.ShaderControlTool(control="normalized"),
+ "B": neuroglancer.OpacityTool(),
},
)
diff --git a/python/examples/extend_segments_tool.py b/python/examples/extend_segments_tool.py
index 031ac6ecdc..5dfb0a792c 100755
--- a/python/examples/extend_segments_tool.py
+++ b/python/examples/extend_segments_tool.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python2
"""Tool for extending via equivalences a set of segments."""
-from __future__ import absolute_import, print_function
import argparse
import copy
@@ -11,7 +10,7 @@
import neuroglancer
from neuroglancer.json_utils import decode_json, encode_json
-neuroglancer.set_static_content_source(url='http://localhost:8080')
+neuroglancer.set_static_content_source(url="http://localhost:8080")
def get_segmentation_layer(layers):
@@ -20,27 +19,29 @@ def get_segmentation_layer(layers):
return layer
-class Annotator(object):
+class Annotator:
def __init__(self, filename):
self.filename = filename
- self.point_annotation_layer_name = 'false-merges'
+ self.point_annotation_layer_name = "false-merges"
self.states = []
self.state_index = None
viewer = self.viewer = neuroglancer.Viewer()
self.other_state_segment_ids = dict()
- viewer.actions.add('anno-next-state', lambda s: self.next_state())
- viewer.actions.add('anno-prev-state', lambda s: self.prev_state())
- viewer.actions.add('anno-save', lambda s: self.save())
- viewer.actions.add('anno-show-all', lambda s: self.set_combined_state())
- viewer.actions.add('anno-add-segments-from-state',
- lambda s: self.add_segments_from_state(s.viewer_state))
+ viewer.actions.add("anno-next-state", lambda s: self.next_state())
+ viewer.actions.add("anno-prev-state", lambda s: self.prev_state())
+ viewer.actions.add("anno-save", lambda s: self.save())
+ viewer.actions.add("anno-show-all", lambda s: self.set_combined_state())
+ viewer.actions.add(
+ "anno-add-segments-from-state",
+ lambda s: self.add_segments_from_state(s.viewer_state),
+ )
with viewer.config_state.txn() as s:
- s.input_event_bindings.viewer['pageup'] = 'anno-prev-state'
- s.input_event_bindings.viewer['pagedown'] = 'anno-next-state'
- s.input_event_bindings.viewer['control+keys'] = 'anno-save'
- s.input_event_bindings.viewer['control+keya'] = 'anno-show-all'
+ s.input_event_bindings.viewer["pageup"] = "anno-prev-state"
+ s.input_event_bindings.viewer["pagedown"] = "anno-next-state"
+ s.input_event_bindings.viewer["control+keys"] = "anno-save"
+ s.input_event_bindings.viewer["control+keya"] = "anno-show-all"
viewer.shared_state.add_changed_callback(self.on_state_changed)
self.cur_message = None
@@ -52,23 +53,25 @@ def on_state_changed(self):
def update_message(self):
if self.state_index is None:
- message = '[No state selected]'
+ message = "[No state selected]"
else:
- message = '[%d/%d] ' % (self.state_index, len(self.states))
+ message = "[%d/%d] " % (self.state_index, len(self.states))
segments = self.get_state_segment_ids(self.viewer.state)
warnings = []
for segment_id in segments:
other_state = self.other_state_segment_ids.get(segment_id)
if other_state is not None:
- warnings.append('Segment %d also in state %d' % (segment_id, other_state))
+ warnings.append(
+ "Segment %d also in state %d" % (segment_id, other_state)
+ )
if warnings:
- message += 'WARNING: ' + ', '.join(warnings)
+ message += "WARNING: " + ", ".join(warnings)
if message != self.cur_message:
with self.viewer.config_state.txn() as s:
if message is not None:
- s.status_messages['status'] = message
+ s.status_messages["status"] = message
else:
- s.status_messages.pop('status')
+ s.status_messages.pop("status")
self.cur_message = message
def load(self):
@@ -76,17 +79,19 @@ def load(self):
return False
self.state_index = None
- with open(self.filename, 'r') as f:
+ with open(self.filename) as f:
loaded_state = decode_json(f.read())
- self.states = [neuroglancer.ViewerState(x) for x in loaded_state['states']]
- self.set_state_index(loaded_state['state_index'])
+ self.states = [neuroglancer.ViewerState(x) for x in loaded_state["states"]]
+ self.set_state_index(loaded_state["state_index"])
return True
def set_state_index_relative(self, amount):
if self.state_index is None:
new_state = 0
else:
- new_state = (self.state_index + amount + len(self.states)) % len(self.states)
+ new_state = (self.state_index + amount + len(self.states)) % len(
+ self.states
+ )
self.set_state_index(new_state)
def next_state(self):
@@ -122,7 +127,7 @@ def get_duplicate_segment_ids(self):
for segment_id in other_ids:
state_numbers = other_ids[segment_id]
if len(state_numbers) > 1:
- print('%d in %r' % (segment_id, state_numbers))
+ print("%d in %r" % (segment_id, state_numbers))
def _grab_viewer_state(self):
if self.state_index is not None:
@@ -130,13 +135,18 @@ def _grab_viewer_state(self):
def save(self):
self._grab_viewer_state()
- tmp_filename = self.filename + '.tmp'
- with open(tmp_filename, 'wb') as f:
+ tmp_filename = self.filename + ".tmp"
+ with open(tmp_filename, "wb") as f:
f.write(
encode_json(
- dict(states=[s.to_json() for s in self.states], state_index=self.state_index)))
+ dict(
+ states=[s.to_json() for s in self.states],
+ state_index=self.state_index,
+ )
+ )
+ )
os.rename(tmp_filename, self.filename)
- print('Saved state to: %s' % (self.filename, ))
+ print(f"Saved state to: {self.filename}")
def get_state_segment_ids(self, state):
return get_segmentation_layer(state.layers).segments
@@ -159,7 +169,7 @@ def add_segments_from_state(self, base_state):
for segment_id in segment_ids:
if segment_id in existing_segment_ids:
- print('Skipping redundant segment id %d' % segment_id)
+ print("Skipping redundant segment id %d" % segment_id)
continue
self.states.append(self.make_initial_state(segment_id, base_state))
@@ -172,7 +182,9 @@ def make_initial_state(self, segment_id, base_state):
segments = self.get_state_segment_ids(state)
segments.clear()
segments[segment_id] = True
- state.layers[self.point_annotation_layer_name] = neuroglancer.PointAnnotationLayer()
+ state.layers[
+ self.point_annotation_layer_name
+ ] = neuroglancer.PointAnnotationLayer()
return state
@@ -185,7 +197,7 @@ def remove_zero_segments(self):
def set_combined_state(self):
state = self.make_combined_state()
if state is None:
- print('No states')
+ print("No states")
else:
self.set_state_index(None)
self.viewer.set_state(state)
@@ -224,27 +236,33 @@ def print_combined_state_url(self):
print(neuroglancer.to_url(self.make_combined_state()))
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
- ap.add_argument('filename', type=str)
+ ap.add_argument("filename", type=str)
ap.add_argument(
- '-a',
- '--add-segments-from-url',
+ "-a",
+ "--add-segments-from-url",
type=str,
- nargs='*',
+ nargs="*",
default=[],
- help='Add a new state for each selected segment specified by a Neuroglancer URL.')
+ help="Add a new state for each selected segment specified by a Neuroglancer URL.",
+ )
ap.add_argument(
- '-n', '--no-webbrowser', action='store_true', help='Don\'t open the webbrowser.')
- ap.add_argument('--print-sets', action='store_true', help='Print the sets of supervoxels.')
+ "-n", "--no-webbrowser", action="store_true", help="Don't open the webbrowser."
+ )
ap.add_argument(
- '--print-combined-state',
- action='store_true',
- help='Prints a neuroglancer link for the combined state.')
+ "--print-sets", action="store_true", help="Print the sets of supervoxels."
+ )
ap.add_argument(
- '--print-summary',
- action='store_true',
- help='Prints a neuroglancer link for the combined state.')
+ "--print-combined-state",
+ action="store_true",
+ help="Prints a neuroglancer link for the combined state.",
+ )
+ ap.add_argument(
+ "--print-summary",
+ action="store_true",
+ help="Prints a neuroglancer link for the combined state.",
+ )
args = ap.parse_args()
anno = Annotator(args.filename)
@@ -258,12 +276,14 @@ def print_combined_state_url(self):
anno.print_combined_state_url()
if args.print_summary:
- print('<html>')
- print('<h1>%s</h1>' % args.filename)
+ print("<html>")
+ print("<h1>%s</h1>" % args.filename)
print(
- '<a href="%s">Neuroglancer</a>' % neuroglancer.to_url(anno.make_combined_state()))
+ '<a href="%s">Neuroglancer</a>'
+ % neuroglancer.to_url(anno.make_combined_state())
+ )
print(repr(anno.get_sets()))
- print('</html>')
+ print("</html>")
else:
print(anno.get_viewer_url())
diff --git a/python/examples/flood_filling_simulation.py b/python/examples/flood_filling_simulation.py
index f7e06912aa..cdb044fd7b 100755
--- a/python/examples/flood_filling_simulation.py
+++ b/python/examples/flood_filling_simulation.py
@@ -24,52 +24,54 @@
import argparse
import random
-import time
import threading
+import time
+import cloudvolume
import neuroglancer
import neuroglancer.cli
-import cloudvolume
-import zarr
import numpy as np
import scipy.ndimage
+import zarr
-class InteractiveInference(object):
+class InteractiveInference:
def __init__(self):
viewer = self.viewer = neuroglancer.Viewer()
self.gt_vol = cloudvolume.CloudVolume(
- 'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
+ "https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth",
mip=0,
bounded=True,
progress=False,
- provenance={})
- viewer.actions.add('start-fill', self._start_fill_action)
- viewer.actions.add('stop-fill', self._stop_fill_action)
+ provenance={},
+ )
+ viewer.actions.add("start-fill", self._start_fill_action)
+ viewer.actions.add("stop-fill", self._stop_fill_action)
self.dimensions = neuroglancer.CoordinateSpace(
- names=['x', 'y', 'z'],
- units='nm',
+ names=["x", "y", "z"],
+ units="nm",
scales=[8, 8, 8],
)
with viewer.config_state.txn() as s:
- s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill'
- s.input_event_bindings.data_view['keyt'] = 'stop-fill'
+ s.input_event_bindings.data_view["shift+mousedown0"] = "start-fill"
+ s.input_event_bindings.data_view["keyt"] = "stop-fill"
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
)
- s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
+ s.layers["ground_truth"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth",
)
- s.layers['ground_truth'].visible = False
+ s.layers["ground_truth"].visible = False
self.flood_fill_event = None
def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event):
initial_pos = (int(initial_pos[0]), int(initial_pos[1]), int(initial_pos[2]))
gt_vol_zarr = zarr.zeros(
- self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint64)
+ self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint64
+ )
gt_blocks_seen = set()
@@ -78,9 +80,11 @@ def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event):
def fetch_gt_block(block):
spos = block * block_size
epos = spos + block_size
- slice_expr = np.s_[int(spos[0]):int(epos[0]),
- int(spos[1]):int(epos[1]),
- int(spos[2]):int(epos[2])]
+ slice_expr = np.s_[
+ int(spos[0]) : int(epos[0]),
+ int(spos[1]) : int(epos[1]),
+ int(spos[2]) : int(epos[2]),
+ ]
gt_data = self.gt_vol[slice_expr][..., 0]
gt_vol_zarr[slice_expr] = gt_data
@@ -92,20 +96,25 @@ def get_patch(spos, epos):
for blockoff in np.ndindex(tuple(eblock - sblock + 1)):
block = np.array(blockoff) + sblock
block_tuple = tuple(block)
- if block_tuple in gt_blocks_seen: continue
+ if block_tuple in gt_blocks_seen:
+ continue
gt_blocks_seen.add(block_tuple)
fetch_gt_block(block)
- slice_expr = np.s_[int(spos[0]):int(epos[0]),
- int(spos[1]):int(epos[1]),
- int(spos[2]):int(epos[2])]
+ slice_expr = np.s_[
+ int(spos[0]) : int(epos[0]),
+ int(spos[1]) : int(epos[1]),
+ int(spos[2]) : int(epos[2]),
+ ]
result = gt_vol_zarr[slice_expr]
return result
segment_id = self.gt_vol[initial_pos][0]
- patch_size = np.array((33, ) * 3, np.int64)
+ patch_size = np.array((33,) * 3, np.int64)
lower_bound = patch_size // 2
- upper_bound = np.array(self.gt_vol.bounds.to_list()[3:]) - patch_size + patch_size // 2
+ upper_bound = (
+ np.array(self.gt_vol.bounds.to_list()[3:]) - patch_size + patch_size // 2
+ )
d = 8
seen = set()
@@ -115,8 +124,10 @@ def get_patch(spos, epos):
invalidate_interval = 3
def enqueue(pos):
- if np.any(pos < lower_bound) or np.any(pos >= upper_bound): return
- if pos in seen: return
+ if np.any(pos < lower_bound) or np.any(pos >= upper_bound):
+ return
+ if pos in seen:
+ return
seen.add(pos)
q.append(pos)
@@ -129,26 +140,38 @@ def update_view():
last_invalidate[0] = cur_time
inf_volume.invalidate()
with self.viewer.txn() as s:
- s.layers['points'].annotations = [
- neuroglancer.PointAnnotation(id=repr(pos), point=pos) for pos in list(seen)
+ s.layers["points"].annotations = [
+ neuroglancer.PointAnnotation(id=repr(pos), point=pos)
+ for pos in list(seen)
]
def process_pos(pos):
spos = pos - patch_size // 2
epos = spos + patch_size
- slice_expr = np.s_[int(spos[0]):int(epos[0]),
- int(spos[1]):int(epos[1]),
- int(spos[2]):int(epos[2])]
+ slice_expr = np.s_[
+ int(spos[0]) : int(epos[0]),
+ int(spos[1]) : int(epos[1]),
+ int(spos[2]) : int(epos[2]),
+ ]
gt_data = get_patch(spos, epos)
mask = gt_data == segment_id
- for offset in ((0, 0, d), (0, 0, -d), (0, d, 0), (0, -d, 0), (d, 0, 0), (-d, 0, 0)):
- if not mask[tuple(patch_size // 2 + offset)[::-1]]: continue
+ for offset in (
+ (0, 0, d),
+ (0, 0, -d),
+ (0, d, 0),
+ (0, -d, 0),
+ (d, 0, 0),
+ (-d, 0, 0),
+ ):
+ if not mask[tuple(patch_size // 2 + offset)[::-1]]:
+ continue
new_pos = np.array(pos) + np.array(offset)
enqueue(tuple(new_pos))
dist_transform = scipy.ndimage.morphology.distance_transform_edt(~mask)
inf_results[slice_expr] = 1 + np.cast[np.uint8](
- np.minimum(dist_transform, 5) / 5.0 * 254)
+ np.minimum(dist_transform, 5) / 5.0 * 254
+ )
self.viewer.defer_callback(update_view)
@@ -170,15 +193,17 @@ def _stop_flood_fill(self):
def _start_flood_fill(self, pos):
self._stop_flood_fill()
inf_results = zarr.zeros(
- self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
+ self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8
+ )
inf_volume = neuroglancer.LocalVolume(
- data=inf_results, dimensions=self.dimensions)
+ data=inf_results, dimensions=self.dimensions
+ )
with self.viewer.txn() as s:
- s.layers['points'] = neuroglancer.LocalAnnotationLayer(self.dimensions)
- s.layers['inference'] = neuroglancer.ImageLayer(
+ s.layers["points"] = neuroglancer.LocalAnnotationLayer(self.dimensions)
+ s.layers["inference"] = neuroglancer.ImageLayer(
source=inf_volume,
- shader='''
+ shader="""
void main() {
float v = toNormalized(getDataValue(0));
vec4 rgba = vec4(0,0,0,0);
@@ -187,7 +212,7 @@ def _start_flood_fill(self, pos):
}
emitRGBA(rgba);
}
-''',
+""",
)
self.flood_fill_event = threading.Event()
t = threading.Thread(
@@ -197,7 +222,8 @@ def _start_flood_fill(self, pos):
inf_results=inf_results,
inf_volume=inf_volume,
event=self.flood_fill_event,
- ))
+ ),
+ )
t.daemon = True
t.start()
@@ -211,7 +237,7 @@ def _stop_fill_action(self, action_state):
self._stop_flood_fill()
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/interactive_inference.py b/python/examples/interactive_inference.py
index 63c7f8f659..5baac06411 100755
--- a/python/examples/interactive_inference.py
+++ b/python/examples/interactive_inference.py
@@ -20,47 +20,50 @@
import argparse
import time
+import cloudvolume
import neuroglancer
import neuroglancer.cli
-import cloudvolume
-import zarr
import numpy as np
import scipy.ndimage
+import zarr
-class InteractiveInference(object):
+class InteractiveInference:
def __init__(self):
viewer = self.viewer = neuroglancer.Viewer()
- viewer.actions.add('inference', self._do_inference)
+ viewer.actions.add("inference", self._do_inference)
self.gt_vol = cloudvolume.CloudVolume(
- 'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
+ "https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth",
mip=0,
bounded=True,
progress=False,
- provenance={})
+ provenance={},
+ )
self.dimensions = neuroglancer.CoordinateSpace(
- names=['x', 'y', 'z'],
- units='nm',
+ names=["x", "y", "z"],
+ units="nm",
scales=self.gt_vol.resolution,
)
self.inf_results = zarr.zeros(
- self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
+ self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8
+ )
self.inf_volume = neuroglancer.LocalVolume(
- data=self.inf_results, dimensions=self.dimensions)
+ data=self.inf_results, dimensions=self.dimensions
+ )
with viewer.config_state.txn() as s:
- s.input_event_bindings.data_view['shift+mousedown0'] = 'inference'
+ s.input_event_bindings.data_view["shift+mousedown0"] = "inference"
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
)
- s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
+ s.layers["ground_truth"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth",
)
- s.layers['ground_truth'].visible = False
- s.layers['inference'] = neuroglancer.ImageLayer(
+ s.layers["ground_truth"].visible = False
+ s.layers["inference"] = neuroglancer.ImageLayer(
source=self.inf_volume,
- shader='''
+ shader="""
void main() {
float v = toNormalized(getDataValue(0));
vec4 rgba = vec4(0,0,0,0);
@@ -69,34 +72,37 @@ def __init__(self):
}
emitRGBA(rgba);
}
-''',
+""",
)
def _do_inference(self, action_state):
pos = action_state.mouse_voxel_coordinates
if pos is None:
return
- patch_size = np.array((128, ) * 3, np.int64)
+ patch_size = np.array((128,) * 3, np.int64)
spos = pos - patch_size // 2
epos = spos + patch_size
- slice_expr = np.s_[int(spos[0]):int(epos[0]),
- int(spos[1]):int(epos[1]),
- int(spos[2]):int(epos[2])]
+ slice_expr = np.s_[
+ int(spos[0]) : int(epos[0]),
+ int(spos[1]) : int(epos[1]),
+ int(spos[2]) : int(epos[2]),
+ ]
gt_data = self.gt_vol[slice_expr][..., 0]
boundary_mask = gt_data == 0
- boundary_mask[:, :, :-1] |= (gt_data[:, :, :-1] != gt_data[:, :, 1:])
- boundary_mask[:, :, 1:] |= (gt_data[:, :, :-1] != gt_data[:, :, 1:])
- boundary_mask[:, :-1, :] |= (gt_data[:, :-1, :] != gt_data[:, 1:, :])
- boundary_mask[:, 1:, :] |= (gt_data[:, :-1, :] != gt_data[:, 1:, :])
- boundary_mask[:-1, :, :] |= (gt_data[:-1, :, :] != gt_data[1:, :, :])
- boundary_mask[1:, :, :] |= (gt_data[:-1, :, :] != gt_data[1:, :, :])
+ boundary_mask[:, :, :-1] |= gt_data[:, :, :-1] != gt_data[:, :, 1:]
+ boundary_mask[:, :, 1:] |= gt_data[:, :, :-1] != gt_data[:, :, 1:]
+ boundary_mask[:, :-1, :] |= gt_data[:, :-1, :] != gt_data[:, 1:, :]
+ boundary_mask[:, 1:, :] |= gt_data[:, :-1, :] != gt_data[:, 1:, :]
+ boundary_mask[:-1, :, :] |= gt_data[:-1, :, :] != gt_data[1:, :, :]
+ boundary_mask[1:, :, :] |= gt_data[:-1, :, :] != gt_data[1:, :, :]
dist_transform = scipy.ndimage.morphology.distance_transform_edt(~boundary_mask)
self.inf_results[slice_expr] = 1 + np.cast[np.uint8](
- np.minimum(dist_transform, 5) / 5.0 * 254)
+ np.minimum(dist_transform, 5) / 5.0 * 254
+ )
self.inf_volume.invalidate()
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/examples/jupyter-notebook-demo.ipynb b/python/examples/jupyter-notebook-demo.ipynb
index 4c48d8c663..9101a99b23 100644
--- a/python/examples/jupyter-notebook-demo.ipynb
+++ b/python/examples/jupyter-notebook-demo.ipynb
@@ -57,8 +57,7 @@
"source": [
"with viewer.txn() as s:\n",
" s.layers['image'] = neuroglancer.ImageLayer(source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image')\n",
- " s.layers['segmentation'] = neuroglancer.SegmentationLayer(source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth', selected_alpha=0.3)\n",
- " "
+ " s.layers['segmentation'] = neuroglancer.SegmentationLayer(source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth', selected_alpha=0.3)\n"
]
},
{
@@ -109,6 +108,7 @@
"outputs": [],
"source": [
"import tensorstore as ts\n",
+ "\n",
"image_vol = await ts.open({'driver': 'neuroglancer_precomputed', 'kvstore': 'gs://neuroglancer-public-data/flyem_fib-25/image/'})\n",
"a = np.zeros((200,200,200), np.uint8)\n",
"def make_thresholded(threshold):\n",
@@ -217,6 +217,7 @@
"outputs": [],
"source": [
"import copy\n",
+ "\n",
"new_state = copy.deepcopy(viewer.state)\n",
"new_state.layers['segmentation'].segments.add(10625)\n",
"viewer.set_state(new_state)"
@@ -243,8 +244,8 @@
" st.status_messages['hello'] = ('Got action %d: mouse position = %r' %\n",
" (num_actions, s.mouse_voxel_coordinates))\n",
" print('Got my-action')\n",
- " print(' Mouse position: %s' % (s.mouse_voxel_coordinates,))\n",
- " print(' Layer selected values: %s' % (s.selected_values,))\n",
+ " print(f' Mouse position: {s.mouse_voxel_coordinates}')\n",
+ " print(f' Layer selected values: {s.selected_values}')\n",
"viewer.actions.add('my-action', my_action)\n",
"with viewer.config_state.txn() as s:\n",
" s.input_event_bindings.viewer['keyt'] = 'my-action'\n",
@@ -283,6 +284,7 @@
"outputs": [],
"source": [
"from ipywidgets import Image\n",
+ "\n",
"screenshot = viewer.screenshot(size=[1000, 1000])\n",
"screenshot_image = Image(value=screenshot.screenshot.image)\n",
"screenshot_image"
diff --git a/python/examples/synaptic_partners.py b/python/examples/synaptic_partners.py
index 9ed685acb3..93903276ff 100755
--- a/python/examples/synaptic_partners.py
+++ b/python/examples/synaptic_partners.py
@@ -17,19 +17,19 @@
import collections
import json
import time
-import six
import neuroglancer
import neuroglancer.cli
+
def get_synapses_by_id(synapse_data):
synapses_by_id = {}
partner_counts = {}
for x in synapse_data:
- pre_id = x['T-bar']['body ID']
+ pre_id = x["T-bar"]["body ID"]
synapses_by_id.setdefault(pre_id, []).append(x)
- for partner in x['partners']:
- post_id = partner['body ID']
+ for partner in x["partners"]:
+ post_id = partner["body ID"]
synapses_by_id.setdefault(post_id, []).append(x)
partner_counts.setdefault(pre_id, collections.Counter())[post_id] += 1
partner_counts.setdefault(post_id, collections.Counter())[pre_id] += 1
@@ -37,76 +37,81 @@ def get_synapses_by_id(synapse_data):
return synapses_by_id, partner_counts
-class Demo(object):
- def __init__(self, synapse_path, top_method='min', num_top_partners=10):
- with open(synapse_path, 'r') as f:
- synapse_data = json.load(f)['data']
- self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(synapse_data)
+class Demo:
+ def __init__(self, synapse_path, top_method="min", num_top_partners=10):
+ with open(synapse_path) as f:
+ synapse_data = json.load(f)["data"]
+ self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(
+ synapse_data
+ )
self.top_method = top_method
self.num_top_partners = num_top_partners
dimensions = neuroglancer.CoordinateSpace(
- names=['x', 'y', 'z'],
- units='nm',
+ names=["x", "y", "z"],
+ units="nm",
scales=[8, 8, 8],
)
viewer = self.viewer = neuroglancer.Viewer()
- viewer.actions.add('select-custom', self._handle_select)
+ viewer.actions.add("select-custom", self._handle_select)
with viewer.config_state.txn() as s:
- s.input_event_bindings.data_view['dblclick0'] = 'select-custom'
+ s.input_event_bindings.data_view["dblclick0"] = "select-custom"
with viewer.txn() as s:
s.projection_orientation = [0.63240087, 0.01582051, 0.05692779, 0.77238464]
s.dimensions = dimensions
s.position = [3000, 3000, 3000]
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/image",
+ )
+ s.layers["ground_truth"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth",
)
- s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
+ s.layers["partners"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth",
)
- s.layers['partners'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
+ s.layers["synapses"] = neuroglancer.LocalAnnotationLayer(
+ dimensions=dimensions, linked_segmentation_layer="ground_truth"
+ )
+ s.layout = neuroglancer.row_layout(
+ [
+ neuroglancer.LayerGroupViewer(
+ layout="xy",
+ layers=["image", "ground_truth", "partners", "synapses"],
+ ),
+ neuroglancer.LayerGroupViewer(
+ layout="3d",
+ layers=["ground_truth", "synapses"],
+ ),
+ neuroglancer.LayerGroupViewer(
+ layout="3d",
+ layers=["partners", "synapses"],
+ ),
+ ]
)
- s.layers['synapses'] = neuroglancer.LocalAnnotationLayer(
- dimensions=dimensions,
- linked_segmentation_layer='ground_truth')
- s.layout = neuroglancer.row_layout([
- neuroglancer.LayerGroupViewer(
- layout='xy',
- layers=['image', 'ground_truth', 'partners', 'synapses'],
- ),
- neuroglancer.LayerGroupViewer(
- layout='3d',
- layers=['ground_truth', 'synapses'],
- ),
- neuroglancer.LayerGroupViewer(
- layout='3d',
- layers=['partners', 'synapses'],
- ),
- ])
self.selected_segments = frozenset()
self.viewer.shared_state.add_changed_callback(
- lambda: self.viewer.defer_callback(self.on_state_changed))
+ lambda: self.viewer.defer_callback(self.on_state_changed)
+ )
def _handle_select(self, action_state):
- segment_id = action_state.selected_values.get('ground_truth')
- if segment_id is None: return
+ segment_id = action_state.selected_values.get("ground_truth")
+ if segment_id is None:
+ return
segment_id = segment_id.value
with self.viewer.txn() as s:
- segments = s.layers['ground_truth'].segments
+ segments = s.layers["ground_truth"].segments
if segment_id in segments:
segments.remove(segment_id)
else:
segments.add(segment_id)
def on_state_changed(self):
- new_segments = self.viewer.state.layers['ground_truth'].segments
+ new_segments = self.viewer.state.layers["ground_truth"].segments
if new_segments != self.selected_segments:
self.selected_segments = new_segments
- self.viewer.defer_callback(
- self._update_synapses)
+ self.viewer.defer_callback(self._update_synapses)
def _update_synapses(self):
synapses = {}
@@ -116,54 +121,61 @@ def _update_synapses(self):
synapses[id(synapse)] = synapse
for segment_id in self.selected_segments:
- cur_counts = self.synapse_partner_counts.get(segment_id, collections.Counter())
+ cur_counts = self.synapse_partner_counts.get(
+ segment_id, collections.Counter()
+ )
if partner_counts is None:
partner_counts = cur_counts
continue
- if self.top_method == 'sum':
+ if self.top_method == "sum":
partner_counts = partner_counts + cur_counts
- elif self.top_method == 'min':
+ elif self.top_method == "min":
partner_counts = partner_counts & cur_counts
if partner_counts is None:
partner_counts = collections.Counter()
top_partners = sorted(
- (x for x in partner_counts.keys() if x not in self.selected_segments),
- key=lambda x: -partner_counts[x])
- top_partners = top_partners[:self.num_top_partners]
+ (x for x in partner_counts if x not in self.selected_segments),
+ key=lambda x: -partner_counts[x],
+ )
+ top_partners = top_partners[: self.num_top_partners]
with self.viewer.txn() as s:
- s.layers['partners'].segments = top_partners
- annotations = s.layers['synapses'].annotations
+ s.layers["partners"].segments = top_partners
+ annotations = s.layers["synapses"].annotations
del annotations[:]
- for synapse in six.itervalues(synapses):
- tbar = synapse['T-bar']
- for partner in synapse['partners']:
+ for synapse in synapses.values():
+ tbar = synapse["T-bar"]
+ for partner in synapse["partners"]:
annotations.append(
neuroglancer.LineAnnotation(
- id='%d' % id(partner),
- point_a=tbar['location'],
- point_b=partner['location'],
- segments=[tbar['body ID'], partner['body ID']],
- ))
+ id="%d" % id(partner),
+ point_a=tbar["location"],
+ point_b=partner["location"],
+ segments=[tbar["body ID"], partner["body ID"]],
+ )
+ )
+
-if __name__ == '__main__':
+if __name__ == "__main__":
import argparse
+
ap = argparse.ArgumentParser()
ap.add_argument(
- 'synapses',
- help=
- 'Path to synapse.json file downloaded from http://emdata.janelia.org/api/node/822524777d3048b8bd520043f90c1d28/.files/key/synapse.json'
+ "synapses",
+ help="Path to synapse.json file downloaded from http://emdata.janelia.org/api/node/822524777d3048b8bd520043f90c1d28/.files/key/synapse.json",
)
ap.add_argument(
- '-n',
- '--num-partners',
+ "-n",
+ "--num-partners",
default=10,
type=int,
- help='Number of top synaptic partners to display.')
+ help="Number of top synaptic partners to display.",
+ )
ap.add_argument(
- '--order',
- choices=['min', 'sum'],
- default='min',
- help='Method by which to combine synaptic partner counts from multiple segments.')
+ "--order",
+ choices=["min", "sum"],
+ default="min",
+ help="Method by which to combine synaptic partner counts from multiple segments.",
+ )
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
@@ -174,7 +186,7 @@ def _update_synapses(self):
top_method=args.order,
)
print(demo.viewer)
- import time
+
time.sleep(5000)
while True:
time.sleep(1000)
diff --git a/python/examples/webdriver_example.py b/python/examples/webdriver_example.py
index 2f745e38cd..a629a784f2 100644
--- a/python/examples/webdriver_example.py
+++ b/python/examples/webdriver_example.py
@@ -1,27 +1,32 @@
import argparse
+
import neuroglancer
-import neuroglancer.webdriver
import neuroglancer.cli
+import neuroglancer.webdriver
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
-ap.add_argument('--browser', choices=['chrome', 'firefox'], default='chrome')
+ap.add_argument("--browser", choices=["chrome", "firefox"], default="chrome")
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(
- source='precomputed://gs://neuroglancer-janelia-flyem-hemibrain/emdata/clahe_yz/jpeg',
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source="precomputed://gs://neuroglancer-janelia-flyem-hemibrain/emdata/clahe_yz/jpeg",
)
- s.layers['segmentation'] = neuroglancer.SegmentationLayer(
- source='precomputed://gs://neuroglancer-janelia-flyem-hemibrain/v1.1/segmentation',
+ s.layers["segmentation"] = neuroglancer.SegmentationLayer(
+ source="precomputed://gs://neuroglancer-janelia-flyem-hemibrain/v1.1/segmentation",
)
-webdriver = neuroglancer.webdriver.Webdriver(viewer, headless=False, browser=args.browser)
+webdriver = neuroglancer.webdriver.Webdriver(
+ viewer, headless=False, browser=args.browser
+)
def get_loading_progress():
- return webdriver.driver.execute_script('''
+ return webdriver.driver.execute_script(
+ """
const userLayer = viewer.layerManager.getLayerByName("segmentation").layer;
return userLayer.renderLayers.map(x => x.layerChunkProgressInfo)
- ''')
+ """
+ )
diff --git a/python/examples/write_annotations.py b/python/examples/write_annotations.py
index da30cd4e9f..00ee4119a6 100644
--- a/python/examples/write_annotations.py
+++ b/python/examples/write_annotations.py
@@ -9,15 +9,17 @@
import neuroglancer.write_annotations
import numpy as np
-def write_some_annotations(output_dir: str, coordinate_space: neuroglancer.CoordinateSpace):
+def write_some_annotations(
+ output_dir: str, coordinate_space: neuroglancer.CoordinateSpace
+):
writer = neuroglancer.write_annotations.AnnotationWriter(
coordinate_space=coordinate_space,
- annotation_type='point',
+ annotation_type="point",
properties=[
- neuroglancer.AnnotationPropertySpec(id='size', type='float32'),
- neuroglancer.AnnotationPropertySpec(id='cell_type', type='uint16'),
- neuroglancer.AnnotationPropertySpec(id='point_color', type='rgba'),
+ neuroglancer.AnnotationPropertySpec(id="size", type="float32"),
+ neuroglancer.AnnotationPropertySpec(id="cell_type", type="uint16"),
+ neuroglancer.AnnotationPropertySpec(id="point_color", type="rgba"),
],
)
@@ -26,7 +28,7 @@ def write_some_annotations(output_dir: str, coordinate_space: neuroglancer.Coord
writer.write(output_dir)
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -36,29 +38,33 @@ def write_some_annotations(output_dir: str, coordinate_space: neuroglancer.Coord
tempdir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tempdir)
- coordinate_space = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
- units=['nm', 'nm', 'nm'],
- scales=[10, 10, 10])
+ coordinate_space = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units=["nm", "nm", "nm"], scales=[10, 10, 10]
+ )
write_some_annotations(output_dir=tempdir, coordinate_space=coordinate_space)
server = neuroglancer.static_file_server.StaticFileServer(
- static_dir=tempdir, bind_address=args.bind_address or '127.0.0.1',
- daemon=True)
+ static_dir=tempdir, bind_address=args.bind_address or "127.0.0.1", daemon=True
+ )
with viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(
- data=np.full(fill_value=200, shape=(100, 100, 100), dtype=np.uint8),
- dimensions=coordinate_space),
- )
- s.layers['annotations'] = neuroglancer.AnnotationLayer(source=f'precomputed://{server.url}',
- tab='rendering',
- shader="""
+ s.layers["image"] = neuroglancer.ImageLayer(
+ source=neuroglancer.LocalVolume(
+ data=np.full(fill_value=200, shape=(100, 100, 100), dtype=np.uint8),
+ dimensions=coordinate_space,
+ ),
+ )
+ s.layers["annotations"] = neuroglancer.AnnotationLayer(
+ source=f"precomputed://{server.url}",
+ tab="rendering",
+ shader="""
void main() {
setColor(prop_point_color());
setPointMarkerSize(prop_size());
}
- """)
- s.selected_layer.layer = 'annotations'
+ """,
+ )
+ s.selected_layer.layer = "annotations"
s.selected_layer.visible = True
s.show_slices = False
print(viewer)
diff --git a/python/ext/src/generate_marching_cubes_tables.py b/python/ext/src/generate_marching_cubes_tables.py
index 61de616806..669c7c4d70 100644
--- a/python/ext/src/generate_marching_cubes_tables.py
+++ b/python/ext/src/generate_marching_cubes_tables.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import print_function
cube_corner_position_offsets = [
[0, 0, 0], #
@@ -22,7 +21,7 @@
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
- [0, 1, 1]
+ [0, 1, 1],
]
cube_edge_index_to_corner_index_pair_table = [
@@ -41,8 +40,11 @@
]
for edge_i, corners in enumerate(cube_edge_index_to_corner_index_pair_table):
- if cube_corner_position_offsets[corners[0]] > cube_corner_position_offsets[corners[1]]:
- print('edge %d is flipped' % (edge_i))
+ if (
+ cube_corner_position_offsets[corners[0]]
+ > cube_corner_position_offsets[corners[1]]
+ ):
+ print("edge %d is flipped" % (edge_i))
cube_edge_vertex_map_selectors_table = [0] * 256
cube_edge_mask_table = [0] * 256
@@ -51,18 +53,20 @@
selectors = 0
edge_mask = 0
for edge_i, corners in enumerate(cube_edge_index_to_corner_index_pair_table):
- edge_corners_present = [(corners_present >> corner_i) & 1 for corner_i in corners]
+ edge_corners_present = [
+ (corners_present >> corner_i) & 1 for corner_i in corners
+ ]
if 0 in edge_corners_present and 1 in edge_corners_present:
- edge_mask |= (1 << edge_i)
+ edge_mask |= 1 << edge_i
selector = edge_corners_present[0]
selectors |= selector << edge_i
cube_edge_mask_table[corners_present] = edge_mask
cube_edge_vertex_map_selectors_table[corners_present] = selectors
-print('static uint32_t cube_edge_mask_table[256] = {')
-print(', '.join(map(hex, cube_edge_mask_table)))
-print('};')
+print("static uint32_t cube_edge_mask_table[256] = {")
+print(", ".join(map(hex, cube_edge_mask_table)))
+print("};")
-print('static uint32_t cube_edge_vertex_map_selectors_table[256] = {')
-print(', '.join(map(hex, cube_edge_vertex_map_selectors_table)))
-print('};')
+print("static uint32_t cube_edge_vertex_map_selectors_table[256] = {")
+print(", ".join(map(hex, cube_edge_vertex_map_selectors_table)))
+print("};")
diff --git a/python/neuroglancer/__init__.py b/python/neuroglancer/__init__.py
index d034a6394a..2f569b06eb 100644
--- a/python/neuroglancer/__init__.py
+++ b/python/neuroglancer/__init__.py
@@ -12,16 +12,104 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-from .server import set_static_content_source, set_server_bind_address, is_server_running, stop
-from .viewer import Viewer, UnsynchronizedViewer
-from .local_volume import LocalVolume
-from .viewer_state import *
-from .viewer_config_state import LayerSelectedValues, LayerSelectionState, SegmentIdMapEntry, PrefetchState, ScaleBarOptions
-from .equivalence_map import EquivalenceMap
-from .url_state import to_url, parse_url, to_json_dump
-from .screenshot import ScreenshotSaver
-from . import skeleton
-from . import server
-from . import segment_colors
-from .default_credentials_manager import set_boss_token
+
+from . import ( # noqa: I001
+ segment_colors, # noqa: F401
+ server, # noqa: F401
+ skeleton, # noqa: F401
+)
+from .default_credentials_manager import set_boss_token # noqa: F401
+from .equivalence_map import EquivalenceMap # noqa: F401
+from .local_volume import LocalVolume # noqa: F401
+from .screenshot import ScreenshotSaver # noqa: F401
+from .server import (
+ is_server_running, # noqa: F401
+ set_server_bind_address, # noqa: F401
+ set_static_content_source, # noqa: F401
+ stop, # noqa: F401
+)
+from .url_state import parse_url, to_json_dump, to_url # noqa: F401
+from .viewer import UnsynchronizedViewer, Viewer # noqa: F401
+from .viewer_config_state import (
+ LayerSelectedValues, # noqa: F401
+ LayerSelectionState, # noqa: F401
+ PrefetchState, # noqa: F401
+ ScaleBarOptions, # noqa: F401
+ SegmentIdMapEntry, # noqa: F401
+)
+from .viewer_state import (
+ CoordinateSpace, # noqa: F401
+ DimensionScale, # noqa: F401
+ CoordinateArray, # noqa: F401
+ Tool, # noqa: F401
+ PlacePointTool, # noqa: F401
+ PlaceLineTool, # noqa: F401
+ PlaceBoundingBoxTool, # noqa: F401
+ PlaceEllipsoidTool, # noqa: F401
+ BlendTool, # noqa: F401
+ OpacityTool, # noqa: F401
+ CrossSectionRenderScaleTool, # noqa: F401
+ SelectedAlphaTool, # noqa: F401
+ NotSelectedAlphaTool, # noqa: F401
+ ObjectAlphaTool, # noqa: F401
+ HideSegmentZeroTool, # noqa: F401
+ HoverHighlightTool, # noqa: F401
+ BaseSegmentColoringTool, # noqa: F401
+ IgnoreNullVisibleSetTool, # noqa: F401
+ ColorSeedTool, # noqa: F401
+ SegmentDefaultColorTool, # noqa: F401
+ MeshRenderScaleTool, # noqa: F401
+ MeshSilhouetteRenderingTool, # noqa: F401
+ SaturationTool, # noqa: F401
+ SkeletonRenderingMode2dTool, # noqa: F401
+ SkeletonRenderingMode3dTool, # noqa: F401
+ SkeletonRenderingLineWidth2dTool, # noqa: F401
+ SkeletonRenderingLineWidth3dTool, # noqa: F401
+ ShaderControlTool, # noqa: F401
+ MergeSegmentsTool, # noqa: F401
+ SplitSegmentsTool, # noqa: F401
+ SelectSegmentsTool, # noqa: F401
+ DimensionTool, # noqa: F401
+ tool, # noqa: F401
+ SidePanelLocation, # noqa: F401
+ SelectedLayerState, # noqa: F401
+ StatisticsDisplayState, # noqa: F401
+ LayerSidePanelState, # noqa: F401
+ LayerListPanelState, # noqa: F401
+ HelpPanelState, # noqa: F401
+ DimensionPlaybackVelocity, # noqa: F401
+ Layer, # noqa: F401
+ PointAnnotationLayer, # noqa: F401
+ CoordinateSpaceTransform, # noqa: F401
+ LayerDataSubsource, # noqa: F401
+ LayerDataSource, # noqa: F401
+ LayerDataSources, # noqa: F401
+ InvlerpParameters, # noqa: F401
+ ImageLayer, # noqa: F401
+ SkeletonRenderingOptions, # noqa: F401
+ StarredSegments, # noqa: F401
+ VisibleSegments, # noqa: F401
+ SegmentationLayer, # noqa: F401
+ SingleMeshLayer, # noqa: F401
+ PointAnnotation, # noqa: F401
+ LineAnnotation, # noqa: F401
+ AxisAlignedBoundingBoxAnnotation, # noqa: F401
+ EllipsoidAnnotation, # noqa: F401
+ AnnotationPropertySpec, # noqa: F401
+ AnnotationLayer, # noqa: F401
+ LocalAnnotationLayer, # noqa: F401
+ ManagedLayer, # noqa: F401
+ Layers, # noqa: F401
+ LinkedPosition, # noqa: F401
+ LinkedZoomFactor, # noqa: F401
+ LinkedDepthRange, # noqa: F401
+ LinkedOrientationState, # noqa: F401
+ CrossSection, # noqa: F401
+ CrossSectionMap, # noqa: F401
+ DataPanelLayout, # noqa: F401
+ StackLayout, # noqa: F401
+ row_layout, # noqa: F401
+ column_layout, # noqa: F401
+ LayerGroupViewer, # noqa: F401
+ ViewerState, # noqa: F401
+)
diff --git a/python/neuroglancer/async_util.py b/python/neuroglancer/async_util.py
index 20054b6bc0..867646194f 100644
--- a/python/neuroglancer/async_util.py
+++ b/python/neuroglancer/async_util.py
@@ -20,7 +20,6 @@
class BackgroundTornadoServerMetaclass(type):
-
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.__post_init__()
@@ -28,7 +27,6 @@ def __call__(cls, *args, **kwargs):
class BackgroundTornadoServer(metaclass=BackgroundTornadoServerMetaclass):
-
def __init__(self, daemon=False):
self._thread = threading.Thread(target=self._run_server)
if daemon:
@@ -49,7 +47,7 @@ def _run_server(self):
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
- ioloop = tornado.platform.asyncio.AsyncIOMainLoop()
+ tornado.platform.asyncio.AsyncIOMainLoop()
self.loop = loop
loop.call_soon(self._start_server)
loop.run_forever()
@@ -72,7 +70,9 @@ def request_stop(self):
with self._stop_lock:
if not self._stop_requested:
self._stop_requested = True
- self.loop.call_soon_threadsafe(lambda: asyncio.create_task(self._stop()))
+ self.loop.call_soon_threadsafe(
+ lambda: asyncio.create_task(self._stop())
+ )
def stop(self):
self.request_stop()
@@ -80,5 +80,7 @@ def stop(self):
async def _stop(self):
self.http_server.stop()
- await tornado.platform.asyncio.to_asyncio_future(self.http_server.close_all_connections())
+ await tornado.platform.asyncio.to_asyncio_future(
+ self.http_server.close_all_connections()
+ )
self.loop.stop()
diff --git a/python/neuroglancer/boss_credentials.py b/python/neuroglancer/boss_credentials.py
index 24ecd5b6b4..8bbd5f1009 100644
--- a/python/neuroglancer/boss_credentials.py
+++ b/python/neuroglancer/boss_credentials.py
@@ -1,55 +1,61 @@
-from . import credentials_provider
-from .futures import run_on_new_thread
-import logging
-import threading
-import os
-from six.moves.configparser import ConfigParser
-
-class BossCredentialsProvider(credentials_provider.CredentialsProvider):
- def __init__(self):
- super(BossCredentialsProvider, self).__init__()
-
- # Make sure logging is initialized. Does nothing if logging has already
- # been initialized.
- logging.basicConfig()
-
- self._lock = threading.Lock()
- self._credentials = None
-
- def set_token(self, token):
- # Token should be a string
- self._credentials = dict(tokenType=u'Token', accessToken=token)
-
- def get_new(self):
- def func():
- with self._lock:
- # First, see if user has defined a token using set_token
- if self._credentials is not None:
- return self._credentials
-
- # If not, look for config file in intern file location
- config_path = '~/.intern/intern.cfg'
- if os.path.isfile(os.path.expanduser(config_path)):
- with open(os.path.expanduser(config_path), 'r') as config_file_handle:
- config_parser = ConfigParser()
- config_parser.readfp(config_file_handle)
- # Try Default section first
- try:
- self._credentials = config_parser["Default"]["token"]
- print("Using token from intern config file")
- return dict(tokenType=u'Token', accessToken=self._credentials)
- except:
- pass
- # Try Volume Service section second
- try:
- self._credentials = config_parser["Volume Service"]["token"]
- print("Using token from intern config file")
- return dict(tokenType=u'Token', accessToken=self._credentials)
- except:
- pass
-
- # Else, use "public"
- print("Accessing Boss data using token 'public'")
- return dict(tokenType=u'Token', accessToken='public')
-
- return run_on_new_thread(func)
+import logging
+import os
+import threading
+from configparser import ConfigParser
+
+from . import credentials_provider
+from .futures import run_on_new_thread
+
+
+class BossCredentialsProvider(credentials_provider.CredentialsProvider):
+ def __init__(self):
+ super().__init__()
+
+ # Make sure logging is initialized. Does nothing if logging has already
+ # been initialized.
+ logging.basicConfig()
+
+ self._lock = threading.Lock()
+ self._credentials = None
+
+ def set_token(self, token):
+ # Token should be a string
+ self._credentials = dict(tokenType="Token", accessToken=token)
+
+ def get_new(self):
+ def func():
+ with self._lock:
+ # First, see if user has defined a token using set_token
+ if self._credentials is not None:
+ return self._credentials
+
+ # If not, look for config file in intern file location
+ config_path = "~/.intern/intern.cfg"
+ if os.path.isfile(os.path.expanduser(config_path)):
+ with open(os.path.expanduser(config_path)) as config_file_handle:
+ config_parser = ConfigParser()
+ config_parser.readfp(config_file_handle)
+ # Try Default section first
+ try:
+ self._credentials = config_parser["Default"]["token"]
+ print("Using token from intern config file")
+ return dict(
+ tokenType="Token", accessToken=self._credentials
+ )
+ except Exception:
+ pass
+ # Try Volume Service section second
+ try:
+ self._credentials = config_parser["Volume Service"]["token"]
+ print("Using token from intern config file")
+ return dict(
+ tokenType="Token", accessToken=self._credentials
+ )
+ except Exception:
+ pass
+
+ # Else, use "public"
+ print("Accessing Boss data using token 'public'")
+ return dict(tokenType="Token", accessToken="public")
+
+ return run_on_new_thread(func)
diff --git a/python/neuroglancer/chunks.py b/python/neuroglancer/chunks.py
index d5706a36f8..34ba25cea0 100644
--- a/python/neuroglancer/chunks.py
+++ b/python/neuroglancer/chunks.py
@@ -36,4 +36,4 @@ def encode_npz(subvol):
def encode_raw(subvol):
- return subvol.tostring('C')
+ return subvol.tostring("C")
diff --git a/python/neuroglancer/cli.py b/python/neuroglancer/cli.py
index 160193174a..7c8da411a9 100644
--- a/python/neuroglancer/cli.py
+++ b/python/neuroglancer/cli.py
@@ -15,42 +15,53 @@
def add_server_arguments(ap):
"""Defines common options for the Neuroglancer server."""
- g = ap.add_argument_group(title='Neuroglancer server options')
+ g = ap.add_argument_group(title="Neuroglancer server options")
g.add_argument(
- '--bind-address',
- help='Bind address for Python web server. Use 127.0.0.1 (the default) to restrict access '
- 'to browers running on the local machine, use 0.0.0.0 to permit access from remote browsers.'
+ "--bind-address",
+ help="Bind address for Python web server. Use 127.0.0.1 (the default) to restrict access "
+ "to browers running on the local machine, use 0.0.0.0 to permit access from remote browsers.",
+ )
+ g.add_argument(
+ "--static-content-url",
+ help="Obtain the Neuroglancer client code from the specified URL.",
+ )
+ g.add_argument(
+ "--debug-server",
+ action="store_true",
+ help="Log requests to web server used for Neuroglancer Python API",
)
- g.add_argument('--static-content-url',
- help='Obtain the Neuroglancer client code from the specified URL.')
- g.add_argument('--debug-server',
- action='store_true',
- help='Log requests to web server used for Neuroglancer Python API')
-def add_state_arguments(ap, required=False, dest='state'):
+def add_state_arguments(ap, required=False, dest="state"):
"""Defines options for specifying a Neuroglancer state."""
g = ap.add_mutually_exclusive_group(required=required)
def neuroglancer_url(s):
from .url_state import parse_url
+
return parse_url(s)
- g.add_argument('--url',
- type=neuroglancer_url,
- dest=dest,
- help='Neuroglancer URL from which to obtain state.')
+ g.add_argument(
+ "--url",
+ type=neuroglancer_url,
+ dest=dest,
+ help="Neuroglancer URL from which to obtain state.",
+ )
def json_state(path):
import json
+
from . import viewer_state
- with open(path, 'r') as f:
+
+ with open(path) as f:
return viewer_state.ViewerState(json.load(f))
- g.add_argument('--json',
- type=json_state,
- dest=dest,
- help='Path to file containing Neuroglancer JSON state.')
+ g.add_argument(
+ "--json",
+ type=json_state,
+ dest=dest,
+ help="Path to file containing Neuroglancer JSON state.",
+ )
def handle_server_arguments(args):
diff --git a/python/neuroglancer/coordinate_space.py b/python/neuroglancer/coordinate_space.py
index 9f11813e9c..b8357b4697 100644
--- a/python/neuroglancer/coordinate_space.py
+++ b/python/neuroglancer/coordinate_space.py
@@ -1,4 +1,3 @@
-# coding=utf-8
# @license
# Copyright 2019-2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,6 +14,8 @@
"""Wrappers for representing a Neuroglancer coordinate space."""
import collections
+import re
+
import numpy as np
__all__ = []
@@ -26,36 +27,37 @@ def export(obj):
si_prefixes = {
- 'Y': 24,
- 'Z': 21,
- 'E': 18,
- 'P': 15,
- 'T': 12,
- 'G': 9,
- 'M': 6,
- 'k': 3,
- 'h': 2,
- '': 0,
- 'c': -2,
- 'm': -3,
- 'u': -6,
- 'µ': -6,
- 'n': -9,
- 'p': -12,
- 'f': -15,
- 'a': -18,
- 'z': -21,
- 'y': -24,
+ "Y": 24,
+ "Z": 21,
+ "E": 18,
+ "P": 15,
+ "T": 12,
+ "G": 9,
+ "M": 6,
+ "k": 3,
+ "h": 2,
+ "": 0,
+ "c": -2,
+ "m": -3,
+ "u": -6,
+ "µ": -6,
+ "n": -9,
+ "p": -12,
+ "f": -15,
+ "a": -18,
+ "z": -21,
+ "y": -24,
}
-si_units = ['m', 's', 'rad/s', 'Hz']
+si_units = ["m", "s", "rad/s", "Hz"]
si_units_with_prefixes = {
- '%s%s' % (prefix, unit): (unit, exponent)
- for (prefix, exponent) in si_prefixes.items() for unit in si_units
+ f"{prefix}{unit}": (unit, exponent)
+ for (prefix, exponent) in si_prefixes.items()
+ for unit in si_units
}
-si_units_with_prefixes[''] = ('', 0)
+si_units_with_prefixes[""] = ("", 0)
def parse_unit(scale, unit):
@@ -63,12 +65,37 @@ def parse_unit(scale, unit):
if exponent >= 0:
return (scale * 10**exponent, unit)
else:
- return (scale / 10**(-exponent), unit)
+ return (scale / 10 ** (-exponent), unit)
+
+
+def parse_unit_and_scale(unit_and_scale: str) -> tuple[float, str]:
+ if unit_and_scale == "":
+ return (1, "")
+    m = re.fullmatch(
+        r"((?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?)?([µa-zA-Z]+)?", unit_and_scale
+    )
+ if m is None:
+ raise ValueError("Invalid unit", unit_and_scale)
+ scale_str = m.group(1)
+ if scale_str is None:
+ scale = 1.0
+ else:
+ scale = float(scale_str)
+
+ unit = ""
+ unit_str = m.group(2)
+ if unit_str is not None:
+ unit, exponent = si_units_with_prefixes[unit_str]
+ if exponent >= 0:
+ scale *= 10**exponent
+ else:
+ scale /= 10 ** (-exponent)
+ return (scale, unit)
@export
class CoordinateArray:
- __slots__ = ('_data')
+ __slots__ = "_data"
def __init__(self, json_data=None, labels=None, coordinates=None, mappings=None):
if mappings is None:
@@ -81,11 +108,16 @@ def __init__(self, json_data=None, labels=None, coordinates=None, mappings=None)
for coordinate, label in zip(coordinates, labels):
mappings[coordinate] = label
if json_data is not None:
- if not isinstance(json_data,
- dict) or 'coordinates' not in json_data or 'labels' not in json_data:
- raise ValueError('Expected object with "coordinates" and "labels" properties')
- coordinates = json_data['coordinates']
- labels = json_data['labels']
+ if (
+ not isinstance(json_data, dict)
+ or "coordinates" not in json_data
+ or "labels" not in json_data
+ ):
+ raise ValueError(
+ 'Expected object with "coordinates" and "labels" properties'
+ )
+ coordinates = json_data["coordinates"]
+ labels = json_data["labels"]
for coordinate, label in zip(coordinates, labels):
mappings[coordinate] = label
self._data = mappings
@@ -103,27 +135,32 @@ def __str__(self):
return str(self._data)
def __eq__(self, other):
- if not isinstance(other, CoordinateArray): return False
+ if not isinstance(other, CoordinateArray):
+ return False
return self._data == other._data
def __getitem__(self, k):
if isinstance(k, str):
for other_k, other_v in self._data.items():
- if other_k == k: return other_v
- raise KeyError('label not found: %r' % (k, ))
+ if other_k == k:
+ return other_v
+ raise KeyError(f"label not found: {k!r}")
return self._data[k]
def to_json(self):
- return dict(coordinates=list(self._data.keys()), labels=list(self._data.values()))
+ return dict(
+ coordinates=list(self._data.keys()), labels=list(self._data.values())
+ )
@export
-class DimensionScale(collections.namedtuple('DimensionScale',
- ['scale', 'unit', 'coordinate_array'])):
+class DimensionScale(
+ collections.namedtuple("DimensionScale", ["scale", "unit", "coordinate_array"])
+):
__slots__ = ()
- def __new__(cls, scale=1, unit='', coordinate_array=None):
- return super(DimensionScale, cls).__new__(cls, scale, unit, coordinate_array)
+ def __new__(cls, scale=1, unit="", coordinate_array=None):
+ return super().__new__(cls, scale, unit, coordinate_array)
@staticmethod
def from_json(json):
@@ -131,7 +168,7 @@ def from_json(json):
return json
if isinstance(json, list):
if len(json) != 2:
- raise ValueError('Expected [scale, unit], but received: %r' % (json, ))
+ raise ValueError(f"Expected [scale, unit], but received: {json!r}")
scale = json[0]
unit = json[1]
coordinate_array = None
@@ -143,10 +180,12 @@ def from_json(json):
@export
-class CoordinateSpace(object):
- __slots__ = ('names', 'scales', 'units', 'coordinate_arrays')
+class CoordinateSpace:
+ __slots__ = ("names", "scales", "units", "coordinate_arrays")
- def __init__(self, json=None, names=None, scales=None, units=None, coordinate_arrays=None):
+ def __init__(
+ self, json=None, names=None, scales=None, units=None, coordinate_arrays=None
+ ):
if json is None:
if names is not None:
self.names = tuple(names)
@@ -154,7 +193,8 @@ def __init__(self, json=None, names=None, scales=None, units=None, coordinate_ar
if isinstance(units, str):
units = tuple(units for _ in names)
scales_and_units = tuple(
- parse_unit(scale, unit) for scale, unit in zip(scales, units))
+ parse_unit(scale, unit) for scale, unit in zip(scales, units)
+ )
scales = np.array([s[0] for s in scales_and_units], dtype=np.float64)
units = tuple(s[1] for s in scales_and_units)
if coordinate_arrays is None:
@@ -170,7 +210,8 @@ def __init__(self, json=None, names=None, scales=None, units=None, coordinate_ar
self.units = ()
self.coordinate_arrays = ()
else:
- if not isinstance(json, dict): raise TypeError
+ if not isinstance(json, dict):
+ raise TypeError
self.names = tuple(json.keys())
values = tuple(DimensionScale.from_json(v) for v in json.values())
self.scales = np.array([v.scale for v in values], dtype=np.float64)
@@ -185,27 +226,35 @@ def rank(self):
def __getitem__(self, i):
if isinstance(i, str):
idx = self.names.index(i)
- return DimensionScale(scale=self.scales[idx],
- unit=self.units[idx],
- coordinate_array=self.coordinate_arrays[idx])
+ return DimensionScale(
+ scale=self.scales[idx],
+ unit=self.units[idx],
+ coordinate_array=self.coordinate_arrays[idx],
+ )
if isinstance(i, slice):
idxs = range(self.rank)[i]
return [
- DimensionScale(scale=self.scales[j],
- unit=self.units[j],
- coordinate_array=self.coordinate_arrays[j]) for j in idxs
+ DimensionScale(
+ scale=self.scales[j],
+ unit=self.units[j],
+ coordinate_array=self.coordinate_arrays[j],
+ )
+ for j in idxs
]
- return DimensionScale(scale=self.scales[i],
- unit=self.units[i],
- coordinate_array=self.coordinate_arrays[i])
+ return DimensionScale(
+ scale=self.scales[i],
+ unit=self.units[i],
+ coordinate_array=self.coordinate_arrays[i],
+ )
def __repr__(self):
- return 'CoordinateSpace(%r)' % (self.to_json(), )
+ return f"CoordinateSpace({self.to_json()!r})"
def to_json(self):
- d = collections.OrderedDict()
- for name, scale, unit, coordinate_array in zip(self.names, self.scales, self.units,
- self.coordinate_arrays):
+ d = {}
+ for name, scale, unit, coordinate_array in zip(
+ self.names, self.scales, self.units, self.coordinate_arrays
+ ):
if coordinate_array is None:
d[name] = [scale, unit]
else:
diff --git a/python/neuroglancer/credentials_provider.py b/python/neuroglancer/credentials_provider.py
index 6855ea3f85..aa661aa5d7 100644
--- a/python/neuroglancer/credentials_provider.py
+++ b/python/neuroglancer/credentials_provider.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import concurrent.futures
import threading
@@ -20,7 +19,7 @@
from .futures import future_then_immediate
-class CredentialsManager(object):
+class CredentialsManager:
def __init__(self):
self._providers = dict()
@@ -31,7 +30,7 @@ def get(self, key, parameters):
return self._providers[key](parameters)
-class CredentialsProvider(object):
+class CredentialsProvider:
next_generation = 0
next_generation_lock = threading.Lock()
@@ -42,8 +41,10 @@ def __init__(self):
def get(self, invalid_generation=None):
with self._lock:
- if self.future is not None and (self.credentials is None or
- invalid_generation != self.credentials['generation']):
+ if self.future is not None and (
+ self.credentials is None
+ or invalid_generation != self.credentials["generation"]
+ ):
return self.future
self.credentials = None
self.future = future = concurrent.futures.Future()
@@ -53,12 +54,15 @@ def attach_generation_and_save_credentials(credentials):
with CredentialsProvider.next_generation_lock:
CredentialsProvider.next_generation += 1
next_generation = CredentialsProvider.next_generation
- credentials_with_generation = dict(credentials=credentials,
- generation=next_generation)
+ credentials_with_generation = dict(
+ credentials=credentials, generation=next_generation
+ )
self.credentials = credentials_with_generation
return credentials_with_generation
- future_then_immediate(self.get_new(), attach_generation_and_save_credentials, future)
+ future_then_immediate(
+ self.get_new(), attach_generation_and_save_credentials, future
+ )
return future
def get_new(self):
diff --git a/python/neuroglancer/default_credentials_manager.py b/python/neuroglancer/default_credentials_manager.py
index 49a3f557b3..62699ec23d 100644
--- a/python/neuroglancer/default_credentials_manager.py
+++ b/python/neuroglancer/default_credentials_manager.py
@@ -12,31 +12,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from . import credentials_provider, google_credentials, boss_credentials, dvid_credentials
+from . import (
+ boss_credentials,
+ credentials_provider,
+ dvid_credentials,
+ google_credentials,
+)
default_credentials_manager = credentials_provider.CredentialsManager()
boss_credentials_provider = boss_credentials.BossCredentialsProvider()
default_credentials_manager.register(
- u'google-brainmaps',
+ "google-brainmaps",
lambda _parameters: google_credentials.GoogleOAuth2FlowCredentialsProvider(
- client_id=u'639403125587-ue3c18dalqidqehs1n1p5rjvgni5f7qu.apps.googleusercontent.com',
- client_secret=u'kuaqECaVXOKEJ2L6ifZu4Aqt',
- scopes=[u'https://www.googleapis.com/auth/brainmaps'],
- ))
+ client_id="639403125587-ue3c18dalqidqehs1n1p5rjvgni5f7qu.apps.googleusercontent.com",
+ client_secret="kuaqECaVXOKEJ2L6ifZu4Aqt",
+ scopes=["https://www.googleapis.com/auth/brainmaps"],
+ ),
+)
default_credentials_manager.register(
- u'gcs',
- lambda _parameters: google_credentials.get_google_application_default_credentials_provider())
+ "gcs",
+ lambda _parameters: google_credentials.get_google_application_default_credentials_provider(),
+)
default_credentials_manager.register(
- u'boss',
- lambda _parameters: boss_credentials_provider
+ "boss", lambda _parameters: boss_credentials_provider
)
default_credentials_manager.register(
- u'DVID',
- lambda parameters: dvid_credentials.get_tokenbased_application_default_credentials_provider(parameters)
+ "DVID",
+ lambda parameters: dvid_credentials.get_tokenbased_application_default_credentials_provider(
+ parameters
+ ),
)
+
def set_boss_token(token):
boss_credentials_provider.set_token(token)
diff --git a/python/neuroglancer/downsample_scales.py b/python/neuroglancer/downsample_scales.py
index f8add96bd6..b8840f6ab1 100644
--- a/python/neuroglancer/downsample_scales.py
+++ b/python/neuroglancer/downsample_scales.py
@@ -12,50 +12,58 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import division
import numpy as np
DEFAULT_MAX_DOWNSAMPLING = 64
DEFAULT_MAX_DOWNSAMPLED_SIZE = 128
-DEFAULT_MAX_DOWNSAMPLING_SCALES = float('inf')
+DEFAULT_MAX_DOWNSAMPLING_SCALES = float("inf")
-def compute_near_isotropic_downsampling_scales(size,
- voxel_size,
- dimensions_to_downsample,
- max_scales=DEFAULT_MAX_DOWNSAMPLING_SCALES,
- max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
- max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
+def compute_near_isotropic_downsampling_scales(
+ size,
+ voxel_size,
+ dimensions_to_downsample,
+ max_scales=DEFAULT_MAX_DOWNSAMPLING_SCALES,
+ max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
+ max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE,
+):
"""Compute a list of successive downsampling factors."""
num_dims = len(voxel_size)
- cur_scale = np.ones((num_dims, ), dtype=int)
+ cur_scale = np.ones((num_dims,), dtype=int)
scales = [tuple(cur_scale)]
- while (len(scales) < max_scales and (np.prod(cur_scale) < max_downsampling) and
- (size / cur_scale).max() > max_downsampled_size):
+ while (
+ len(scales) < max_scales
+ and (np.prod(cur_scale) < max_downsampling)
+ and (size / cur_scale).max() > max_downsampled_size
+ ):
# Find dimension with smallest voxelsize.
cur_voxel_size = cur_scale * voxel_size
- smallest_cur_voxel_size_dim = dimensions_to_downsample[np.argmin(cur_voxel_size[
- dimensions_to_downsample])]
+ smallest_cur_voxel_size_dim = dimensions_to_downsample[
+ np.argmin(cur_voxel_size[dimensions_to_downsample])
+ ]
cur_scale[smallest_cur_voxel_size_dim] *= 2
target_voxel_size = cur_voxel_size[smallest_cur_voxel_size_dim] * 2
for d in dimensions_to_downsample:
if d == smallest_cur_voxel_size_dim:
continue
d_voxel_size = cur_voxel_size[d]
- if abs(d_voxel_size - target_voxel_size) > abs(d_voxel_size * 2 - target_voxel_size):
+ if abs(d_voxel_size - target_voxel_size) > abs(
+ d_voxel_size * 2 - target_voxel_size
+ ):
cur_scale[d] *= 2
scales.append(tuple(cur_scale))
return scales
def compute_two_dimensional_near_isotropic_downsampling_scales(
- size,
- voxel_size,
- max_scales=float('inf'),
- max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
- max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
+ size,
+ voxel_size,
+ max_scales=float("inf"),
+ max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
+ max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE,
+):
"""Compute a list of successive downsampling factors for 2-d tiles."""
max_scales = min(max_scales, 10)
@@ -68,17 +76,22 @@ def compute_two_dimensional_near_isotropic_downsampling_scales(
voxel_size=voxel_size,
dimensions_to_downsample=dimensions_to_downsample,
max_scales=max_scales,
- max_downsampling=float('inf'),
- max_downsampled_size=0, ) for dimensions_to_downsample in [[0, 1], [0, 2], [1, 2]]
+ max_downsampling=float("inf"),
+ max_downsampled_size=0,
+ )
+ for dimensions_to_downsample in [[0, 1], [0, 2], [1, 2]]
]
# Truncate all list of scales to the same length, once the stopping criteria
# is reached for all values of dimensions_to_downsample.
- scales = [((1, ) * 3, ) * 3]
+ scales = [((1,) * 3,) * 3]
size = np.array(size)
def scale_satisfies_criteria(scale):
- return np.prod(scale) < max_downsampling and (size / scale).max() > max_downsampled_size
+ return (
+ np.prod(scale) < max_downsampling
+ and (size / scale).max() > max_downsampled_size
+ )
for i in range(1, max_scales):
cur_scales = tuple(scales_transpose[d][i] for d in range(3))
diff --git a/python/neuroglancer/dvid_credentials.py b/python/neuroglancer/dvid_credentials.py
index bdd2fe9d3d..1e1ebaca15 100644
--- a/python/neuroglancer/dvid_credentials.py
+++ b/python/neuroglancer/dvid_credentials.py
@@ -15,7 +15,6 @@
"""Module implements function for authentication of layers based on DVID.
Here tokens are fetched from local locations like env vars etc."""
-from __future__ import absolute_import
import logging
import os
@@ -26,7 +25,7 @@
class TokenbasedDefaultCredentialsProvider(credentials_provider.CredentialsProvider):
def __init__(self, parameters):
- super(TokenbasedDefaultCredentialsProvider, self).__init__()
+ super().__init__()
# Make sure logging is initialized.
# Does nothing if logging has already been initialized.
@@ -37,16 +36,17 @@ def __init__(self, parameters):
def get_new(self):
def func():
try:
- credentials = os.environ['DVID_CREDENTIALS']
+ credentials = os.environ["DVID_CREDENTIALS"]
credentials = dict(item.split("=") for item in credentials.split(","))
- token = credentials[self.parameters['dvidServer']]
+ token = credentials[self.parameters["dvidServer"]]
except KeyError:
raise RuntimeError(
"""DVID_CREDENTIALS is not defined in your environment or does
- not contain the token for the server: """ +
- self.parameters['dvidServer'])
- self._credentials['token'] = token
- return dict(tokenType=u'Bearer', accessToken=self._credentials['token'])
+ not contain the token for the server: """
+ + self.parameters["dvidServer"]
+ )
+ self._credentials["token"] = token
+ return dict(tokenType="Bearer", accessToken=self._credentials["token"])
return run_on_new_thread(func)
@@ -57,6 +57,7 @@ def func():
def get_tokenbased_application_default_credentials_provider(parameters):
global _global_tokenbased_application_default_credentials_provider
if _global_tokenbased_application_default_credentials_provider is None:
- _global_tokenbased_application_default_credentials_provider =\
+ _global_tokenbased_application_default_credentials_provider = (
TokenbasedDefaultCredentialsProvider(parameters)
+ )
return _global_tokenbased_application_default_credentials_provider
diff --git a/python/neuroglancer/equivalence_map.py b/python/neuroglancer/equivalence_map.py
index 44200145f1..acab105efd 100644
--- a/python/neuroglancer/equivalence_map.py
+++ b/python/neuroglancer/equivalence_map.py
@@ -12,14 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import copy
-import six
-
-class EquivalenceMap(object):
+class EquivalenceMap:
"""Union-find data structure"""
supports_readonly = True
@@ -39,7 +36,7 @@ def __init__(self, existing=None, _readonly=False):
self._readonly = False
if existing is not None:
if isinstance(existing, dict):
- existing = six.viewitems(existing)
+ existing = existing.items()
for group in existing:
self.union(*group)
self._readonly = _readonly
@@ -76,10 +73,10 @@ def __iter__(self):
return iter(self._parents)
def items(self):
- return six.viewitems(self._parents)
+ return self._parents.items()
def keys(self):
- return six.viewkeys(self._parents)
+ return self._parents.keys()
def clear(self):
self._weights.clear()
@@ -148,7 +145,7 @@ def sets(self):
sets = {}
for x in self._parents:
sets.setdefault(self[x], set()).add(x)
- return frozenset(frozenset(v) for v in six.viewvalues(sets))
+ return frozenset(frozenset(v) for v in sets.values())
def to_json(self):
"""Returns the equivalence classes a sorted list of sorted lists."""
diff --git a/python/neuroglancer/futures.py b/python/neuroglancer/futures.py
index f3a2c92d1c..84b110ef1e 100644
--- a/python/neuroglancer/futures.py
+++ b/python/neuroglancer/futures.py
@@ -14,13 +14,12 @@
"""Various extensions to the concurrent.futures module."""
-from __future__ import absolute_import
import concurrent.futures
import threading
-def future_then_immediate(future, func, new_future = None):
+def future_then_immediate(future, func, new_future=None):
"""Returns a future that maps the result of `future` by `func`.
If `future` succeeds, sets the result of the returned future to `func(future.result())`. If
@@ -50,6 +49,7 @@ def run_on_new_thread(func, daemon=True):
:returns: A concurrent.futures.Future object representing the result.
"""
f = concurrent.futures.Future()
+
def wrapper():
if not f.set_running_or_notify_cancel():
return
@@ -57,6 +57,7 @@ def wrapper():
f.set_result(func())
except Exception as e:
f.set_exception(e)
+
t = threading.Thread(target=wrapper)
t.daemon = daemon
t.start()
diff --git a/python/neuroglancer/google_credentials.py b/python/neuroglancer/google_credentials.py
index aa184a6a3e..13b57d6b75 100644
--- a/python/neuroglancer/google_credentials.py
+++ b/python/neuroglancer/google_credentials.py
@@ -12,9 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-import concurrent.futures
import logging
import threading
@@ -24,7 +22,7 @@
class GoogleOAuth2FlowCredentialsProvider(credentials_provider.CredentialsProvider):
def __init__(self, scopes, client_id, client_secret):
- super(GoogleOAuth2FlowCredentialsProvider, self).__init__()
+ super().__init__()
self.scopes = scopes
self.client_id = client_id
self.client_secret = client_secret
@@ -36,21 +34,26 @@ def __init__(self, scopes, client_id, client_secret):
def get_new(self):
def func():
import apitools.base.py.credentials_lib
+
result = apitools.base.py.credentials_lib.GetCredentials(
- package_name='',
+ package_name="",
scopes=self.scopes,
client_id=self.client_id,
client_secret=self.client_secret,
- user_agent=u'python-neuroglancer',
+ user_agent="python-neuroglancer",
+ )
+ return dict(
+ tokenType="Bearer", accessToken=result.get_access_token().access_token
)
- return dict(tokenType=u'Bearer', accessToken=result.get_access_token().access_token)
return run_on_new_thread(func)
-class GoogleApplicationDefaultCredentialsProvider(credentials_provider.CredentialsProvider):
+class GoogleApplicationDefaultCredentialsProvider(
+ credentials_provider.CredentialsProvider
+):
def __init__(self):
- super(GoogleApplicationDefaultCredentialsProvider, self).__init__()
+ super().__init__()
# Make sure logging is initialized. Does nothing if logging has already
# been initialized.
@@ -64,15 +67,16 @@ def func():
with self._lock:
if self._credentials is None:
import google.auth
+
credentials, project = google.auth.default()
del project
self._credentials = credentials
if not self._credentials.valid:
import google.auth.transport.requests
- import requests
+
request = google.auth.transport.requests.Request()
self._credentials.refresh(request)
- return dict(tokenType=u'Bearer', accessToken=self._credentials.token)
+ return dict(tokenType="Bearer", accessToken=self._credentials.token)
return run_on_new_thread(func)
@@ -85,6 +89,7 @@ def get_google_application_default_credentials_provider():
global _global_google_application_default_credentials_provider
with _global_google_application_default_credentials_provider_lock:
if _global_google_application_default_credentials_provider is None:
- _global_google_application_default_credentials_provider = GoogleApplicationDefaultCredentialsProvider(
+ _global_google_application_default_credentials_provider = (
+ GoogleApplicationDefaultCredentialsProvider()
)
return _global_google_application_default_credentials_provider
diff --git a/python/neuroglancer/json_utils.py b/python/neuroglancer/json_utils.py
index 5202715a1a..ce0b7fcdb3 100644
--- a/python/neuroglancer/json_utils.py
+++ b/python/neuroglancer/json_utils.py
@@ -12,9 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-import collections
import json
import numbers
@@ -25,9 +23,12 @@
min_safe_integer = -9007199254740991
max_safe_integer = 9007199254740991
+
def json_encoder_default(obj):
"""JSON encoder function that handles some numpy types."""
- if isinstance(obj, numbers.Integral) and (obj < min_safe_integer or obj > max_safe_integer):
+ if isinstance(obj, numbers.Integral) and (
+ obj < min_safe_integer or obj > max_safe_integer
+ ):
return str(obj)
if isinstance(obj, np.integer):
return str(obj)
@@ -39,16 +40,20 @@ def json_encoder_default(obj):
return list(obj)
raise TypeError
+
def json_encoder_default_for_repr(obj):
if isinstance(obj, local_volume.LocalVolume):
- return ''
+ return ""
return json_encoder_default(obj)
+
def decode_json(x):
- return json.loads(x, object_pairs_hook=collections.OrderedDict)
+ return json.loads(x)
+
def encode_json(obj):
return json.dumps(obj, default=json_encoder_default)
+
def encode_json_for_repr(obj):
return json.dumps(obj, default=json_encoder_default_for_repr)
diff --git a/python/neuroglancer/json_wrappers.py b/python/neuroglancer/json_wrappers.py
index 70f2051fd4..00ddeac068 100644
--- a/python/neuroglancer/json_wrappers.py
+++ b/python/neuroglancer/json_wrappers.py
@@ -13,23 +13,19 @@
# limitations under the License.
"""Facilities for converting JSON <-> Python objects"""
-from __future__ import absolute_import
-import collections
import copy
import inspect
import numbers
import threading
+from typing import Any, Callable, ClassVar, Generic, TypeVar, Union
import numpy as np
-import six
-
-from six.moves import range
from .json_utils import encode_json_for_repr
-def to_json(value):
+def to_json(value: Any) -> Any:
if isinstance(value, np.ndarray):
return value.tolist()
try:
@@ -39,11 +35,19 @@ def to_json(value):
return method()
-class JsonObjectWrapper(object):
+_T = TypeVar("_T")
+
+
+class JsonObjectWrapper:
supports_readonly = True
supports_validation = True
- __slots__ = ('_json_data', '_cached_wrappers', '_lock', '_readonly')
+ __slots__ = ("_json_data", "_cached_wrappers", "_lock", "_readonly")
+
+ _json_data: Any
+ _cached_wrappers: dict[str, Any]
+ _lock: threading.RLock
+ _readonly: bool
def __init__(self, json_data=None, _readonly=False, **kwargs):
if json_data is None:
@@ -52,20 +56,20 @@ def __init__(self, json_data=None, _readonly=False, **kwargs):
json_data = json_data.to_json()
elif not isinstance(json_data, dict):
raise TypeError
- object.__setattr__(self, '_json_data', json_data)
- object.__setattr__(self, '_cached_wrappers', dict())
- object.__setattr__(self, '_lock', threading.RLock())
- object.__setattr__(self, '_readonly', 1 if _readonly else False)
+ object.__setattr__(self, "_json_data", json_data)
+ object.__setattr__(self, "_cached_wrappers", dict())
+ object.__setattr__(self, "_lock", threading.RLock())
+ object.__setattr__(self, "_readonly", 1 if _readonly else False)
for k in kwargs:
setattr(self, k, kwargs[k])
- object.__setattr__(self, '_readonly', _readonly)
+ object.__setattr__(self, "_readonly", _readonly)
def to_json(self):
if self._readonly:
return self._json_data
with self._lock:
r = self._json_data.copy()
- for k, (wrapper, _) in six.iteritems(self._cached_wrappers):
+ for k, (wrapper, _) in self._cached_wrappers.items():
if wrapper is not None:
r[k] = to_json(wrapper)
else:
@@ -79,7 +83,7 @@ def __eq__(self, other):
return type(self) == type(other) and self.to_json() == other.to_json()
def __repr__(self):
- return u'%s(%s)' % (type(self).__name__, encode_json_for_repr(self.to_json()))
+ return f"{type(self).__name__}({encode_json_for_repr(self.to_json())})"
def _get_wrapped(self, key, wrapped_type):
with self._lock:
@@ -88,8 +92,8 @@ def _get_wrapped(self, key, wrapped_type):
if cached_value is not None and cached_value[1] is json_value:
return cached_value[0]
kwargs = dict()
- if self._readonly and hasattr(wrapped_type, 'supports_readonly'):
- kwargs['_readonly'] = True
+ if self._readonly and hasattr(wrapped_type, "supports_readonly"):
+ kwargs["_readonly"] = True
wrapper = wrapped_type(json_value, **kwargs)
self._cached_wrappers[key] = wrapper, json_value
return wrapper
@@ -101,37 +105,46 @@ def _set_wrapped(self, key, value, validator):
with self._lock:
self._cached_wrappers[key] = (value, self._json_data.get(key))
+
_types_supporting_validation = frozenset([np.uint64, float, int])
+
def _normalize_validator(wrapped_type, validator):
if validator is None:
- supports_validation = getattr(wrapped_type, 'supports_validation', None)
- if (inspect.isroutine(wrapped_type) or supports_validation is not None
- or wrapped_type in _types_supporting_validation):
+ supports_validation = getattr(wrapped_type, "supports_validation", None)
+ if (
+ inspect.isroutine(wrapped_type)
+ or supports_validation is not None
+ or wrapped_type in _types_supporting_validation
+ ):
if inspect.isroutine(supports_validation):
validator = supports_validation
else:
validator = wrapped_type
else:
+
def validator_func(x):
if not isinstance(x, wrapped_type):
raise TypeError(wrapped_type, x)
return x
+
validator = validator_func
return validator
def wrapped_property(json_name, wrapped_type, validator=None, doc=None):
validator = _normalize_validator(wrapped_type, validator)
- return property(fget=lambda self: self._get_wrapped(json_name, wrapped_type),
- fset=lambda self, value: self._set_wrapped(json_name, value, validator),
- doc=doc)
+ return property(
+ fget=lambda self: self._get_wrapped(json_name, wrapped_type),
+ fset=lambda self, value: self._set_wrapped(json_name, value, validator),
+ doc=doc,
+ )
def array_wrapper(dtype, shape=None):
if shape is not None:
if isinstance(shape, numbers.Number):
- shape = (shape, )
+ shape = (shape,)
else:
shape = tuple(shape)
@@ -141,9 +154,10 @@ def wrapper(value, _readonly=False):
value.setflags(write=False)
if shape is not None:
if len(shape) != len(value.shape) or any(
- expected_size is not None and expected_size != actual_size
- for expected_size, actual_size in zip(shape, value.shape)):
- raise ValueError('expected shape', shape)
+ expected_size is not None and expected_size != actual_size
+ for expected_size, actual_size in zip(shape, value.shape)
+ ):
+ raise ValueError("expected shape", shape)
return value
wrapper.supports_readonly = True
@@ -151,7 +165,7 @@ def wrapper(value, _readonly=False):
def text_type(value):
- return six.text_type(value)
+ return str(value)
def optional(wrapper, default_value=None, validator=None):
@@ -160,7 +174,7 @@ def modified_wrapper(value, **kwargs):
return default_value
return wrapper(value, **kwargs)
- if hasattr(wrapper, 'supports_readonly'):
+ if hasattr(wrapper, "supports_readonly"):
modified_wrapper.supports_readonly = True
validator = _normalize_validator(wrapper, validator)
@@ -173,75 +187,89 @@ def modified_validator(value, **kwargs):
modified_wrapper.supports_validation = modified_validator
return modified_wrapper
-class MapBase(object):
+
+class MapBase:
__slots__ = ()
pass
-def typed_string_map(wrapped_type, validator=None):
- validator = _normalize_validator(wrapped_type, validator)
- class Map(JsonObjectWrapper, MapBase):
- supports_validation = True
- __slots__ = ()
- def __init__(self, json_data=None, _readonly=False):
- if isinstance(json_data, MapBase):
- json_data = json_data.to_json()
- elif json_data is not None:
- new_map = {}
- for k, v in six.viewitems(json_data):
- validator(v)
- new_map[k] = to_json(v)
- json_data = new_map
- super(Map, self).__init__(json_data, _readonly=_readonly)
+class TypedStringMap(Generic[_T], JsonObjectWrapper, MapBase):
+ validator: ClassVar[Callable[[Any], Any]]
+ wrapped_type: ClassVar[Callable[[Any], Any]]
+ supports_validation = True
+ __slots__ = ()
- def clear(self):
- with self._lock:
- self._cached_wrappers.clear()
- self._json_data.clear()
+ def __init__(self, json_data=None, _readonly=False):
+ validator = type(self).validator
+ if isinstance(json_data, MapBase):
+ json_data = json_data.to_json()
+ elif json_data is not None:
+ new_map = {}
+ for k, v in json_data.items():
+ validator(v)
+ new_map[k] = to_json(v)
+ json_data = new_map
+ super().__init__(json_data, _readonly=_readonly)
+
+ def clear(self):
+ with self._lock:
+ self._cached_wrappers.clear()
+ self._json_data.clear()
- def keys(self):
- return six.viewkeys(self._json_data)
+ def keys(self):
+ return self._json_data.keys()
- def iteritems(self):
- for key in self:
- yield (key, self[key])
+ def iteritems(self):
+ for key in self:
+ yield (key, self[key])
- def itervalues(self):
- for key in self:
- yield self[key]
+ def itervalues(self):
+ for key in self:
+ yield self[key]
- def get(self, key, default_value=None):
- with self._lock:
- if key in self._json_data:
- return self[key]
- return default_value
+ def get(self, key: str, default_value=None):
+ with self._lock:
+ if key in self._json_data:
+ return self[key]
+ return default_value
- def __len__(self):
- return len(self._json_data)
+ def __len__(self):
+ return len(self._json_data)
- def __contains__(self, key):
- return key in self._json_data
+ def __contains__(self, key):
+ return key in self._json_data
- def __getitem__(self, key):
- with self._lock:
- if key not in self._json_data:
- raise KeyError(key)
- return self._get_wrapped(key, wrapped_type)
+ def __getitem__(self, key):
+ with self._lock:
+ if key not in self._json_data:
+ raise KeyError(key)
+ return self._get_wrapped(key, type(self).wrapped_type)
- def __setitem__(self, key, value):
- with self._lock:
- self._set_wrapped(key, value, validator)
- self._json_data[key] = None # placeholder
+ def __setitem__(self, key, value):
+ with self._lock:
+ self._set_wrapped(key, value, type(self).validator)
+ self._json_data[key] = None # placeholder
- def __delitem__(self, key):
- if self._readonly:
- raise AttributeError
- with self._lock:
- del self._json_data[key]
- self._cached_wrappers.pop(key, None)
+ def __delitem__(self, key):
+ if self._readonly:
+ raise AttributeError
+ with self._lock:
+ del self._json_data[key]
+ self._cached_wrappers.pop(key, None)
- def __iter__(self):
- return iter(self._json_data)
+ def __iter__(self):
+ return iter(self._json_data)
+
+
+def typed_string_map(
+ wrapped_type: Callable[[Any], _T], validator=None
+) -> type[TypedStringMap[_T]]:
+ _wrapped_type = wrapped_type
+ _validator = _normalize_validator(wrapped_type, validator)
+
+ class Map(TypedStringMap):
+ wrapped_type = _wrapped_type
+ validator = _validator
return Map
@@ -259,12 +287,12 @@ def __init__(self, json_data=None, _readonly=False):
json_data = json_data.to_json()
elif json_data is not None:
new_map = {}
- for k, v in six.viewitems(json_data):
+ for k, v in json_data.items():
key_validator(k)
value_validator(v)
new_map[str(k)] = to_json(v)
json_data = new_map
- super(Map, self).__init__(json_data, _readonly=_readonly)
+ super().__init__(json_data, _readonly=_readonly)
def clear(self):
with self._lock:
@@ -272,7 +300,7 @@ def clear(self):
self._json_data.clear()
def keys(self):
- return [key_validator(k) for k in six.viewkeys(self._json_data)]
+ return [key_validator(k) for k in self._json_data.keys()]
def iteritems(self):
for key in self:
@@ -306,7 +334,7 @@ def __setitem__(self, key, value):
key = str(key)
with self._lock:
self._set_wrapped(key, value, value_validator)
- self._json_data[key] = None # placeholder
+ self._json_data[key] = None # placeholder
def __delitem__(self, key):
if self._readonly:
@@ -330,104 +358,127 @@ def segments():
class Map(typed_map(key_type, value_type)):
def to_json(self):
- return [segment if visible else "!" + segment for segment, visible in self._json_data.items()]
+ return [
+ segment if visible else "!" + segment
+ for segment, visible in self._json_data.items()
+ ]
def __init__(self, json_data=None, _readonly=False):
if json_data is None:
json_data = dict()
else:
json_data = dict(
- (key_type(v[1:]), False) if str(v).startswith('!') else (key_type(v), True)
- for v in json_data)
- super(Map, self).__init__(json_data, _readonly=_readonly)
+ (key_type(v[1:]), False)
+ if str(v).startswith("!")
+ else (key_type(v), True)
+ for v in json_data
+ )
+ super().__init__(json_data, _readonly=_readonly)
def __setitem__(self, key, value):
key = str(key)
with self._lock:
self._set_wrapped(key, value, value_validator)
- self._json_data[key] = value # using the value
+ self._json_data[key] = value # using the value
return Map
-def typed_set(wrapped_type):
- def wrapper(x, _readonly=False):
+
+def typed_set(wrapped_type: Callable[[Any], _T]):
+    def wrapper(x, _readonly=False) -> Union[set[_T], frozenset[_T]]:
set_type = frozenset if _readonly else set
- kwargs = dict()
- if hasattr(wrapped_type, 'supports_readonly'):
+ kwargs: dict[str, Any] = dict()
+ if hasattr(wrapped_type, "supports_readonly"):
kwargs.update(_readonly=True)
if x is None:
return set_type()
return set_type(wrapped_type(v, **kwargs) for v in x)
- wrapper.supports_readonly = True
+
+ wrapper.supports_readonly = True # type: ignore[attr-defined]
return wrapper
-def typed_list(wrapped_type, validator=None):
- validator = _normalize_validator(wrapped_type, validator)
- class TypedList(object):
- supports_readonly = True
- supports_validation = True
- __slots__ = ('_readonly', '_data')
- def __init__(self, json_data=None, _readonly=False):
- if json_data is None:
- json_data = []
- if not isinstance(json_data, (list, tuple, np.ndarray)):
- raise ValueError
- self._readonly = _readonly
- self._data = [validator(x) for x in json_data]
+class TypedList(Generic[_T]):
+ supports_readonly = True
+ supports_validation = True
+ __slots__ = ("_readonly", "_data")
+ validator: ClassVar[Callable[[Any], Any]]
- def __len__(self):
- return len(self._data)
+ _readonly: bool
+ _data: list[_T]
- def __getitem__(self, key):
- return self._data[key]
+ def __init__(self, json_data=None, _readonly=False):
+ if json_data is None:
+ json_data = []
+ if not isinstance(json_data, (list, tuple, np.ndarray)):
+ raise ValueError
+ self._readonly = _readonly
+ validator = type(self).validator
+ self._data = [validator(x) for x in json_data]
- def __delitem__(self, key):
- if self._readonly:
- raise AttributeError
- del self._data[key]
+ def __len__(self):
+ return len(self._data)
- def __setitem__(self, key, value):
- if self._readonly:
- raise AttributeError
- if isinstance(key, slice):
- values = [validator(x) for x in value]
- self._data[key] = values
- else:
- value = validator(x)
- self._data[key] = value
+ def __getitem__(self, key):
+ return self._data[key]
- def __iter__(self):
- return iter(self._data)
+ def __delitem__(self, key):
+ if self._readonly:
+ raise AttributeError
+ del self._data[key]
- def append(self, x):
- if self._readonly:
- raise AttributeError
- x = validator(x)
- self._data.append(x)
+ def __setitem__(self, key, value):
+ if self._readonly:
+ raise AttributeError
+ if isinstance(key, slice):
+ values = [type(self).validator(x) for x in value]
+ self._data[key] = values
+ else:
+ value = type(self).validator(value)
+ self._data[key] = value
+
+ def __iter__(self):
+ return iter(self._data)
- def extend(self, values):
- for x in values:
- self.append(x)
- def insert(self, index, x):
- x = validator(x)
- self._data.insert(index, x)
+ def append(self, x):
+ if self._readonly:
+ raise AttributeError
+ x = type(self).validator(x)
+ self._data.append(x)
- def pop(self, index=-1):
- return self._data.pop(index)
+ def extend(self, values):
+ for x in values:
+ self.append(x)
- def to_json(self):
- return [to_json(x) for x in self._data]
+ def insert(self, index, x):
+ x = type(self).validator(x)
+ self._data.insert(index, x)
+
+ def pop(self, index=-1):
+ return self._data.pop(index)
+
+ def to_json(self):
+ return [to_json(x) for x in self._data]
+
+ def __deepcopy__(self, memo):
+ return type(self)(copy.deepcopy(self.to_json(), memo))
+
+ def __repr__(self):
+ return encode_json_for_repr(self.to_json())
+
+
+def typed_list(
+ wrapped_type: Callable[[Any], _T], validator=None
+) -> type[TypedList[_T]]:
+ val = _normalize_validator(wrapped_type, validator)
- def __deepcopy__(self, memo):
- return type(self)(copy.deepcopy(self.to_json(), memo))
+ class DerivedTypedList(TypedList):
+ validator = val
- def __repr__(self):
- return encode_json_for_repr(self.to_json())
- return TypedList
+ return DerivedTypedList
def number_or_string(value):
- if (not isinstance(value, numbers.Real) and not isinstance(value, six.text_type)):
+ if not isinstance(value, numbers.Real) and not isinstance(value, str):
raise TypeError
return value
diff --git a/python/neuroglancer/local_volume.py b/python/neuroglancer/local_volume.py
index 21df6e235a..fe467791b5 100644
--- a/python/neuroglancer/local_volume.py
+++ b/python/neuroglancer/local_volume.py
@@ -12,80 +12,89 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import, division, print_function
-import collections
import math
import threading
import numpy as np
-import six
+
try:
import tensorstore as ts
except ImportError:
ts = None
-from . import downsample, downsample_scales
+from . import downsample, downsample_scales, trackable_state
from .chunks import encode_jpeg, encode_npz, encode_raw
from .coordinate_space import CoordinateSpace
-from . import trackable_state
from .random_token import make_random_token
class MeshImplementationNotAvailable(Exception):
pass
+
class MeshesNotSupportedForVolume(Exception):
pass
+
class InvalidObjectIdForMesh(Exception):
pass
+
class LocalVolume(trackable_state.ChangeNotifier):
- def __init__(self,
- data,
- dimensions=None,
- volume_type=None,
- voxel_offset=None,
- encoding='npz',
- max_voxels_per_chunk_log2=None,
- mesh_options=None,
- downsampling='3d',
- chunk_layout=None,
- max_downsampling=downsample_scales.DEFAULT_MAX_DOWNSAMPLING,
- max_downsampled_size=downsample_scales.DEFAULT_MAX_DOWNSAMPLED_SIZE,
- max_downsampling_scales=downsample_scales.DEFAULT_MAX_DOWNSAMPLING_SCALES):
+ def __init__(
+ self,
+ data,
+ dimensions=None,
+ volume_type=None,
+ voxel_offset=None,
+ encoding="npz",
+ max_voxels_per_chunk_log2=None,
+ mesh_options=None,
+ downsampling="3d",
+ chunk_layout=None,
+ max_downsampling=downsample_scales.DEFAULT_MAX_DOWNSAMPLING,
+ max_downsampled_size=downsample_scales.DEFAULT_MAX_DOWNSAMPLED_SIZE,
+ max_downsampling_scales=downsample_scales.DEFAULT_MAX_DOWNSAMPLING_SCALES,
+ ):
"""Initializes a LocalVolume.
@param data: Source data.
- @param downsampling: '3d' to use isotropic downsampling, '2d' to downsample separately in
- XY, XZ, and YZ, None to use no downsampling.
+ @param downsampling: '3d' to use isotropic downsampling, '2d' to
+ downsample separately in XY, XZ, and YZ, None to use no
+ downsampling.
+
+ @param max_downsampling: Maximum amount by which on-the-fly downsampling
+ may reduce the volume of a chunk. For example, 4x4x4 downsampling
+ reduces the volume by 64.
- @param max_downsampling: Maximum amount by which on-the-fly downsampling may reduce the
- volume of a chunk. For example, 4x4x4 downsampling reduces the volume by 64.
+ @param volume_type: either 'image' or 'segmentation'. If not specified,
+ guessed from the data type.
- @param volume_type: either 'image' or 'segmentation'. If not specified, guessed from the
- data type.
+ @param voxel_size: Sequence [x, y, z] of floats. Specifies the voxel
+ size.
- @param voxel_size: Sequence [x, y, z] of floats. Specifies the voxel size.
+ @param mesh_options: A dict with the following keys specifying options
+ for mesh simplification for 'segmentation' volumes:
- @param mesh_options: A dict with the following keys specifying options for mesh
- simplification for 'segmentation' volumes:
+ - max_quadrics_error: float. Edge collapses with a larger
+ associated quadrics error than this amount are prohibited.
+ Set this to a negative number to disable mesh simplification,
+ and just use the original mesh produced by the marching cubes
+ algorithm. Defaults to 1e6. The effect of this value depends
+ on the voxel_size.
- - max_quadrics_error: float. Edge collapses with a larger associated quadrics error
- than this amount are prohibited. Set this to a negative number to disable mesh
- simplification, and just use the original mesh produced by the marching cubes
- algorithm. Defaults to 1e6. The effect of this value depends on the voxel_size.
+ - max_normal_angle_deviation: float. Edge collapses that change
+ a triangle normal by more than this angle are prohibited. The
+ angle is specified in degrees. Defaults to 90.
- - max_normal_angle_deviation: float. Edge collapses that change a triangle normal
- by more than this angle are prohibited. The angle is specified in degrees.
- Defaults to 90.
+ - lock_boundary_vertices: bool. Retain all vertices along mesh
+ surface boundaries, which can only occur at the boundary of
+ the volume. Defaults to true.
- - lock_boundary_vertices: bool. Retain all vertices along mesh surface boundaries,
- which can only occur at the boundary of the volume. Defaults to true.
"""
- super(LocalVolume, self).__init__()
+ super().__init__()
self.token = make_random_token()
if ts is not None and isinstance(data, ts.TensorStore):
self.data = TensorStoreDataWrapper(data)
@@ -100,27 +109,31 @@ def __init__(self,
scales=self.data.scales,
)
if rank != dimensions.rank:
- raise ValueError('rank of data (%d) must match rank of coordinate space (%d)' %
- (rank, dimensions.rank))
+ raise ValueError(
+ "rank of data (%d) must match rank of coordinate space (%d)"
+ % (rank, dimensions.rank)
+ )
if voxel_offset is None:
voxel_offset = np.array(self.data.origin, dtype=np.int64)
else:
voxel_offset = np.array(voxel_offset, dtype=np.int64)
if voxel_offset.shape != (rank,):
- raise ValueError('voxel_offset must have shape of (%d,)' % (rank,))
+ raise ValueError("voxel_offset must have shape of (%d,)" % (rank,))
self.voxel_offset = voxel_offset
self.dimensions = dimensions
self.data_type = np.dtype(self.data.dtype).name
- if self.data_type == 'float64':
- self.data_type = 'float32'
+ if self.data_type == "float64":
+ self.data_type = "float32"
self.encoding = encoding
if volume_type is None:
- if self.rank == 3 and (self.data_type == 'uint16' or
- self.data_type == 'uint32' or
- self.data_type == 'uint64'):
- volume_type = 'segmentation'
+ if self.rank == 3 and (
+ self.data_type == "uint16"
+ or self.data_type == "uint32"
+ or self.data_type == "uint64"
+ ):
+ volume_type = "segmentation"
else:
- volume_type = 'image'
+ volume_type = "image"
self.volume_type = volume_type
self._mesh_generator = None
@@ -132,10 +145,10 @@ def __init__(self,
self.downsampling_layout = downsampling
if chunk_layout is None:
- if downsampling == '2d':
- chunk_layout = 'flat'
+ if downsampling == "2d":
+ chunk_layout = "flat"
else:
- chunk_layout = 'isotropic'
+ chunk_layout = "isotropic"
self.chunk_layout = chunk_layout
self.max_downsampling = max_downsampling
@@ -143,59 +156,75 @@ def __init__(self,
self.max_downsampling_scales = max_downsampling_scales
def info(self):
- info = dict(dataType=self.data_type,
- encoding=self.encoding,
- generation=self.change_count,
- coordinateSpace=self.dimensions.to_json(),
- shape=self.shape,
- volumeType=self.volume_type,
- voxelOffset=self.voxel_offset,
- chunkLayout=self.chunk_layout,
- downsamplingLayout=self.downsampling_layout,
- maxDownsampling=None if math.isinf(self.max_downsampling) else self.max_downsampling,
- maxDownsampledSize=None if math.isinf(self.max_downsampled_size) else self.max_downsampled_size,
- maxDownsamplingScales=None if math.isinf(self.max_downsampling_scales) else self.max_downsampling_scales,
+ info = dict(
+ dataType=self.data_type,
+ encoding=self.encoding,
+ generation=self.change_count,
+ coordinateSpace=self.dimensions.to_json(),
+ shape=self.shape,
+ volumeType=self.volume_type,
+ voxelOffset=self.voxel_offset,
+ chunkLayout=self.chunk_layout,
+ downsamplingLayout=self.downsampling_layout,
+ maxDownsampling=None
+ if math.isinf(self.max_downsampling)
+ else self.max_downsampling,
+ maxDownsampledSize=None
+ if math.isinf(self.max_downsampled_size)
+ else self.max_downsampled_size,
+ maxDownsamplingScales=None
+ if math.isinf(self.max_downsampling_scales)
+ else self.max_downsampling_scales,
)
if self.max_voxels_per_chunk_log2 is not None:
- info['maxVoxelsPerChunkLog2'] = self.max_voxels_per_chunk_log2
+ info["maxVoxelsPerChunkLog2"] = self.max_voxels_per_chunk_log2
return info
def get_encoded_subvolume(self, data_format, start, end, scale_key):
rank = self.rank
if len(start) != rank or len(end) != rank:
- raise ValueError('Invalid request')
- downsample_factor = np.array(scale_key.split(','), dtype=np.int64)
- if (len(downsample_factor) != rank or np.any(downsample_factor < 1)
+ raise ValueError("Invalid request")
+ downsample_factor = np.array(scale_key.split(","), dtype=np.int64)
+ if (
+ len(downsample_factor) != rank
+ or np.any(downsample_factor < 1)
or np.any(downsample_factor > self.max_downsampling)
- or np.prod(downsample_factor) > self.max_downsampling):
- raise ValueError('Invalid downsampling factor.')
+ or np.prod(downsample_factor) > self.max_downsampling
+ ):
+ raise ValueError("Invalid downsampling factor.")
downsampled_shape = np.cast[np.int64](np.ceil(self.shape / downsample_factor))
if np.any(end < start) or np.any(start < 0) or np.any(end > downsampled_shape):
- raise ValueError('Out of bounds data request.')
+ raise ValueError("Out of bounds data request.")
orig_data_shape = self.data.shape
- indexing_expr = tuple(np.s_[start[i] * downsample_factor[i]:min(orig_data_shape[i], end[i] * downsample_factor[i])]
- for i in range(rank))
+ indexing_expr = tuple(
+ np.s_[
+ start[i] * downsample_factor[i] : min(
+ orig_data_shape[i], end[i] * downsample_factor[i]
+ )
+ ]
+ for i in range(rank)
+ )
subvol = np.array(self.data[indexing_expr], copy=False)
- if subvol.dtype == 'float64':
+ if subvol.dtype == "float64":
subvol = np.cast[np.float32](subvol)
if np.any(downsample_factor != 1):
- if self.volume_type == 'image':
+ if self.volume_type == "image":
subvol = downsample.downsample_with_averaging(subvol, downsample_factor)
else:
subvol = downsample.downsample_with_striding(subvol, downsample_factor)
- content_type = 'application/octet-stream'
- if data_format == 'jpeg':
+ content_type = "application/octet-stream"
+ if data_format == "jpeg":
data = encode_jpeg(subvol)
- content_type = 'image/jpeg'
- elif data_format == 'npz':
+ content_type = "image/jpeg"
+ elif data_format == "npz":
data = encode_npz(subvol)
- elif data_format == 'raw':
+ elif data_format == "raw":
data = encode_raw(subvol)
else:
- raise ValueError('Invalid data format requested.')
+ raise ValueError("Invalid data format requested.")
return data, content_type
def get_object_mesh(self, object_id):
@@ -221,16 +250,25 @@ def _get_mesh_generator(self):
from . import _neuroglancer
except ImportError:
raise MeshImplementationNotAvailable()
- if not (self.rank == 3 and
- (self.data_type == 'uint8' or self.data_type == 'uint16' or
- self.data_type == 'uint32' or self.data_type == 'uint64')):
+ if not (
+ self.rank == 3
+ and (
+ self.data_type == "uint8"
+ or self.data_type == "uint16"
+ or self.data_type == "uint32"
+ or self.data_type == "uint64"
+ )
+ ):
raise MeshesNotSupportedForVolume()
pending_obj = object()
self._mesh_generator_pending = pending_obj
data = self.data
new_mesh_generator = _neuroglancer.OnDemandObjectMeshGenerator(
data.transpose(),
- self.dimensions.scales, np.zeros(3), **self._mesh_options)
+ self.dimensions.scales,
+ np.zeros(3),
+ **self._mesh_options,
+ )
with self._mesh_generator_lock:
if self._mesh_generator_pending is not pending_obj:
continue
@@ -253,15 +291,16 @@ def invalidate(self):
self._mesh_generator = None
self._dispatch_changed_callbacks()
+
class DataWrapper:
"""Wraps data for LocalVolume."""
def __init__(self, data):
self._data = data
- self._default_labels = ['d%d' % d for d in range(self.rank)]
+ self._default_labels = ["d%d" % d for d in range(self.rank)]
self._default_origin = [0] * self.rank
self._default_scales = [1] * self.rank
- self._default_units = [''] * self.rank
+ self._default_units = [""] * self.rank
def __getitem__(self, item):
return self._data.__getitem__(item)
diff --git a/python/neuroglancer/screenshot.py b/python/neuroglancer/screenshot.py
index 92de6ac8af..a1f28ce1a7 100644
--- a/python/neuroglancer/screenshot.py
+++ b/python/neuroglancer/screenshot.py
@@ -12,12 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import os
-class ScreenshotSaver(object):
+class ScreenshotSaver:
def __init__(self, viewer, directory):
self.viewer = viewer
self.directory = directory
@@ -28,7 +27,7 @@ def __init__(self, viewer, directory):
self.index = 0
def get_path(self, index):
- return os.path.join(self.directory, '%07d.png' % index)
+ return os.path.join(self.directory, "%07d.png" % index)
def get_next_path(self, index=None):
if index is None:
@@ -39,7 +38,7 @@ def capture(self, index=None):
s = self.viewer.screenshot()
increment_index = index is None
index, path = self.get_next_path(index)
- with open(path, 'wb') as f:
+ with open(path, "wb") as f:
f.write(s.screenshot.image)
if increment_index:
self.index += 1
diff --git a/python/neuroglancer/segment_colors.py b/python/neuroglancer/segment_colors.py
index 377ba8ed6d..2751b77c11 100644
--- a/python/neuroglancer/segment_colors.py
+++ b/python/neuroglancer/segment_colors.py
@@ -14,72 +14,78 @@
import math
-def hash_function(state,value):
- """ Python implementation of hashCombine() function
+
+def hash_function(state, value):
+ """Python implementation of hashCombine() function
in src/neuroglancer/gpu_hash/hash_function.ts,
a modified murmur hash
"""
- k1 = 0xcc9e2d51
- k2 = 0x1b873593
- state = state & 0xffffffff
- value = (value * k1) & 0xffffffff
- value = ((value << 15) | value >> 17) & 0xffffffff
- value = (value * k2) & 0xffffffff
- state = (state ^ value) & 0xffffffff
- state = (( state << 13) | state >> 19) & 0xffffffff
- state = (( state * 5) + 0xe6546b64) & 0xffffffff
+ k1 = 0xCC9E2D51
+ k2 = 0x1B873593
+ state = state & 0xFFFFFFFF
+ value = (value * k1) & 0xFFFFFFFF
+ value = ((value << 15) | value >> 17) & 0xFFFFFFFF
+ value = (value * k2) & 0xFFFFFFFF
+ state = (state ^ value) & 0xFFFFFFFF
+ state = ((state << 13) | state >> 19) & 0xFFFFFFFF
+ state = ((state * 5) + 0xE6546B64) & 0xFFFFFFFF
return state
-def hsv_to_rgb(h,s,v):
- """ Convert H,S,V values to RGB values.
- Python implementation of hsvToRgb in src/neuroglancer/util/colorspace.ts """
- h*=6
+
+def hsv_to_rgb(h, s, v):
+ """Convert H,S,V values to RGB values.
+ Python implementation of hsvToRgb in src/neuroglancer/util/colorspace.ts"""
+ h *= 6
hue_index = math.floor(h)
remainder = h - hue_index
- val1 = v*(1-s)
- val2 = v*(1-(s*remainder))
- val3 = v*(1-(s*(1-remainder)))
+ val1 = v * (1 - s)
+ val2 = v * (1 - (s * remainder))
+ val3 = v * (1 - (s * (1 - remainder)))
hue_remainder = hue_index % 6
if hue_remainder == 0:
- return (v,val3,val1)
+ return (v, val3, val1)
elif hue_remainder == 1:
- return (val2,v,val1)
+ return (val2, v, val1)
elif hue_remainder == 2:
- return (val1,v,val3)
+ return (val1, v, val3)
elif hue_remainder == 3:
- return (val1,val2,v)
+ return (val1, val2, v)
elif hue_remainder == 4:
- return (val3,val1,v)
- elif hue_remainder == 5:
- return (v,val1,val2)
+ return (val3, val1, v)
+ elif hue_remainder == 5:
+ return (v, val1, val2)
+
def pack_color(rgb_vec):
- """ Returns an integer formed
+ """Returns an integer formed
by concatenating the channels of the input color vector.
Python implementation of packColor in src/neuroglancer/util/color.ts
"""
result = 0
for i in range(len(rgb_vec)):
- result = ((result << 8) & 0xffffffff) + min(255,max(0,round(rgb_vec[i]*255)))
+ result = ((result << 8) & 0xFFFFFFFF) + min(
+ 255, max(0, round(rgb_vec[i] * 255))
+ )
return result
-def hex_string_from_segment_id(color_seed,segment_id):
- """ Return the hex color string for a segment
- given a color seed and the segment id """
- segment_id = int(segment_id) # necessary since segment_id is 64 bit originally
- result = hash_function(state=color_seed,value=segment_id)
+
+def hex_string_from_segment_id(color_seed, segment_id):
+ """Return the hex color string for a segment
+ given a color seed and the segment id"""
+ segment_id = int(segment_id) # necessary since segment_id is 64 bit originally
+ result = hash_function(state=color_seed, value=segment_id)
newvalue = segment_id >> 32
- result2 = hash_function(state=result,value=newvalue)
- c0 = (result2 & 0xFF) / 255.
- c1 = ((result2 >> 8) & 0xFF) / 255.;
+ result2 = hash_function(state=result, value=newvalue)
+ c0 = (result2 & 0xFF) / 255.0
+ c1 = ((result2 >> 8) & 0xFF) / 255.0
h = c0
- s = 0.5 + 0.5 * c1
+ s = 0.5 + 0.5 * c1
v = 1.0
- rgb=hsv_to_rgb(h,s,v)
+ rgb = hsv_to_rgb(h, s, v)
packed_color = pack_color(rgb_vec=rgb)
- hex_string = format(packed_color, 'x')
+ hex_string = format(packed_color, "x")
""" Zero pad the hex string if less than 6 characeters """
if len(hex_string) < 6:
- hex_string = '0'*(6-len(hex_string)) + hex_string
- hex_string = '#' + hex_string
- return hex_string
\ No newline at end of file
+ hex_string = "0" * (6 - len(hex_string)) + hex_string
+ hex_string = "#" + hex_string
+ return hex_string
diff --git a/python/neuroglancer/server.py b/python/neuroglancer/server.py
index bb7dbbb87c..3a994bea83 100644
--- a/python/neuroglancer/server.py
+++ b/python/neuroglancer/server.py
@@ -16,7 +16,6 @@
import concurrent.futures
import json
import multiprocessing
-import re
import socket
import sys
import threading
@@ -28,43 +27,47 @@
import tornado.platform.asyncio
import tornado.web
-from . import async_util
-from . import local_volume, static
-from . import skeleton
-from .json_utils import json_encoder_default, encode_json
+from . import async_util, local_volume, skeleton, static
+from .json_utils import encode_json, json_encoder_default
from .random_token import make_random_token
from .trackable_state import ConcurrentModificationError
-INFO_PATH_REGEX = r'^/neuroglancer/info/(?P[^/]+)$'
-SKELETON_INFO_PATH_REGEX = r'^/neuroglancer/skeletoninfo/(?P[^/]+)$'
+INFO_PATH_REGEX = r"^/neuroglancer/info/(?P[^/]+)$"
+SKELETON_INFO_PATH_REGEX = r"^/neuroglancer/skeletoninfo/(?P[^/]+)$"
-DATA_PATH_REGEX = r'^/neuroglancer/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[0-9]+(?:,[0-9]+)*)/(?P[0-9]+(?:,[0-9]+)*)$'
+DATA_PATH_REGEX = r"^/neuroglancer/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[0-9]+(?:,[0-9]+)*)/(?P[0-9]+(?:,[0-9]+)*)$"
-SKELETON_PATH_REGEX = r'^/neuroglancer/skeleton/(?P[^/]+)/(?P[0-9]+)$'
+SKELETON_PATH_REGEX = r"^/neuroglancer/skeleton/(?P[^/]+)/(?P[0-9]+)$"
-MESH_PATH_REGEX = r'^/neuroglancer/mesh/(?P[^/]+)/(?P[0-9]+)$'
+MESH_PATH_REGEX = r"^/neuroglancer/mesh/(?P[^/]+)/(?P[0-9]+)$"
-STATIC_PATH_REGEX = r'^/v/(?P[^/]+)/(?P(?:[a-zA-Z0-9_\-][a-zA-Z0-9_\-.]*)?)$'
+STATIC_PATH_REGEX = (
+ r"^/v/(?P[^/]+)/(?P(?:[a-zA-Z0-9_\-][a-zA-Z0-9_\-.]*)?)$"
+)
-ACTION_PATH_REGEX = r'^/action/(?P[^/]+)$'
+ACTION_PATH_REGEX = r"^/action/(?P[^/]+)$"
-VOLUME_INFO_RESPONSE_PATH_REGEX = r'^/volume_response/(?P[^/]+)/(?P[^/]+)/info$'
+VOLUME_INFO_RESPONSE_PATH_REGEX = (
+ r"^/volume_response/(?P[^/]+)/(?P[^/]+)/info$"
+)
-VOLUME_CHUNK_RESPONSE_PATH_REGEX = r'^/volume_response/(?P[^/]+)/(?P[^/]+)/chunk$'
+VOLUME_CHUNK_RESPONSE_PATH_REGEX = (
+ r"^/volume_response/(?P[^/]+)/(?P[^/]+)/chunk$"
+)
-EVENTS_PATH_REGEX = r'^/events/(?P[^/]+)$'
+EVENTS_PATH_REGEX = r"^/events/(?P[^/]+)$"
-SET_STATE_PATH_REGEX = r'^/state/(?P[^/]+)$'
+SET_STATE_PATH_REGEX = r"^/state/(?P[^/]+)$"
-CREDENTIALS_PATH_REGEX = r'^/credentials/(?P[^/]+)$'
+CREDENTIALS_PATH_REGEX = r"^/credentials/(?P[^/]+)$"
global_static_content_source = None
-global_server_args = dict(bind_address='127.0.0.1', bind_port=0)
+global_server_args = dict(bind_address="127.0.0.1", bind_port=0)
debug = False
-_IS_GOOGLE_COLAB = 'google.colab' in sys.modules
+_IS_GOOGLE_COLAB = "google.colab" in sys.modules
def _get_server_url(bind_address: str, port: int) -> str:
@@ -74,33 +77,41 @@ def _get_server_url(bind_address: str, port: int) -> str:
def _get_regular_server_url(bind_address: str, port: int) -> str:
- if bind_address == '0.0.0.0' or bind_address == '::':
+ if bind_address == "0.0.0.0" or bind_address == "::":
hostname = socket.getfqdn()
else:
hostname = bind_address
- return f'http://{hostname}:{port}'
+ return f"http://{hostname}:{port}"
def _get_colab_server_url(port: int) -> str:
import google.colab.output
- return google.colab.output.eval_js(f'google.colab.kernel.proxyPort({port})')
+
+ return google.colab.output.eval_js(f"google.colab.kernel.proxyPort({port})")
class Server(async_util.BackgroundTornadoServer):
- def __init__(self, bind_address='127.0.0.1', bind_port=0):
- super().__init__(daemon = True)
+ def __init__(self, bind_address="127.0.0.1", bind_port=0):
+ super().__init__(daemon=True)
self.viewers = weakref.WeakValueDictionary()
self._bind_address = bind_address
self._bind_port = bind_port
self.token = make_random_token()
self.executor = concurrent.futures.ThreadPoolExecutor(
- max_workers=multiprocessing.cpu_count())
+ max_workers=multiprocessing.cpu_count()
+ )
def _attempt_to_start_server(self):
def log_function(handler):
if debug:
- print("%d %s %.2fs" %
- (handler.get_status(), handler.request.uri, handler.request.request_time()))
+ print(
+ "%d %s %.2fs"
+ % (
+ handler.get_status(),
+ handler.request.uri,
+ handler.request.request_time(),
+ )
+ )
app = self.app = tornado.web.Application(
[
@@ -111,8 +122,16 @@ def log_function(handler):
(SKELETON_PATH_REGEX, SkeletonHandler, dict(server=self)),
(MESH_PATH_REGEX, MeshHandler, dict(server=self)),
(ACTION_PATH_REGEX, ActionHandler, dict(server=self)),
- (VOLUME_INFO_RESPONSE_PATH_REGEX, VolumeInfoResponseHandler, dict(server=self)),
- (VOLUME_CHUNK_RESPONSE_PATH_REGEX, VolumeChunkResponseHandler, dict(server=self)),
+ (
+ VOLUME_INFO_RESPONSE_PATH_REGEX,
+ VolumeInfoResponseHandler,
+ dict(server=self),
+ ),
+ (
+ VOLUME_CHUNK_RESPONSE_PATH_REGEX,
+ VolumeChunkResponseHandler,
+ dict(server=self),
+ ),
(EVENTS_PATH_REGEX, EventStreamHandler, dict(server=self)),
(SET_STATE_PATH_REGEX, SetStateHandler, dict(server=self)),
(CREDENTIALS_PATH_REGEX, CredentialsHandler, dict(server=self)),
@@ -120,13 +139,16 @@ def log_function(handler):
log_function=log_function,
# Set a large maximum message size to accommodate large screenshot
# messages.
- websocket_max_message_size=100 * 1024 * 1024)
+ websocket_max_message_size=100 * 1024 * 1024,
+ )
self.http_server = tornado.httpserver.HTTPServer(
app,
# Allow very large requests to accommodate large screenshots.
max_buffer_size=1024**3,
)
- sockets = tornado.netutil.bind_sockets(port=self._bind_port, address=self._bind_address)
+ sockets = tornado.netutil.bind_sockets(
+ port=self._bind_port, address=self._bind_address
+ )
self.http_server.add_sockets(sockets)
actual_port = sockets[0].getsockname()[1]
@@ -135,7 +157,9 @@ def log_function(handler):
global_static_content_source = static.get_default_static_content_source()
self.port = actual_port
self.server_url = _get_server_url(self._bind_address, actual_port)
- self.regular_server_url = _get_regular_server_url(self._bind_address, actual_port)
+ self.regular_server_url = _get_regular_server_url(
+ self._bind_address, actual_port
+ )
self._credentials_manager = None
def __enter__(self):
@@ -145,11 +169,11 @@ def __exit__(self, exc_type, exc_value, traceback):
self.stop()
def get_volume(self, key):
- dot_index = key.find('.')
+ dot_index = key.find(".")
if dot_index == -1:
return None
viewer_token = key[:dot_index]
- volume_token = key[dot_index + 1:]
+ volume_token = key[dot_index + 1 :]
viewer = self.viewers.get(viewer_token)
if viewer is None:
return None
@@ -163,7 +187,10 @@ def initialize(self, server):
class StaticPathHandler(BaseRequestHandler):
def get(self, viewer_token, path):
- if viewer_token != self.server.token and viewer_token not in self.server.viewers:
+ if (
+ viewer_token != self.server.token
+ and viewer_token not in self.server.viewers
+ ):
self.send_error(404)
return
try:
@@ -171,7 +198,7 @@ def get(self, viewer_token, path):
except ValueError as e:
self.send_error(404, message=e.args[0])
return
- self.set_header('Content-type', content_type)
+ self.set_header("Content-type", content_type)
self.finish(data)
@@ -182,8 +209,10 @@ def post(self, viewer_token):
self.send_error(404)
return
action = json.loads(self.request.body)
- self.server.loop.call_soon(viewer.actions.invoke, action['action'], action['state'])
- self.finish('')
+ self.server.loop.call_soon(
+ viewer.actions.invoke, action["action"], action["state"]
+ )
+ self.finish("")
class VolumeInfoResponseHandler(BaseRequestHandler):
@@ -195,7 +224,7 @@ def post(self, viewer_token, request_id):
info = json.loads(self.request.body)
self.server.loop.call_soon(viewer._handle_volume_info_reply, request_id, info)
- self.finish('')
+ self.finish("")
class VolumeChunkResponseHandler(BaseRequestHandler):
@@ -205,10 +234,12 @@ def post(self, viewer_token, request_id):
self.send_error(404)
return
- params = json.loads(self.get_argument('p'))
+ params = json.loads(self.get_argument("p"))
data = self.request.body
- self.server.loop.call_soon(viewer._handle_volume_chunk_reply, request_id, params, data)
- self.finish('')
+ self.server.loop.call_soon(
+ viewer._handle_volume_chunk_reply, request_id, params, data
+ )
+ self.finish("")
class EventStreamStateWatcher:
@@ -225,13 +256,15 @@ def unregister(self):
def maybe_send_update(self, handler):
raw_state, generation = self.state.raw_state_and_generation
- if generation == self.last_generation: return False
- if generation.startswith(self._client_id + '/'): return False
+ if generation == self.last_generation:
+ return False
+ if generation.startswith(self._client_id + "/"):
+ return False
self.last_generation = generation
- msg = {'k': self.key, 's': raw_state, 'g': generation}
- handler.write(f'data: {encode_json(msg)}\n\n')
+ msg = {"k": self.key, "s": raw_state, "g": generation}
+ handler.write(f"data: {encode_json(msg)}\n\n")
if debug:
- print(f'data: {encode_json(msg)}\n\n')
+ print(f"data: {encode_json(msg)}\n\n")
return True
@@ -241,32 +274,41 @@ async def get(self, viewer_token: str):
if viewer is None:
self.send_error(404)
return
- self.set_header('content-type', 'text/event-stream')
- self.set_header('cache-control', 'no-cache')
+ self.set_header("content-type", "text/event-stream")
+ self.set_header("cache-control", "no-cache")
must_flush = True
wake_event = asyncio.Event()
self._wake_up = lambda: self.server.loop.call_soon_threadsafe(wake_event.set)
- client_id = self.get_query_argument('c')
+ client_id = self.get_query_argument("c")
+ if client_id is None:
+ raise tornado.web.HTTPError(400, "missing client_id")
self._closed = False
watchers = []
def watch(key: str, state):
+ last_generation = self.get_query_argument(f"g{key}")
+ if last_generation is None:
+ raise tornado.web.HTTPError(400, f"missing g{key}")
watchers.append(
- EventStreamStateWatcher(key=key,
- state=state,
- client_id=client_id,
- wake_up=self._wake_up,
- last_generation=self.get_query_argument(f'g{key}')))
-
- watch('c', viewer.config_state)
- if hasattr(viewer, 'shared_state'):
- watch('s', viewer.shared_state)
+ EventStreamStateWatcher(
+ key=key,
+ state=state,
+ client_id=client_id,
+ wake_up=self._wake_up,
+ last_generation=last_generation,
+ )
+ )
+
+ watch("c", viewer.config_state)
+ if hasattr(viewer, "shared_state"):
+ watch("s", viewer.shared_state)
try:
while True:
wake_event.clear()
- if self._closed: break
+ if self._closed:
+ break
try:
sent = False
for watcher in watchers:
@@ -290,7 +332,7 @@ def watch(key: str, state):
def on_connection_close(self):
if debug:
- print('connection closed')
+ print("connection closed")
super().on_connection_close()
self._wake_up()
self._closed = True
@@ -303,17 +345,19 @@ def post(self, viewer_token: str):
self.send_error(404)
return
msg = json.loads(self.request.body)
- prev_generation = msg['pg']
- generation = msg['g']
- state = msg['s']
- client_id = msg['c']
+ prev_generation = msg["pg"]
+ generation = msg["g"]
+ state = msg["s"]
+ client_id = msg["c"]
try:
- new_generation = viewer.set_state(state, f'{client_id}/{generation}', existing_generation=prev_generation)
- self.set_header('Content-type', 'application/json')
+ new_generation = viewer.set_state(
+ state, f"{client_id}/{generation}", existing_generation=prev_generation
+ )
+ self.set_header("Content-type", "application/json")
self.finish(json.dumps({"g": new_generation}))
except ConcurrentModificationError:
self.set_status(412)
- self.finish('')
+ self.finish("")
class CredentialsHandler(BaseRequestHandler):
@@ -327,19 +371,23 @@ async def post(self, viewer_token: str):
return
if self.server._credentials_manager is None:
from .default_credentials_manager import default_credentials_manager
+
self.server._credentials_manager = default_credentials_manager
msg = json.loads(self.request.body)
- invalid = msg.get('invalid')
- provider = self.server._credentials_manager.get(msg['key'], msg.get('parameters'))
+ invalid = msg.get("invalid")
+ provider = self.server._credentials_manager.get(
+ msg["key"], msg.get("parameters")
+ )
if provider is None:
self.send_error(400)
return
try:
credentials = await asyncio.wrap_future(provider.get(invalid))
- self.set_header('Content-type', 'application/json')
+ self.set_header("Content-type", "application/json")
self.finish(json.dumps(credentials))
- except:
+ except Exception:
import traceback
+
traceback.print_exc()
self.send_error(401)
@@ -364,8 +412,8 @@ def get(self, token):
class SubvolumeHandler(BaseRequestHandler):
async def get(self, data_format, token, scale_key, start, end):
- start_pos = np.array(start.split(','), dtype=np.int64)
- end_pos = np.array(end.split(','), dtype=np.int64)
+ start_pos = np.array(start.split(","), dtype=np.int64)
+ end_pos = np.array(end.split(","), dtype=np.int64)
vol = self.server.get_volume(token)
if vol is None or not isinstance(vol, local_volume.LocalVolume):
self.send_error(404)
@@ -373,15 +421,18 @@ async def get(self, data_format, token, scale_key, start, end):
try:
data, content_type = await asyncio.wrap_future(
- self.server.executor.submit(vol.get_encoded_subvolume,
- data_format=data_format,
- start=start_pos,
- end=end_pos,
- scale_key=scale_key))
+ self.server.executor.submit(
+ vol.get_encoded_subvolume,
+ data_format=data_format,
+ start=start_pos,
+ end=end_pos,
+ scale_key=scale_key,
+ )
+ )
except ValueError as e:
self.send_error(400, message=e.args[0])
return
- self.set_header('Content-type', content_type)
+ self.set_header("Content-type", content_type)
self.finish(data)
@@ -395,21 +446,22 @@ async def get(self, key, object_id):
try:
encoded_mesh = await asyncio.wrap_future(
- self.server.executor.submit(vol.get_object_mesh, object_id))
+ self.server.executor.submit(vol.get_object_mesh, object_id)
+ )
except local_volume.MeshImplementationNotAvailable:
- self.send_error(501, message='Mesh implementation not available')
+ self.send_error(501, message="Mesh implementation not available")
return
except local_volume.MeshesNotSupportedForVolume:
- self.send_error(405, message='Meshes not supported for volume')
+ self.send_error(405, message="Meshes not supported for volume")
return
except local_volume.InvalidObjectIdForMesh:
- self.send_error(404, message='Mesh not available for specified object id')
+ self.send_error(404, message="Mesh not available for specified object id")
return
except ValueError as e:
self.send_error(400, message=e.args[0])
return
- self.set_header('Content-type', 'application/octet-stream')
+ self.set_header("Content-type", "application/octet-stream")
self.finish(encoded_mesh)
@@ -429,26 +481,30 @@ def get_encoded_skeleton(skeletons, object_id):
try:
encoded_skeleton = await asyncio.wrap_future(
- self.server.executor.submit(get_encoded_skeleton, vol, object_id))
- except:
+ self.server.executor.submit(get_encoded_skeleton, vol, object_id)
+ )
+ except Exception as e:
self.send_error(500, message=e.args[0])
return
if encoded_skeleton is None:
- self.send_error(404, message='Skeleton not available for specified object id')
+ self.send_error(
+ 404, message="Skeleton not available for specified object id"
+ )
return
- self.set_header('Content-type', 'application/octet-stream')
+ self.set_header("Content-type", "application/octet-stream")
self.finish(encoded_skeleton)
global_server = None
_global_server_lock = threading.Lock()
+
def set_static_content_source(*args, **kwargs):
global global_static_content_source
global_static_content_source = static.get_static_content_source(*args, **kwargs)
-def set_server_bind_address(bind_address='127.0.0.1', bind_port=0):
+def set_server_bind_address(bind_address="127.0.0.1", bind_port=0):
global global_server_args
global_server_args = dict(bind_address=bind_address, bind_port=bind_port)
@@ -475,14 +531,16 @@ def stop():
def get_server_url():
return global_server.server_url
+
def start():
global global_server
with _global_server_lock:
- if global_server is not None: return
+ if global_server is not None:
+ return
# Workaround https://bugs.python.org/issue37373
# https://www.tornadoweb.org/en/stable/index.html#installation
- if sys.platform == 'win32' and sys.version_info >= (3, 8):
+ if sys.platform == "win32" and sys.version_info >= (3, 8):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
global_server = Server(**global_server_args)
diff --git a/python/neuroglancer/skeleton.py b/python/neuroglancer/skeleton.py
index fa7a6cdc05..f7a80adf48 100644
--- a/python/neuroglancer/skeleton.py
+++ b/python/neuroglancer/skeleton.py
@@ -12,27 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import collections
import io
import struct
import numpy as np
-import six
-from . import random_token
-from . import trackable_state
+from . import random_token, trackable_state
-class Skeleton(object):
+class Skeleton:
def __init__(self, vertex_positions, edges, vertex_attributes=None):
- self.vertex_positions = np.array(vertex_positions, dtype=' 0:
- for name, info in six.iteritems(source.vertex_attributes):
-
- attribute = np.array(vertex_attributes[name],
- np.dtype(info.data_type).newbyteorder('<'))
+ for name, info in source.vertex_attributes.items():
+ attribute = np.array(
+ vertex_attributes[name], np.dtype(info.data_type).newbyteorder("<")
+ )
expected_shape = (vertex_positions.shape[0], info.num_components)
- if (attribute.shape[0] != expected_shape[0] or
- attribute.size != np.prod(expected_shape)):
- raise ValueError('Expected attribute %r to have shape %r, but was: %r' %
- (name, expected_shape, attribute.shape))
+ if attribute.shape[0] != expected_shape[0] or attribute.size != np.prod(
+ expected_shape
+ ):
+ raise ValueError(
+ "Expected attribute {!r} to have shape {!r}, but was: {!r}".format(
+ name, expected_shape, attribute.shape
+ )
+ )
result.write(attribute.tobytes())
return result.getvalue()
-VertexAttributeInfo = collections.namedtuple('VertexAttributeInfo', ['data_type', 'num_components'])
+VertexAttributeInfo = collections.namedtuple(
+ "VertexAttributeInfo", ["data_type", "num_components"]
+)
-class SkeletonSource(trackable_state.ChangeNotifier):
+class SkeletonSource(trackable_state.ChangeNotifier):
def __init__(self, dimensions, voxel_offset=None):
- super(SkeletonSource, self).__init__()
+ super().__init__()
self.dimensions = dimensions
if voxel_offset is None:
voxel_offset = np.zeros(dimensions.rank, dtype=np.float64)
self.voxel_offset = voxel_offset
- self.vertex_attributes = collections.OrderedDict()
+ self.vertex_attributes = {}
self.token = random_token.make_random_token()
def info(self):
@@ -88,9 +91,11 @@ def get_skeleton(self, object_id):
raise NotImplementedError
def get_vertex_attributes_spec(self):
- temp = collections.OrderedDict()
- for k, v in six.iteritems(self.vertex_attributes):
- temp[k] = dict(dataType=np.dtype(v.data_type).name, numComponents=v.num_components)
+ temp = {}
+ for k, v in self.vertex_attributes.items():
+ temp[k] = dict(
+ dataType=np.dtype(v.data_type).name, numComponents=v.num_components
+ )
return temp
def invalidate(self):
diff --git a/python/neuroglancer/static/__init__.py b/python/neuroglancer/static/__init__.py
index 4f859687af..649ef35a93 100644
--- a/python/neuroglancer/static/__init__.py
+++ b/python/neuroglancer/static/__init__.py
@@ -18,22 +18,21 @@
import re
mime_type_map = {
- '.css': 'text/css',
- '.js': 'application/javascript',
- '.html': 'text/html',
- '.map': 'application/json'
+ ".css": "text/css",
+ ".js": "application/javascript",
+ ".html": "text/html",
+ ".map": "application/json",
}
def guess_mime_type_from_path(path):
- return mime_type_map.get(posixpath.splitext(path)[1], 'application/octet-stream')
+ return mime_type_map.get(posixpath.splitext(path)[1], "application/octet-stream")
-class StaticContentSource(object):
-
+class StaticContentSource:
def get(self, name):
- if name == '':
- name = 'index.html'
+ if name == "":
+ name = "index.html"
return self.get_content(name), guess_mime_type_from_path(name)
def get_content(self, name):
@@ -41,10 +40,9 @@ def get_content(self, name):
class ImportlibResourcesContentSource(StaticContentSource):
-
def get_content(self, name):
- if not re.match(r'^[a-z][a-z_\-\.]*\.(?:js|js\.map|css|html)$', name):
- raise ValueError('Invalid static resource name: %r' % name)
+ if not re.match(r"^[a-z][a-z_\-\.]*\.(?:js|js\.map|css|html)$", name):
+ raise ValueError("Invalid static resource name: %r" % name)
path = importlib.resources.files(__name__).joinpath(name)
if path.is_file():
return path.read_bytes()
@@ -57,21 +55,20 @@ def get_content(self, name):
class HttpSource(StaticContentSource):
-
def __init__(self, url):
self.url = url
def get_content(self, name):
import requests
+
full_url = posixpath.join(self.url, name)
r = requests.get(full_url)
if r.status_code >= 200 and r.status_code < 300:
return r.content
- raise ValueError('Failed to retrieve %r: %s' % (full_url, r.reason))
+ raise ValueError(f"Failed to retrieve {full_url!r}: {r.reason}")
class FileSource(StaticContentSource):
-
def __init__(self, path, file_open=None):
self.file_path = path
self.file_open = file_open or open
@@ -79,10 +76,10 @@ def __init__(self, path, file_open=None):
def get_content(self, name):
full_path = os.path.join(self.file_path, name)
try:
- with self.file_open(full_path, 'rb') as f:
+ with self.file_open(full_path, "rb") as f:
return f.read()
except Exception as e:
- raise ValueError('Failed to read local path %r: %s' % (full_path, e))
+ raise ValueError(f"Failed to read local path {full_path!r}: {e}")
def get_default_static_content_source():
diff --git a/python/neuroglancer/static_file_server.py b/python/neuroglancer/static_file_server.py
index 9ebd4d1f8e..5e0895a55f 100644
--- a/python/neuroglancer/static_file_server.py
+++ b/python/neuroglancer/static_file_server.py
@@ -13,28 +13,22 @@
# limitations under the License.
-import asyncio
-import concurrent.futures
-import threading
-
-import tornado.web
import tornado.httpserver
import tornado.netutil
+import tornado.web
-from . import async_util
-import neuroglancer.server
import neuroglancer.random_token
+import neuroglancer.server
-
+from . import async_util
class CorsStaticFileHandler(tornado.web.StaticFileHandler):
-
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with, range")
self.set_header("Access-Control-Expose-Headers", "content-range")
- self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
+ self.set_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
def options(self, *args):
self.set_status(204)
@@ -42,9 +36,10 @@ def options(self, *args):
class StaticFileServer(async_util.BackgroundTornadoServer):
-
- def __init__(self, static_dir: str, bind_address: str = 'localhost', daemon = False) -> None:
- super().__init__(daemon = daemon)
+ def __init__(
+ self, static_dir: str, bind_address: str = "localhost", daemon=False
+ ) -> None:
+ super().__init__(daemon=daemon)
self.bind_address = bind_address
self.static_dir = static_dir
@@ -57,9 +52,7 @@ def __exit__(self, exc_type, exc_value, traceback):
def _attempt_to_start_server(self):
token = neuroglancer.random_token.make_random_token()
handlers = [
- (fr'/{token}/(.*)', CorsStaticFileHandler, {
- 'path': self.static_dir
- }),
+ (rf"/{token}/(.*)", CorsStaticFileHandler, {"path": self.static_dir}),
]
settings = {}
self.app = tornado.web.Application(handlers, settings=settings)
@@ -68,4 +61,4 @@ def _attempt_to_start_server(self):
self.http_server.add_sockets(sockets)
actual_port = sockets[0].getsockname()[1]
server_url = neuroglancer.server._get_server_url(self.bind_address, actual_port)
- self.url = f'{server_url}/{token}'
+ self.url = f"{server_url}/{token}"
diff --git a/python/neuroglancer/test_util.py b/python/neuroglancer/test_util.py
index 06110fb6f4..942e64f708 100644
--- a/python/neuroglancer/test_util.py
+++ b/python/neuroglancer/test_util.py
@@ -1,12 +1,13 @@
import os
+
def check_golden_contents(path, expected_contents, write=None):
if write is None:
- write = os.getenv('NEUROGLANCER_GENERATE_GOLDEN') == '1'
+ write = os.getenv("NEUROGLANCER_GENERATE_GOLDEN") == "1"
if write:
- with open(path, 'wb') as f:
+ with open(path, "wb") as f:
f.write(expected_contents)
else:
- with open(path, 'rb') as f:
+ with open(path, "rb") as f:
contents = f.read()
assert contents == expected_contents
diff --git a/python/neuroglancer/test_utils.py b/python/neuroglancer/test_utils.py
index 821e7be73d..3377bd95d8 100644
--- a/python/neuroglancer/test_utils.py
+++ b/python/neuroglancer/test_utils.py
@@ -13,15 +13,17 @@
# limitations under the License.
import time
-from typing import Callable, TypeVar, Type, Tuple
-
+from typing import Callable, TypeVar
T = TypeVar("T")
-def retry(func: Callable[[], T], max_attempts: int,
- delay: float = 0.01,
- exceptions: Tuple[Type[Exception], ...] = (Exception,)) -> T:
+def retry(
+ func: Callable[[], T],
+ max_attempts: int,
+ delay: float = 0.01,
+ exceptions: tuple[type[Exception], ...] = (Exception,),
+) -> T:
"""Invokes `func` up to `max_attempts` times.
Reties after a delay of `delay` if an exception in `exceptions` is raised.
@@ -36,7 +38,7 @@ def retry(func: Callable[[], T], max_attempts: int,
Raises:
First exception not in `exceptions`, or any exception on last attempt.
"""
- for i in range(max_attempts-1):
+ for i in range(max_attempts - 1):
try:
return func()
except exceptions:
diff --git a/python/neuroglancer/tool/agglomeration_split_tool.py b/python/neuroglancer/tool/agglomeration_split_tool.py
index 236845494f..a57544a011 100755
--- a/python/neuroglancer/tool/agglomeration_split_tool.py
+++ b/python/neuroglancer/tool/agglomeration_split_tool.py
@@ -1,20 +1,18 @@
#!/usr/bin/env python
-from __future__ import print_function
import argparse
import collections
-import uuid
import copy
import heapq
import json
-import re
-import sqlite3
import logging
import os
+import re
+import sqlite3
+import uuid
import numpy as np
-import six
import neuroglancer
import neuroglancer.cli
@@ -30,7 +28,7 @@ def normalize_edge(e):
return id_a, id_b
-class GreedyMulticut(object):
+class GreedyMulticut:
def __init__(self, combine_edges, edge_priority):
# Contains (score, edge_map_value) tuple values in heap order. The
# edge_map_value is the actual corresponding value in edge_map, not a copy.
@@ -84,7 +82,7 @@ def remove_edge_from_heap(self, segment_ids):
def check_consistency(self):
self._initialize_heap()
expected_regions = dict()
- for key, entry in six.viewitems(self.edge_map):
+ for key, entry in self.edge_map.items():
assert entry[1] == key
expected_regions.setdefault(key[0], set()).add(key[1])
expected_regions.setdefault(key[1], set()).add(key[0])
@@ -111,7 +109,9 @@ def merge(self, e):
new_edge = self.edge_map.get(new_ids)
expired_edge = self.edge_map[expired_ids]
if new_edge is not None:
- edge_data = new_edge[2] = self.combine_edges(new_edge[2], expired_edge[2])
+ edge_data = new_edge[2] = self.combine_edges(
+ new_edge[2], expired_edge[2]
+ )
if new_edge[0] is not None:
self.num_valid_edges -= 1
if expired_edge[0] is not None:
@@ -152,34 +152,40 @@ def get_next_edge(self):
return entry
-Edge = collections.namedtuple('Edge', ['segment_ids', 'score', 'position'])
+Edge = collections.namedtuple("Edge", ["segment_ids", "score", "position"])
def load_edges(path):
edges = []
- with open(path, 'r') as f:
+ with open(path) as f:
f.readline()
for line in f:
- parts = line.split(',')
+ parts = line.split(",")
segment_a = int(parts[0].strip())
segment_b = int(parts[1].strip())
score = float(parts[2].strip())
- position = (int(parts[3].strip()), int(parts[4].strip()), int(parts[5].strip()))
- edges.append(Edge(segment_ids=(segment_a, segment_b), score=score, position=position))
+ position = (
+ int(parts[3].strip()),
+ int(parts[4].strip()),
+ int(parts[5].strip()),
+ )
+ edges.append(
+ Edge(segment_ids=(segment_a, segment_b), score=score, position=position)
+ )
return edges
def load_split_seeds(path):
- with open(path, 'r') as f:
+ with open(path) as f:
raw_seeds = json.loads(f.read())
- seeds = collections.OrderedDict()
+ seeds = {}
for component in raw_seeds:
- seeds.setdefault(component['label'], []).extend(component['supervoxels'])
+ seeds.setdefault(component["label"], []).extend(component["supervoxels"])
return seeds
def build_graph(edges):
- logging.info('Building graph with %d edges', len(edges))
+ logging.info("Building graph with %d edges", len(edges))
def combine_edges(a, b):
return a + b
@@ -196,7 +202,7 @@ def edge_priority(x):
return greedy_multicut
-class AgglomerationGraph(object):
+class AgglomerationGraph:
def __init__(self, conn):
self.conn = conn
self.agglo_members_cache = dict()
@@ -204,7 +210,10 @@ def __init__(self, conn):
def get_agglo_id(self, supervoxel_id):
c = self.conn.cursor()
- c.execute('SELECT agglo_id FROM supervoxels WHERE supervoxel_id=?', (int(supervoxel_id), ))
+ c.execute(
+ "SELECT agglo_id FROM supervoxels WHERE supervoxel_id=?",
+ (int(supervoxel_id),),
+ )
result = c.fetchone()
if result is None:
return supervoxel_id
@@ -215,7 +224,9 @@ def get_agglo_members(self, agglo_id):
if result is not None:
return result
c = self.conn.cursor()
- c.execute('SELECT supervoxel_id FROM supervoxels WHERE agglo_id=?', (int(agglo_id), ))
+ c.execute(
+ "SELECT supervoxel_id FROM supervoxels WHERE agglo_id=?", (int(agglo_id),)
+ )
result = [row[0] for row in c.fetchall()]
self.agglo_members_cache[agglo_id] = result
return result
@@ -225,10 +236,16 @@ def get_agglo_edges(self, agglo_id):
if result is not None:
return result
c = self.conn.cursor()
- c.execute('SELECT segment_a, segment_b, score, x, y, z FROM edges WHERE agglo_id=?',
- (int(agglo_id), ))
+ c.execute(
+ "SELECT segment_a, segment_b, score, x, y, z FROM edges WHERE agglo_id=?",
+ (int(agglo_id),),
+ )
result = [
- Edge(segment_ids=(row[0], row[1]), score=row[2], position=(row[3], row[4], row[5]))
+ Edge(
+ segment_ids=(row[0], row[1]),
+ score=row[2],
+ position=(row[3], row[4], row[5]),
+ )
for row in c.fetchall()
]
self.agglo_edges_cache[agglo_id] = result
@@ -241,7 +258,7 @@ def _make_supervoxel_map(graph, split_seeds, need_agglo_ids):
for label in [0, 1]:
for seed in split_seeds[label]:
- supervoxel_id = seed['supervoxel_id']
+ supervoxel_id = seed["supervoxel_id"]
if need_agglo_ids:
agglo_id = graph.get_agglo_id(supervoxel_id)
if agglo_id == 0:
@@ -252,56 +269,73 @@ def _make_supervoxel_map(graph, split_seeds, need_agglo_ids):
def do_split(graph, split_seeds, agglo_id=None, supervoxels=None):
-
- agglo_ids, supervoxel_map = _make_supervoxel_map(graph, split_seeds, need_agglo_ids=agglo_id is None)
+ agglo_ids, supervoxel_map = _make_supervoxel_map(
+ graph, split_seeds, need_agglo_ids=agglo_id is None
+ )
if agglo_id is None:
-
agglo_id_counts = {
- agglo_id: sum(z[1]['count'] for z in seeds)
- for agglo_id, seeds in six.viewitems(agglo_ids)
+ agglo_id: sum(z[1]["count"] for z in seeds)
+ for agglo_id, seeds in agglo_ids.items()
}
agglo_id = max(agglo_ids, key=lambda x: agglo_id_counts[x])
if len(agglo_ids) > 1:
- logging.info('Warning: more than one agglomerated component. ' +
- 'Choosing component %d with maximum number of seed points.', agglo_id)
- logging.info('agglo_id_counts = %r', agglo_id_counts)
+ logging.info(
+ "Warning: more than one agglomerated component. "
+ + "Choosing component %d with maximum number of seed points.",
+ agglo_id,
+ )
+ logging.info("agglo_id_counts = %r", agglo_id_counts)
input_edges = graph.get_agglo_edges(agglo_id)
if supervoxels is not None:
- input_edges = [x for x in input_edges if x.segment_ids[0] in supervoxels and x.segment_ids[1] in supervoxels]
+ input_edges = [
+ x
+ for x in input_edges
+ if x.segment_ids[0] in supervoxels and x.segment_ids[1] in supervoxels
+ ]
graph = build_graph(input_edges)
if debug_graph:
graph.check_consistency()
cur_eqs = neuroglancer.EquivalenceMap()
- logging.info('Agglomerating')
- threshold = float('inf')
+ logging.info("Agglomerating")
+ threshold = float("inf")
while True:
entry = graph.get_next_edge()
if entry is None:
if verbose_merging:
- logging.info('Stopping because entry is None')
+ logging.info("Stopping because entry is None")
break
if entry[0] > threshold:
if verbose_merging:
- logging.info('Stopping because edge score %r is > threshold %r', entry[0],
- threshold)
+ logging.info(
+ "Stopping because edge score %r is > threshold %r",
+ entry[0],
+ threshold,
+ )
break
segment_ids = entry[1]
seeds_a = supervoxel_map.get(segment_ids[0])
seeds_b = supervoxel_map.get(segment_ids[1])
- if ((seeds_a is not None and len(seeds_a) > 1) or (seeds_b is not None and len(seeds_b) > 1)
- or (seeds_a is not None and seeds_b is not None and seeds_a != seeds_b)):
+ if (
+ (seeds_a is not None and len(seeds_a) > 1)
+ or (seeds_b is not None and len(seeds_b) > 1)
+ or (seeds_a is not None and seeds_b is not None and seeds_a != seeds_b)
+ ):
if verbose_merging:
- logging.info('Excluding edge %r because of seeds: %r %r', segment_ids, seeds_a,
- seeds_b)
+ logging.info(
+ "Excluding edge %r because of seeds: %r %r",
+ segment_ids,
+ seeds_a,
+ seeds_b,
+ )
graph.remove_edge_from_heap(segment_ids)
continue
if verbose_merging:
- logging.info('Merging %r with score %r', segment_ids, entry[0])
+ logging.info("Merging %r with score %r", segment_ids, entry[0])
graph.merge(segment_ids)
if debug_graph:
graph.check_consistency()
@@ -314,14 +348,14 @@ def do_split(graph, split_seeds, agglo_id=None, supervoxels=None):
return dict(agglo_id=agglo_id, cur_eqs=cur_eqs, supervoxel_map=supervoxel_map)
-def display_split_result(graph, agglo_id, cur_eqs, supervoxel_map, split_seeds, image_url,
- segmentation_url):
-
+def display_split_result(
+ graph, agglo_id, cur_eqs, supervoxel_map, split_seeds, image_url, segmentation_url
+):
agglo_members = set(graph.get_agglo_members(agglo_id))
state = neuroglancer.ViewerState()
- state.layers.append(name='image', layer=neuroglancer.ImageLayer(source=image_url))
+ state.layers.append(name="image", layer=neuroglancer.ImageLayer(source=image_url))
state.layers.append(
- name='original',
+ name="original",
layer=neuroglancer.SegmentationLayer(
source=segmentation_url,
segments=agglo_members,
@@ -329,32 +363,33 @@ def display_split_result(graph, agglo_id, cur_eqs, supervoxel_map, split_seeds,
visible=False,
)
state.layers.append(
- name='isolated-supervoxels',
+ name="isolated-supervoxels",
layer=neuroglancer.SegmentationLayer(
source=segmentation_url,
- segments=set(x for x, seeds in six.viewitems(supervoxel_map) if len(seeds) > 1),
+ segments={x for x, seeds in supervoxel_map.items() if len(seeds) > 1},
),
visible=False,
)
state.layers.append(
- name='split',
+ name="split",
layer=neuroglancer.SegmentationLayer(
source=segmentation_url,
equivalences=cur_eqs,
- segments=set(cur_eqs[x] for x in agglo_members),
- ))
- for label, component in six.viewitems(split_seeds):
+ segments={cur_eqs[x] for x in agglo_members},
+ ),
+ )
+ for label, component in split_seeds.items():
state.layers.append(
- name='seed%d' % label,
+ name="seed%d" % label,
layer=neuroglancer.PointAnnotationLayer(
- points=[seed['position'] for seed in component],
+ points=[seed["position"] for seed in component],
),
)
state.show_slices = False
- state.layout = '3d'
+ state.layout = "3d"
all_seed_points = [
- seed['position'] for component in six.viewvalues(split_seeds) for seed in component
+ seed["position"] for component in split_seeds.values() for seed in component
]
state.voxel_coordinates = np.mean(all_seed_points, axis=0)
return state
@@ -362,16 +397,17 @@ def display_split_result(graph, agglo_id, cur_eqs, supervoxel_map, split_seeds,
def _set_viewer_seeds(s, seeds):
for inclusive in [False, True]:
- layer_name = 'inclusive-seeds' if inclusive else 'exclusive-seeds'
+ layer_name = "inclusive-seeds" if inclusive else "exclusive-seeds"
s.layers[layer_name] = neuroglancer.AnnotationLayer(
- annotation_color='green' if inclusive else 'red',
+ annotation_color="green" if inclusive else "red",
annotations=[
dict(
- type='point',
- id=x['id'],
- point=x['position'],
- description=str(x['supervoxel_id']),
- ) for x in seeds[inclusive]
+ type="point",
+ id=x["id"],
+ point=x["position"],
+ description=str(x["supervoxel_id"]),
+ )
+ for x in seeds[inclusive]
],
)
@@ -379,7 +415,7 @@ def _set_viewer_seeds(s, seeds):
def _get_viewer_seeds(s):
seeds = [[], []]
for inclusive in [False, True]:
- layer_name = 'inclusive-seeds' if inclusive else 'exclusive-seeds'
+ layer_name = "inclusive-seeds" if inclusive else "exclusive-seeds"
try:
layer = s.layers[layer_name]
except KeyError:
@@ -390,11 +426,12 @@ def _get_viewer_seeds(s):
id=x.id,
supervoxel_id=int(x.description),
position=tuple(map(int, x.point)),
- ))
+ )
+ )
return seeds
-class ComponentState(object):
+class ComponentState:
def __init__(self, data=None):
self.supervoxels = set()
self.seeds = [[], []]
@@ -402,17 +439,17 @@ def __init__(self, data=None):
self.load(data)
def load(self, data):
- self.supervoxels = set(data['supervoxels'])
- self.seeds = data['seeds']
+ self.supervoxels = set(data["supervoxels"])
+ self.seeds = data["seeds"]
def to_json(self):
return {
- 'supervoxels': sorted(self.supervoxels),
- 'seeds': self.seeds,
+ "supervoxels": sorted(self.supervoxels),
+ "seeds": self.seeds,
}
-class InteractiveState(object):
+class InteractiveState:
def __init__(self, path):
self.unused_supervoxels = set()
self.components = []
@@ -420,11 +457,11 @@ def __init__(self, path):
self.selected_component = None
def load(self):
- with open(self.path, 'r') as f:
+ with open(self.path) as f:
data = json.load(f)
- self.unused_supervoxels = set(data['unused_supervoxels'])
- self.components = map(ComponentState, data['components'])
- self.selected_component = data['selected_component']
+ self.unused_supervoxels = set(data["unused_supervoxels"])
+ self.components = map(ComponentState, data["components"])
+ self.selected_component = data["selected_component"]
def initialize(self, supervoxel_ids):
self.unused_supervoxels = set(supervoxel_ids)
@@ -433,16 +470,16 @@ def initialize(self, supervoxel_ids):
def to_json(self):
return {
- 'unused_supervoxels': sorted(self.unused_supervoxels),
- 'components': [x.to_json() for x in self.components],
- 'selected_component': self.selected_component,
+ "unused_supervoxels": sorted(self.unused_supervoxels),
+ "components": [x.to_json() for x in self.components],
+ "selected_component": self.selected_component,
}
def save(self):
if self.path is None:
return
- tmp_path = self.path + '.tmp'
- with open(tmp_path, 'w') as f:
+ tmp_path = self.path + ".tmp"
+ with open(tmp_path, "w") as f:
f.write(json.dumps(self.to_json()))
os.rename(tmp_path, self.path)
@@ -463,19 +500,19 @@ def cycle_selected_component(self, amount):
self.selected_component = len(self.components) - 1
else:
self.selected_component = (
- self.selected_component + amount + len(self.components)) % len(self.components)
+ self.selected_component + amount + len(self.components)
+ ) % len(self.components)
def add_seed(self, supervoxel_id, position, inclusive):
if self.selected_component is None:
return
c = self.components[self.selected_component]
c.seeds[inclusive].append(
- dict(
- supervoxel_id=supervoxel_id,
- position=position,
- id=uuid.uuid4().hex))
+ dict(supervoxel_id=supervoxel_id, position=position, id=uuid.uuid4().hex)
+ )
+
-class CachedSplitResult(object):
+class CachedSplitResult:
def __init__(self, state, graph, agglo_id):
self.state = state
self.graph = graph
@@ -503,15 +540,18 @@ def update(self):
self.selected_component = self.state.selected_component
self.seeds = copy.deepcopy(component.seeds)
self.supervoxels = set(component.supervoxels)
- print('Recomputing split result')
+ print("Recomputing split result")
self.split_result = do_split(
- graph=self.graph, split_seeds=self.seeds, agglo_id=self.agglo_id,
- supervoxels=self.supervoxels)
- print('Done recomputing split result')
+ graph=self.graph,
+ split_seeds=self.seeds,
+ agglo_id=self.agglo_id,
+ supervoxels=self.supervoxels,
+ )
+ print("Done recomputing split result")
return True
-class InteractiveSplitter(object):
+class InteractiveSplitter:
def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
self.graph = graph
self.agglo_id = agglo_id
@@ -519,7 +559,8 @@ def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
self.segmentation_url = segmentation_url
self.state = InteractiveState(state_path)
self.cached_split_result = CachedSplitResult(
- state=self.state, graph=self.graph, agglo_id=self.agglo_id)
+ state=self.state, graph=self.graph, agglo_id=self.agglo_id
+ )
self.agglo_members = set(self.graph.get_agglo_members(agglo_id))
if state_path is not None and os.path.exists(state_path):
@@ -528,44 +569,45 @@ def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
self.state.initialize(self.agglo_members)
viewer = self.viewer = neuroglancer.Viewer()
- viewer.actions.add('inclusive-seed', self._add_inclusive_seed)
- viewer.actions.add('exclusive-seed', self._add_exclusive_seed)
- viewer.actions.add('next-component', self._next_component)
- viewer.actions.add('prev-component', self._prev_component)
- viewer.actions.add('new-component', self._make_new_component)
- viewer.actions.add('exclude-component', self._exclude_component)
- viewer.actions.add('exclude-all-but-component', self._exclude_all_but_component)
+ viewer.actions.add("inclusive-seed", self._add_inclusive_seed)
+ viewer.actions.add("exclusive-seed", self._add_exclusive_seed)
+ viewer.actions.add("next-component", self._next_component)
+ viewer.actions.add("prev-component", self._prev_component)
+ viewer.actions.add("new-component", self._make_new_component)
+ viewer.actions.add("exclude-component", self._exclude_component)
+ viewer.actions.add("exclude-all-but-component", self._exclude_all_but_component)
key_bindings = [
- ['bracketleft', 'prev-component'],
- ['bracketright', 'next-component'],
- ['at:dblclick0', 'exclude-component'],
- ['at:shift+mousedown2', 'exclude-all-but-component'],
- ['at:control+mousedown0', 'inclusive-seed'],
- ['at:shift+mousedown0', 'exclusive-seed'],
- ['enter', 'new-component'],
+ ["bracketleft", "prev-component"],
+ ["bracketright", "next-component"],
+ ["at:dblclick0", "exclude-component"],
+ ["at:shift+mousedown2", "exclude-all-but-component"],
+ ["at:control+mousedown0", "inclusive-seed"],
+ ["at:shift+mousedown0", "exclusive-seed"],
+ ["enter", "new-component"],
]
with viewer.txn() as s:
s.layers.append(
- name='image',
+ name="image",
layer=neuroglancer.ImageLayer(source=self.image_url),
)
s.layers.append(
- name='original',
+ name="original",
layer=neuroglancer.SegmentationLayer(
source=self.segmentation_url,
segments=self.agglo_members,
),
)
s.layers.append(
- name='unused',
- layer=neuroglancer.SegmentationLayer(source=self.segmentation_url,
- ),
+ name="unused",
+ layer=neuroglancer.SegmentationLayer(
+ source=self.segmentation_url,
+ ),
visible=False,
)
s.layers.append(
- name='split-result',
+ name="split-result",
layer=neuroglancer.SegmentationLayer(
source=self.segmentation_url,
segments=self.agglo_members,
@@ -575,8 +617,9 @@ def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
self._update_state(s)
with viewer.config_state.txn() as s:
- s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
- for key, command in key_bindings))
+ s.status_messages["help"] = "KEYS: " + " | ".join(
+ f"{key}={command}" for key, command in key_bindings
+ )
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
s.input_event_bindings.slice_view[key] = command
@@ -584,7 +627,8 @@ def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
self._update_config_state(s)
viewer.shared_state.add_changed_callback(
- lambda: viewer.defer_callback(self._handle_state_changed))
+ lambda: viewer.defer_callback(self._handle_state_changed)
+ )
def _add_inclusive_seed(self, s):
self._add_seed(s, True)
@@ -603,8 +647,10 @@ def _exclude_component(self, s):
return
self.cached_split_result.update()
- members = set(self.cached_split_result.split_result['cur_eqs'].members(supervoxel_id))
- component.supervoxels = set(x for x in component.supervoxels if x not in members)
+ members = set(
+ self.cached_split_result.split_result["cur_eqs"].members(supervoxel_id)
+ )
+ component.supervoxels = {x for x in component.supervoxels if x not in members}
self.state.unused_supervoxels.update(members)
self._update_view()
@@ -619,13 +665,14 @@ def _exclude_all_but_component(self, s):
return
self.cached_split_result.update()
- members = set(self.cached_split_result.split_result['cur_eqs'].members(supervoxel_id))
- new_unused = set(x for x in component.supervoxels if x not in members)
+ members = set(
+ self.cached_split_result.split_result["cur_eqs"].members(supervoxel_id)
+ )
+ new_unused = {x for x in component.supervoxels if x not in members}
component.supervoxels = members
self.state.unused_supervoxels.update(new_unused)
self._update_view()
-
def _make_new_component(self, s):
del s
self.state.make_new_component()
@@ -653,11 +700,11 @@ def _handle_state_changed(self):
self._update_state(s)
def _get_mouse_supervoxel(self, s):
- supervoxel_id = s.selected_values.get('original')
+ supervoxel_id = s.selected_values.get("original")
if supervoxel_id is not None:
supervoxel_id = supervoxel_id.value
if supervoxel_id is None:
- m = s.selected_values.get('split-result')
+ m = s.selected_values.get("split-result")
if m is not None:
m = m.value
if m is not None:
@@ -686,57 +733,84 @@ def _update_view(self):
def _update_config_state(self, s):
if self.state.selected_component is None:
- msg = '[No component selected] %d unused supervoxels' % len(
- self.state.unused_supervoxels)
+ msg = "[No component selected] %d unused supervoxels" % len(
+ self.state.unused_supervoxels
+ )
else:
selected_component = self.state.selected_component
- msg = '[Component %d/%d] : %d supervoxels, %d connected components, %d unused' % (
- selected_component, len(self.state.components),
- len(self.cached_split_result.supervoxels),
- len(self.cached_split_result.split_result['cur_eqs'].sets()), len(self.state.unused_supervoxels))
- s.status_messages['status'] = msg
+ msg = (
+ "[Component %d/%d] : %d supervoxels, %d connected components, %d unused"
+ % (
+ selected_component,
+ len(self.state.components),
+ len(self.cached_split_result.supervoxels),
+ len(self.cached_split_result.split_result["cur_eqs"].sets()),
+ len(self.state.unused_supervoxels),
+ )
+ )
+ s.status_messages["status"] = msg
def _update_state(self, s):
self.cached_split_result.update()
self.state.save()
_set_viewer_seeds(s, self.cached_split_result.seeds)
- s.layers['unused'].segments = self.state.unused_supervoxels
- s.layers['original'].segments = self.cached_split_result.supervoxels
- s.layers['split-result'].segments = self.cached_split_result.supervoxels
+ s.layers["unused"].segments = self.state.unused_supervoxels
+ s.layers["original"].segments = self.cached_split_result.supervoxels
+ s.layers["split-result"].segments = self.cached_split_result.supervoxels
split_result = self.cached_split_result.split_result
if split_result is not None:
self._show_split_result(
s,
- cur_eqs=split_result['cur_eqs'],
+ cur_eqs=split_result["cur_eqs"],
)
- s.layout = neuroglancer.row_layout([
- neuroglancer.LayerGroupViewer(
- layout='3d',
- layers=['image', 'original', 'unused', 'inclusive-seeds', 'exclusive-seeds']),
- neuroglancer.LayerGroupViewer(
- layout='3d', layers=['image', 'split-result', 'inclusive-seeds',
- 'exclusive-seeds']),
- ])
+ s.layout = neuroglancer.row_layout(
+ [
+ neuroglancer.LayerGroupViewer(
+ layout="3d",
+ layers=[
+ "image",
+ "original",
+ "unused",
+ "inclusive-seeds",
+ "exclusive-seeds",
+ ],
+ ),
+ neuroglancer.LayerGroupViewer(
+ layout="3d",
+ layers=[
+ "image",
+ "split-result",
+ "inclusive-seeds",
+ "exclusive-seeds",
+ ],
+ ),
+ ]
+ )
def _show_split_result(self, s, cur_eqs):
- split_layer = s.layers['split-result']
+ split_layer = s.layers["split-result"]
split_layer.equivalences = cur_eqs
- split_layer.segments = set(cur_eqs[x] for x in self.cached_split_result.supervoxels)
+ split_layer.segments = {
+ cur_eqs[x] for x in self.cached_split_result.supervoxels
+ }
def run_batch(args, graph):
for path in args.split_seeds:
split_seeds = load_split_seeds(path)
- split_result = do_split(graph=graph, split_seeds=split_seeds, agglo_id=args.agglo_id)
+ split_result = do_split(
+ graph=graph, split_seeds=split_seeds, agglo_id=args.agglo_id
+ )
state = display_split_result(
graph=graph,
split_seeds=split_seeds,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
- **split_result)
- print('%s %s' % (neuroglancer.to_url(state), path))
+ **split_result,
+ )
+ print(f"{neuroglancer.to_url(state)} {path}")
def run_interactive(args, graph):
@@ -751,53 +825,73 @@ def run_interactive(args, graph):
agglo_id=args.agglo_id,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
- state_path=args.state)
+ state_path=args.state,
+ )
print(splitter.viewer)
def open_graph(path, agglo_id):
# Check if graph_db is sharded
graph_db = path
- m = re.match(r'(.*)@([0-9]+)((?:\..*)?)$', graph_db)
+ m = re.match(r"(.*)@([0-9]+)((?:\..*)?)$", graph_db)
if m is not None:
num_shards = int(m.group(2))
shard = agglo_id % num_shards
- graph_db = m.group(1) + ('-%05d-of-%05d' % (shard, num_shards)) + m.group(3)
+ graph_db = m.group(1) + ("-%05d-of-%05d" % (shard, num_shards)) + m.group(3)
return AgglomerationGraph(sqlite3.connect(graph_db, check_same_thread=False))
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
- ap.add_argument('-v', '--verbose', action='store_true', help='Display verbose log messages.')
+ ap.add_argument(
+ "-v", "--verbose", action="store_true", help="Display verbose log messages."
+ )
common_ap = argparse.ArgumentParser(add_help=False)
common_ap.add_argument(
- '--graph-db', required=True, help='Path to sqlite3 database specifying agglomeration graph')
+ "--graph-db",
+ required=True,
+ help="Path to sqlite3 database specifying agglomeration graph",
+ )
common_ap.add_argument(
- '--image-url', required=True, help='Neuroglancer data source URL for image')
+ "--image-url", required=True, help="Neuroglancer data source URL for image"
+ )
common_ap.add_argument(
- '--segmentation-url', required=True, help='Neuroglancer data source URL for segmentation')
+ "--segmentation-url",
+ required=True,
+ help="Neuroglancer data source URL for segmentation",
+ )
- sub_aps = ap.add_subparsers(help='command to run')
+ sub_aps = ap.add_subparsers(help="command to run")
interactive_ap = sub_aps.add_parser(
- 'interactive', help='Interactively split an aglomerated component', parents=[common_ap])
+ "interactive",
+ help="Interactively split an agglomerated component",
+ parents=[common_ap],
+ )
batch_ap = sub_aps.add_parser(
- 'batch', help='Split based on pre-specified seed files', parents=[common_ap])
+ "batch", help="Split based on pre-specified seed files", parents=[common_ap]
+ )
interactive_ap.add_argument(
- '--agglo-id', type=int, required=True, help='Agglomerated component id to split')
- interactive_ap.add_argument('--split-seeds', help='Path to JSON file specifying split seeds')
- interactive_ap.add_argument('--state', help='Path to JSON state file.')
+ "--agglo-id", type=int, required=True, help="Agglomerated component id to split"
+ )
+ interactive_ap.add_argument(
+ "--split-seeds", help="Path to JSON file specifying split seeds"
+ )
+ interactive_ap.add_argument("--state", help="Path to JSON state file.")
neuroglancer.cli.add_server_arguments(interactive_ap)
interactive_ap.set_defaults(func=run_interactive)
batch_ap.add_argument(
- '--split-seeds', nargs='+', help='Path to JSON file specifying split seeds')
- batch_ap.add_argument('--agglo-id', type=int, help='Agglomerated component id to split')
+ "--split-seeds", nargs="+", help="Path to JSON file specifying split seeds"
+ )
+ batch_ap.add_argument(
+ "--agglo-id", type=int, help="Agglomerated component id to split"
+ )
batch_ap.set_defaults(func=run_batch)
args = ap.parse_args()
diff --git a/python/neuroglancer/tool/cube.py b/python/neuroglancer/tool/cube.py
index 1ae98455ab..33ac8e27de 100644
--- a/python/neuroglancer/tool/cube.py
+++ b/python/neuroglancer/tool/cube.py
@@ -3,14 +3,16 @@
import argparse
import neuroglancer
-import neuroglancer.coordinate_space
import neuroglancer.cli
+import neuroglancer.coordinate_space
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
- ap.add_argument("--cube-size",
- type=neuroglancer.coordinate_space.parse_unit,
- default=(4e-6, "m"))
+ ap.add_argument(
+ "--cube-size",
+ type=neuroglancer.coordinate_space.parse_unit_and_scale,
+ default=(4e-6, "m"),
+ )
neuroglancer.cli.add_server_arguments(ap)
neuroglancer.cli.add_state_arguments(ap, required=True)
args = ap.parse_args()
@@ -40,23 +42,25 @@
[0, 0, 0, 1], # x+y+
]
-
# Add 6 cube faces
for face_dim in range(3):
for face_dir in range(2):
- state.layout.type = '3d'
+ state.layout.type = "3d"
position = list(state.position)
- position[face_dim] += ((face_dir * 2 - 1) * cube_size / 2 /
- state.dimensions[face_dim].scale)
- state.layout.cross_sections['%d_%d' % (face_dim, face_dir)] = neuroglancer.CrossSection(
+ position[face_dim] += (
+ (face_dir * 2 - 1) * cube_size / 2 / state.dimensions[face_dim].scale
+ )
+ state.layout.cross_sections[
+ "%d_%d" % (face_dim, face_dir)
+ ] = neuroglancer.CrossSection(
width=cube_size / canonical_scale,
height=cube_size / canonical_scale,
- position=neuroglancer.LinkedPosition(link='relative', value=position),
+ position=neuroglancer.LinkedPosition(link="relative", value=position),
orientation=neuroglancer.LinkedOrientationState(
- link='unlinked',
+ link="unlinked",
value=orientations[face_dim],
),
- scale=neuroglancer.LinkedZoomFactor(link='unlinked', value=1),
+ scale=neuroglancer.LinkedZoomFactor(link="unlinked", value=1),
)
print(neuroglancer.to_url(state))
diff --git a/python/neuroglancer/tool/filter_bodies.py b/python/neuroglancer/tool/filter_bodies.py
index cf8b00ce71..fb156d7b83 100644
--- a/python/neuroglancer/tool/filter_bodies.py
+++ b/python/neuroglancer/tool/filter_bodies.py
@@ -1,39 +1,42 @@
-from __future__ import division
-
-import json
-import os
-import copy
-import collections
import argparse
+import collections
+import copy
import csv
+import json
+import os
+
+import numpy as np
import neuroglancer
import neuroglancer.cli
-import numpy as np
-class State(object):
+class State:
def __init__(self, path):
self.path = path
- self.body_labels = collections.OrderedDict()
+ self.body_labels = {}
def load(self):
if os.path.exists(self.path):
- with open(self.path, 'r') as f:
- self.body_labels = collections.OrderedDict(json.load(f))
+ with open(self.path) as f:
+ self.body_labels = dict(json.load(f))
def save(self):
- tmp_path = self.path + '.tmp'
- with open(tmp_path, 'w') as f:
+ tmp_path = self.path + ".tmp"
+ with open(tmp_path, "w") as f:
f.write(json.dumps(self.body_labels.items()))
os.rename(tmp_path, self.path)
-Body = collections.namedtuple('Body', ['segment_id', 'num_voxels', 'bbox_start', 'bbox_size'])
+Body = collections.namedtuple(
+ "Body", ["segment_id", "num_voxels", "bbox_start", "bbox_size"]
+)
-class Tool(object):
- def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
+class Tool:
+ def __init__(
+ self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch
+ ):
self.state = State(state_path)
self.num_to_prefetch = num_to_prefetch
self.viewer = neuroglancer.Viewer()
@@ -43,39 +46,42 @@ def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_
self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])
with self.viewer.txn() as s:
- s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
- s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
+ s.layers["image"] = neuroglancer.ImageLayer(source=image_url)
+ s.layers["segmentation"] = neuroglancer.SegmentationLayer(
+ source=segmentation_url
+ )
s.show_slices = False
s.concurrent_downloads = 256
s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
- s.layout = '3d'
+ s.layout = "3d"
key_bindings = [
- ['bracketleft', 'prev-index'],
- ['bracketright', 'next-index'],
- ['home', 'first-index'],
- ['end', 'last-index'],
- ['control+keys', 'save'],
+ ["bracketleft", "prev-index"],
+ ["bracketright", "next-index"],
+ ["home", "first-index"],
+ ["end", "last-index"],
+ ["control+keys", "save"],
]
- label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
+ label_keys = ["keyd", "keyf", "keyg", "keyh"]
for label, label_key in zip(labels, label_keys):
- key_bindings.append([label_key, 'label-%s' % label])
+ key_bindings.append([label_key, "label-%s" % label])
def label_func(s, label=label):
self.set_label(s, label)
- self.viewer.actions.add('label-%s' % label, label_func)
- self.viewer.actions.add('prev-index', self._prev_index)
- self.viewer.actions.add('next-index', self._next_index)
- self.viewer.actions.add('first-index', self._first_index)
- self.viewer.actions.add('last-index', self._last_index)
- self.viewer.actions.add('save', self.save)
+ self.viewer.actions.add("label-%s" % label, label_func)
+ self.viewer.actions.add("prev-index", self._prev_index)
+ self.viewer.actions.add("next-index", self._next_index)
+ self.viewer.actions.add("first-index", self._first_index)
+ self.viewer.actions.add("last-index", self._last_index)
+ self.viewer.actions.add("save", self.save)
with self.viewer.config_state.txn() as s:
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
- s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
- for key, command in key_bindings))
+ s.status_messages["help"] = "KEYS: " + " | ".join(
+ f"{key}={command}" for key, command in key_bindings
+ )
self.index = -1
self.set_index(self._find_one_after_last_labeled_index())
@@ -93,7 +99,7 @@ def set_index(self, index):
self.index = index
def modify_state_for_body(s, body):
- s.layers['segmentation'].segments = frozenset([body.segment_id])
+ s.layers["segmentation"].segments = frozenset([body.segment_id])
s.voxel_coordinates = body.bbox_start + body.bbox_size // 2
with self.viewer.txn() as s:
@@ -105,7 +111,7 @@ def modify_state_for_body(s, body):
if prefetch_index >= len(self.bodies):
break
prefetch_state = copy.deepcopy(self.viewer.state)
- prefetch_state.layout = '3d'
+ prefetch_state.layout = "3d"
modify_state_for_body(prefetch_state, self.bodies[prefetch_index])
prefetch_states.append(prefetch_state)
@@ -115,12 +121,19 @@ def modify_state_for_body(s, body):
for i, prefetch_state in enumerate(prefetch_states)
]
- label = self.state.body_labels.get(body.segment_id, '')
+ label = self.state.body_labels.get(body.segment_id, "")
with self.viewer.config_state.txn() as s:
- s.status_messages['status'] = (
- '[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s' %
- (index, len(self.bodies), self.cumulative_voxels[index], self.total_voxels,
- self.cumulative_voxels[index] / self.total_voxels, label))
+ s.status_messages["status"] = (
+ "[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s"
+ % (
+ index,
+ len(self.bodies),
+ self.cumulative_voxels[index],
+ self.total_voxels,
+ self.cumulative_voxels[index] / self.total_voxels,
+ label,
+ )
+ )
def save(self, s):
self.state.save()
@@ -142,42 +155,56 @@ def _prev_index(self, s):
self.set_index(max(0, self.index - 1))
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
- ap.add_argument('--image-url', required=True, help='Neuroglancer data source URL for image')
- ap.add_argument('--segmentation-url',
- required=True,
- help='Neuroglancer data source URL for segmentation')
- ap.add_argument('--state', required=True, help='Path to proofreading state file')
- ap.add_argument('--bodies', required=True, help='Path to list of bodies to proofread')
- ap.add_argument('--labels', nargs='+', help='Labels to use')
- ap.add_argument('--prefetch', type=int, default=10, help='Number of bodies to prefetch')
+ ap.add_argument(
+ "--image-url", required=True, help="Neuroglancer data source URL for image"
+ )
+ ap.add_argument(
+ "--segmentation-url",
+ required=True,
+ help="Neuroglancer data source URL for segmentation",
+ )
+ ap.add_argument("--state", required=True, help="Path to proofreading state file")
+ ap.add_argument(
+ "--bodies", required=True, help="Path to list of bodies to proofread"
+ )
+ ap.add_argument("--labels", nargs="+", help="Labels to use")
+ ap.add_argument(
+ "--prefetch", type=int, default=10, help="Number of bodies to prefetch"
+ )
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
bodies = []
- with open(args.bodies, 'r') as f:
+ with open(args.bodies) as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
bodies.append(
Body(
- segment_id=int(row['id']),
- num_voxels=int(row['num_voxels']),
- bbox_start=np.array([
- int(row['bbox.start.x']),
- int(row['bbox.start.y']),
- int(row['bbox.start.z'])
- ],
- dtype=np.int64),
+ segment_id=int(row["id"]),
+ num_voxels=int(row["num_voxels"]),
+ bbox_start=np.array(
+ [
+ int(row["bbox.start.x"]),
+ int(row["bbox.start.y"]),
+ int(row["bbox.start.z"]),
+ ],
+ dtype=np.int64,
+ ),
bbox_size=np.array(
- [int(row['bbox.size.x']),
- int(row['bbox.size.y']),
- int(row['bbox.size.z'])],
- dtype=np.int64),
- ))
+ [
+ int(row["bbox.size.x"]),
+ int(row["bbox.size.y"]),
+ int(row["bbox.size.z"]),
+ ],
+ dtype=np.int64,
+ ),
+ )
+ )
tool = Tool(
state_path=args.state,
diff --git a/python/neuroglancer/tool/mask_tool.py b/python/neuroglancer/tool/mask_tool.py
index e38182bc15..1253a0eacd 100755
--- a/python/neuroglancer/tool/mask_tool.py
+++ b/python/neuroglancer/tool/mask_tool.py
@@ -13,34 +13,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import print_function, division
import argparse
-import collections
-import copy
-import json
import math
-import os
import uuid
import webbrowser
import numpy as np
-import six
import neuroglancer
import neuroglancer.cli
import neuroglancer.url_state
-from neuroglancer.json_utils import json_encoder_default
+
def _full_count_for_level(level):
- return (2**level)**3
+ return (2**level) ** 3
+
MAX_BLOCK_LEVEL = 5
-class BlockMask(object):
+
+class BlockMask:
def __init__(self, max_level=MAX_BLOCK_LEVEL):
# self.blocks[level_i][position] specifies the number of base elements contained within the block
- self.blocks = [dict() for _ in range(max_level+1)]
+ self.blocks = [dict() for _ in range(max_level + 1)]
def _remove_children(self, level, position):
position = tuple(position)
@@ -69,26 +65,29 @@ def _contains(self, level, position):
if level >= len(blocks):
return None, None
- def _add_children(self, level, position, excluded_child_position, excluded_child_count):
+ def _add_children(
+ self, level, position, excluded_child_position, excluded_child_count
+ ):
blocks = self.blocks
- full_count_for_child = _full_count_for_level(level-1)
+ full_count_for_child = _full_count_for_level(level - 1)
for offset in np.ndindex((2,) * 3):
child_position = tuple(x * 2 + o for x, o in zip(position, offset))
count = full_count_for_child
if child_position == excluded_child_position:
count -= excluded_child_count
if count != 0:
- blocks[level-1][child_position] = count
+ blocks[level - 1][child_position] = count
def _add_children_along_path(self, start_level, end_level, start_position):
excluded_count = _full_count_for_level(start_level)
while start_level < end_level:
parent_position = tuple(x // 2 for x in start_position)
start_level += 1
- self._add_children(start_level, parent_position, start_position, excluded_count)
+ self._add_children(
+ start_level, parent_position, start_position, excluded_count
+ )
start_position = parent_position
-
def add(self, level, position):
if self._contains(level, position)[0] is not None:
return
@@ -100,7 +99,8 @@ def add_or_remove_sphere(self, position, radius, add=True):
radius = np.array(radius, dtype=np.int64)
for off in np.ndindex(tuple(radius * 2 + 1)):
off = off - radius
- if sum((off / radius)**2) >= 1: continue
+ if sum((off / radius) ** 2) >= 1:
+ continue
if add:
self.add(0, position + off)
else:
@@ -114,7 +114,9 @@ def remove(self, level, position):
if old_level is None:
return
if old_level != level:
- self._adjust_count(old_level, position_in_old_level, -_full_count_for_level(level))
+ self._adjust_count(
+ old_level, position_in_old_level, -_full_count_for_level(level)
+ )
self._add_children_along_path(level, old_level, position)
return
if old_count != _full_count_for_level(level):
@@ -137,29 +139,31 @@ def _adjust_count(self, level, position, amount):
if level + 1 < len(self.blocks):
self._adjust_count(level + 1, tuple(x // 2 for x in position), amount)
+
def make_block_mask(annotations, block_size, max_level=MAX_BLOCK_LEVEL):
mask = BlockMask(max_level=max_level)
for x in annotations:
if not isinstance(x, neuroglancer.AxisAlignedBoundingBoxAnnotation):
- print('Warning: got non-box annotation: %r' % (x,))
+ print(f"Warning: got non-box annotation: {x!r}")
continue
size = (x.point_b - x.point_a) / block_size
if size[0] != int(size[0]) or np.any(size != size[0]):
- print('Warning: got invalid box: %r' % (x,))
+ print(f"Warning: got invalid box: {x!r}")
continue
level = math.log(size[0]) / math.log(2)
if level != int(level):
- print('Warning: got invalid box: %r' % (x,))
+ print(f"Warning: got invalid box: {x!r}")
continue
level = int(level)
eff_block_size = block_size * (2**level)
if np.any(x.point_a % eff_block_size != 0):
- print('Warning: got invalid box: %r' % (x,))
+ print(f"Warning: got invalid box: {x!r}")
continue
position = tuple(int(z) for z in x.point_a // eff_block_size)
mask.add(level, position)
return mask
+
def make_annotations_from_mask(mask, block_size):
result = []
for level, position_counts in enumerate(mask.blocks):
@@ -172,44 +176,63 @@ def make_annotations_from_mask(mask, block_size):
position = np.array(position, dtype=np.int64)
box_start = eff_block_size * position
box_end = box_start + eff_block_size
- result.append(neuroglancer.AxisAlignedBoundingBoxAnnotation(
- point_a = box_start,
- point_b = box_end,
- id = uuid.uuid4().hex,
- ))
+ result.append(
+ neuroglancer.AxisAlignedBoundingBoxAnnotation(
+ point_a=box_start,
+ point_b=box_end,
+ id=uuid.uuid4().hex,
+ )
+ )
return result
def normalize_block_annotations(annotations, block_size, max_level=3):
- mask = make_block_mask(annotations=annotations, block_size=block_size, max_level=max_level)
+ mask = make_block_mask(
+ annotations=annotations, block_size=block_size, max_level=max_level
+ )
return make_annotations_from_mask(mask=mask, block_size=block_size)
-class Annotator(object):
+class Annotator:
def __init__(self):
- self.annotation_layer_name = 'false-merges'
+ self.annotation_layer_name = "false-merges"
self.false_merge_block_size = np.array([1, 1, 1], dtype=np.int64)
self.cur_block_level = 2
self.max_block_levels = 5
viewer = self.viewer = neuroglancer.Viewer()
self.other_state_segment_ids = dict()
- viewer.actions.add('anno-save', lambda s: self.save())
- viewer.actions.add('anno-mark-pre', lambda s: self.mark_synapse(s, layer='pre', add=True))
- viewer.actions.add('anno-unmark-pre', lambda s: self.unmark_synapse(s, layer='pre', add=false))
- viewer.actions.add('anno-mark-post', lambda s: self.mark_synapse(s, layer='post', add=True))
- viewer.actions.add('anno-unmark-post', lambda s: self.unmark_synapse(s, layer='post', add=false))
- viewer.actions.add('anno-decrease-block-size', self.decrease_block_size)
- viewer.actions.add('anno-increase-block-size', self.increase_block_size)
+ viewer.actions.add("anno-save", lambda s: self.save())
+ viewer.actions.add(
+ "anno-mark-pre", lambda s: self.mark_synapse(s, layer="pre", add=True)
+ )
+ viewer.actions.add(
+ "anno-unmark-pre", lambda s: self.unmark_synapse(s, layer="pre", add=False)
+ )
+ viewer.actions.add(
+ "anno-mark-post", lambda s: self.mark_synapse(s, layer="post", add=True)
+ )
+ viewer.actions.add(
+ "anno-unmark-post",
+ lambda s: self.unmark_synapse(s, layer="post", add=False),
+ )
+ viewer.actions.add("anno-decrease-block-size", self.decrease_block_size)
+ viewer.actions.add("anno-increase-block-size", self.increase_block_size)
with viewer.config_state.txn() as s:
- s.input_event_bindings.data_view['bracketleft'] = 'anno-decrease-block-size'
- s.input_event_bindings.data_view['bracketright'] = 'anno-increase-block-size'
- s.input_event_bindings.data_view['control+keys'] = 'anno-save'
- s.input_event_bindings.data_view['control+mousedown0'] = 'anno-mark-pre'
- s.input_event_bindings.data_view['control+shift+mousedown0'] = 'anno-unmark-pre'
- s.input_event_bindings.data_view['control+mousedown2'] = 'anno-mark-post'
- s.input_event_bindings.data_view['control+shift+mousedown2'] = 'anno-unmark-post'
+ s.input_event_bindings.data_view["bracketleft"] = "anno-decrease-block-size"
+ s.input_event_bindings.data_view[
+ "bracketright"
+ ] = "anno-increase-block-size"
+ s.input_event_bindings.data_view["control+keys"] = "anno-save"
+ s.input_event_bindings.data_view["control+mousedown0"] = "anno-mark-pre"
+ s.input_event_bindings.data_view[
+ "control+shift+mousedown0"
+ ] = "anno-unmark-pre"
+ s.input_event_bindings.data_view["control+mousedown2"] = "anno-mark-post"
+ s.input_event_bindings.data_view[
+ "control+shift+mousedown2"
+ ] = "anno-unmark-post"
self.cur_message = None
@@ -232,30 +255,40 @@ def mark_synapse(self, s, layer, add):
if s.layers.index(layer) == -1:
s.layers[layer] = neuroglancer.LocalAnnotationLayer(
dimensions=s.dimensions,
- shader='''
+ shader="""
void main() {
setBoundingBoxBorderWidth(0.0);
setBoundingBoxFillColor(defaultColor());
}
-''',
- annotation_color = '#0f0' if layer == 'pre' else '#00f',
+""",
+ annotation_color="#0f0" if layer == "pre" else "#00f",
)
annotations = s.layers[layer].annotations
- mask = make_block_mask(annotations=annotations, block_size=block_size, max_level=self.max_block_levels)
- mask.add_or_remove_sphere(np.array([int(x) for x in voxel_coordinates]),
- np.array([1, 1, 1]) * 2**level,
- add=add)
- new_annotations = make_annotations_from_mask(mask=mask, block_size=block_size)
+ mask = make_block_mask(
+ annotations=annotations,
+ block_size=block_size,
+ max_level=self.max_block_levels,
+ )
+ mask.add_or_remove_sphere(
+ np.array([int(x) for x in voxel_coordinates]),
+ np.array([1, 1, 1]) * 2**level,
+ add=add,
+ )
+ new_annotations = make_annotations_from_mask(
+ mask=mask, block_size=block_size
+ )
s.layers[layer].annotations = new_annotations
def update_message(self):
- message = '[Block size: %d vx] ' % (self.false_merge_block_size[0] * 2**self.false_merge_block_level)
+ message = "[Block size: %d vx] " % (
+ self.false_merge_block_size[0] * 2**self.false_merge_block_level
+ )
if message != self.cur_message:
with self.viewer.config_state.txn() as s:
if message is not None:
- s.status_messages['status'] = message
+ s.status_messages["status"] = message
else:
- s.status_messages.pop('status')
+ s.status_messages.pop("status")
self.cur_message = message
def show(self):
@@ -265,11 +298,12 @@ def get_viewer_url(self):
return self.viewer.get_viewer_url()
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
- ap.add_argument('--url', type=str)
+ ap.add_argument("--url", type=str)
ap.add_argument(
- '-n', '--no-webbrowser', action='store_true', help='Don\'t open the webbrowser.')
+ "-n", "--no-webbrowser", action="store_true", help="Don't open the webbrowser."
+ )
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
diff --git a/python/neuroglancer/tool/merge_tool.py b/python/neuroglancer/tool/merge_tool.py
index 2b86c60d9c..71ce19672c 100755
--- a/python/neuroglancer/tool/merge_tool.py
+++ b/python/neuroglancer/tool/merge_tool.py
@@ -13,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import print_function, division
import argparse
-import collections
import copy
import json
import math
@@ -31,18 +29,21 @@
import neuroglancer.url_state
from neuroglancer.json_utils import json_encoder_default
+
def get_segmentation_layer(layers):
for layer in layers:
if isinstance(layer.layer, neuroglancer.SegmentationLayer):
return layer
+
def _full_count_for_level(level):
- return (2**level)**3
+ return (2**level) ** 3
-class BlockMask(object):
+
+class BlockMask:
def __init__(self, max_level=3):
# self.blocks[level_i][position] specifies the number of base elements contained within the block
- self.blocks = [dict() for _ in range(max_level+1)]
+ self.blocks = [dict() for _ in range(max_level + 1)]
def _remove_children(self, level, position):
position = tuple(position)
@@ -71,26 +72,29 @@ def _contains(self, level, position):
if level >= len(blocks):
return None, None
- def _add_children(self, level, position, excluded_child_position, excluded_child_count):
+ def _add_children(
+ self, level, position, excluded_child_position, excluded_child_count
+ ):
blocks = self.blocks
- full_count_for_child = _full_count_for_level(level-1)
+ full_count_for_child = _full_count_for_level(level - 1)
for offset in np.ndindex((2,) * 3):
child_position = tuple(x * 2 + o for x, o in zip(position, offset))
count = full_count_for_child
if child_position == excluded_child_position:
count -= excluded_child_count
if count != 0:
- blocks[level-1][child_position] = count
+ blocks[level - 1][child_position] = count
def _add_children_along_path(self, start_level, end_level, start_position):
excluded_count = _full_count_for_level(start_level)
while start_level < end_level:
parent_position = tuple(x // 2 for x in start_position)
start_level += 1
- self._add_children(start_level, parent_position, start_position, excluded_count)
+ self._add_children(
+ start_level, parent_position, start_position, excluded_count
+ )
start_position = parent_position
-
def add(self, level, position):
if self._contains(level, position)[0] is not None:
return
@@ -106,7 +110,9 @@ def remove(self, level, position):
if old_level is None:
return
if old_level != level:
- self._adjust_count(old_level, position_in_old_level, -_full_count_for_level(level))
+ self._adjust_count(
+ old_level, position_in_old_level, -_full_count_for_level(level)
+ )
self._add_children_along_path(level, old_level, position)
return
if old_count != _full_count_for_level(level):
@@ -129,29 +135,31 @@ def _adjust_count(self, level, position, amount):
if level + 1 < len(self.blocks):
self._adjust_count(level + 1, tuple(x // 2 for x in position), amount)
+
def make_block_mask(annotations, block_size, max_level=3):
mask = BlockMask(max_level=max_level)
for x in annotations:
if not isinstance(x, neuroglancer.AxisAlignedBoundingBoxAnnotation):
- print('Warning: got non-box annotation: %r' % (x,))
+ print(f"Warning: got non-box annotation: {x!r}")
continue
size = (x.point_b - x.point_a) / block_size
if size[0] != int(size[0]) or np.any(size != size[0]):
- print('Warning: got invalid box: %r' % (x,))
+ print(f"Warning: got invalid box: {x!r}")
continue
level = math.log(size[0]) / math.log(2)
if level != int(level):
- print('Warning: got invalid box: %r' % (x,))
+ print(f"Warning: got invalid box: {x!r}")
continue
level = int(level)
eff_block_size = block_size * (2**level)
if np.any(x.point_a % eff_block_size != 0):
- print('Warning: got invalid box: %r' % (x,))
+ print(f"Warning: got invalid box: {x!r}")
continue
position = tuple(int(z) for z in x.point_a // eff_block_size)
mask.add(level, position)
return mask
+
def make_annotations_from_mask(mask, block_size):
result = []
for level, position_counts in enumerate(mask.blocks):
@@ -164,23 +172,27 @@ def make_annotations_from_mask(mask, block_size):
position = np.array(position, dtype=np.int64)
box_start = eff_block_size * position
box_end = box_start + eff_block_size
- result.append(neuroglancer.AxisAlignedBoundingBoxAnnotation(
- point_a = box_start,
- point_b = box_end,
- id = uuid.uuid4().hex,
- ))
+ result.append(
+ neuroglancer.AxisAlignedBoundingBoxAnnotation(
+ point_a=box_start,
+ point_b=box_end,
+ id=uuid.uuid4().hex,
+ )
+ )
return result
def normalize_block_annotations(annotations, block_size, max_level=3):
- mask = make_block_mask(annotations=annotations, block_size=block_size, max_level=max_level)
+ mask = make_block_mask(
+ annotations=annotations, block_size=block_size, max_level=max_level
+ )
return make_annotations_from_mask(mask=mask, block_size=block_size)
-class Annotator(object):
+class Annotator:
def __init__(self, filename):
self.filename = filename
- self.annotation_layer_name = 'false-merges'
+ self.annotation_layer_name = "false-merges"
self.states = []
self.state_index = None
self.false_merge_block_size = np.array([32, 32, 32], dtype=np.int64)
@@ -189,26 +201,40 @@ def __init__(self, filename):
viewer = self.viewer = neuroglancer.Viewer()
self.other_state_segment_ids = dict()
- viewer.actions.add('anno-next-state', lambda s: self.next_state())
- viewer.actions.add('anno-prev-state', lambda s: self.prev_state())
- viewer.actions.add('anno-save', lambda s: self.save())
- viewer.actions.add('anno-show-all', lambda s: self.set_combined_state())
- viewer.actions.add('anno-add-segments-from-state',
- lambda s: self.add_segments_from_state(s.viewer_state))
- viewer.actions.add('anno-mark-false-merge', self.mark_false_merge)
- viewer.actions.add('anno-unmark-false-merge', lambda s: self.mark_false_merge(s, erase=True))
- viewer.actions.add('anno-decrease-block-size', self.decrease_false_merge_block_size)
- viewer.actions.add('anno-increase-block-size', self.increase_false_merge_block_size)
+ viewer.actions.add("anno-next-state", lambda s: self.next_state())
+ viewer.actions.add("anno-prev-state", lambda s: self.prev_state())
+ viewer.actions.add("anno-save", lambda s: self.save())
+ viewer.actions.add("anno-show-all", lambda s: self.set_combined_state())
+ viewer.actions.add(
+ "anno-add-segments-from-state",
+ lambda s: self.add_segments_from_state(s.viewer_state),
+ )
+ viewer.actions.add("anno-mark-false-merge", self.mark_false_merge)
+ viewer.actions.add(
+ "anno-unmark-false-merge", lambda s: self.mark_false_merge(s, erase=True)
+ )
+ viewer.actions.add(
+ "anno-decrease-block-size", self.decrease_false_merge_block_size
+ )
+ viewer.actions.add(
+ "anno-increase-block-size", self.increase_false_merge_block_size
+ )
with viewer.config_state.txn() as s:
- s.input_event_bindings.data_view['pageup'] = 'anno-prev-state'
- s.input_event_bindings.data_view['pagedown'] = 'anno-next-state'
- s.input_event_bindings.data_view['bracketleft'] = 'anno-decrease-block-size'
- s.input_event_bindings.data_view['bracketright'] = 'anno-increase-block-size'
- s.input_event_bindings.data_view['control+keys'] = 'anno-save'
- s.input_event_bindings.data_view['control+keya'] = 'anno-show-all'
- s.input_event_bindings.data_view['control+mousedown0'] = 'anno-mark-false-merge'
- s.input_event_bindings.data_view['control+shift+mousedown0'] = 'anno-unmark-false-merge'
+ s.input_event_bindings.data_view["pageup"] = "anno-prev-state"
+ s.input_event_bindings.data_view["pagedown"] = "anno-next-state"
+ s.input_event_bindings.data_view["bracketleft"] = "anno-decrease-block-size"
+ s.input_event_bindings.data_view[
+ "bracketright"
+ ] = "anno-increase-block-size"
+ s.input_event_bindings.data_view["control+keys"] = "anno-save"
+ s.input_event_bindings.data_view["control+keya"] = "anno-show-all"
+ s.input_event_bindings.data_view[
+ "control+mousedown0"
+ ] = "anno-mark-false-merge"
+ s.input_event_bindings.data_view[
+ "control+shift+mousedown0"
+ ] = "anno-unmark-false-merge"
viewer.shared_state.add_changed_callback(self.on_state_changed)
self.cur_message = None
@@ -216,7 +242,9 @@ def __init__(self, filename):
self.set_state_index(None)
def increase_false_merge_block_size(self, s):
- self.false_merge_block_level = min(self.max_false_merge_block_levels, self.false_merge_block_level + 1)
+ self.false_merge_block_level = min(
+ self.max_false_merge_block_levels, self.false_merge_block_level + 1
+ )
self.update_message()
def decrease_false_merge_block_size(self, s):
@@ -234,12 +262,18 @@ def mark_false_merge(self, s, erase=False):
with self.viewer.txn() as s:
annotations = s.layers[self.annotation_layer_name].annotations
- mask = make_block_mask(annotations=annotations, block_size=block_size, max_level=self.max_false_merge_block_levels)
+ mask = make_block_mask(
+ annotations=annotations,
+ block_size=block_size,
+ max_level=self.max_false_merge_block_levels,
+ )
if erase:
mask.remove(level, block_position)
else:
mask.add(level, block_position)
- new_annotations = make_annotations_from_mask(mask=mask, block_size=block_size)
+ new_annotations = make_annotations_from_mask(
+ mask=mask, block_size=block_size
+ )
s.layers[self.annotation_layer_name].annotations = new_annotations
def on_state_changed(self):
@@ -247,25 +281,29 @@ def on_state_changed(self):
self.update_message()
def update_message(self):
- message = '[Block size: %d vx] ' % (self.false_merge_block_size[0] * 2**self.false_merge_block_level)
+ message = "[Block size: %d vx] " % (
+ self.false_merge_block_size[0] * 2**self.false_merge_block_level
+ )
if self.state_index is None:
- message += '[No state selected]'
+ message += "[No state selected]"
else:
- message += '[%d/%d] ' % (self.state_index, len(self.states))
+ message += "[%d/%d] " % (self.state_index, len(self.states))
segments = self.get_state_segment_ids(self.viewer.state)
warnings = []
for segment_id in segments:
other_state = self.other_state_segment_ids.get(segment_id)
if other_state is not None:
- warnings.append('Segment %d also in state %d' % (segment_id, other_state))
+ warnings.append(
+ "Segment %d also in state %d" % (segment_id, other_state)
+ )
if warnings:
- message += 'WARNING: ' + ', '.join(warnings)
+ message += "WARNING: " + ", ".join(warnings)
if message != self.cur_message:
with self.viewer.config_state.txn() as s:
if message is not None:
- s.status_messages['status'] = message
+ s.status_messages["status"] = message
else:
- s.status_messages.pop('status')
+ s.status_messages.pop("status")
self.cur_message = message
def load(self):
@@ -273,18 +311,19 @@ def load(self):
return False
self.state_index = None
- with open(self.filename, 'r') as f:
-
- loaded_state = json.load(f, object_pairs_hook=collections.OrderedDict)
- self.states = [neuroglancer.ViewerState(x) for x in loaded_state['states']]
- self.set_state_index(loaded_state['state_index'])
+ with open(self.filename) as f:
+ loaded_state = json.load(f)
+ self.states = [neuroglancer.ViewerState(x) for x in loaded_state["states"]]
+ self.set_state_index(loaded_state["state_index"])
return True
def set_state_index_relative(self, amount):
if self.state_index is None:
new_state = 0
else:
- new_state = (self.state_index + amount + len(self.states)) % len(self.states)
+ new_state = (self.state_index + amount + len(self.states)) % len(
+ self.states
+ )
self.set_state_index(new_state)
def next_state(self):
@@ -303,11 +342,12 @@ def set_state_index(self, index):
anno_layer = new_state.layers[self.annotation_layer_name]
if anno_layer.annotation_fill_opacity == 0:
anno_layer.annotation_fill_opacity = 0.7
- anno_layer.annotation_color = 'black'
+ anno_layer.annotation_color = "black"
anno_layer.annotations = normalize_block_annotations(
anno_layer.annotations,
block_size=self.false_merge_block_size,
- max_level=self.max_false_merge_block_levels)
+ max_level=self.max_false_merge_block_levels,
+ )
self.viewer.set_state(new_state)
other_ids = self.other_state_segment_ids
other_ids.clear()
@@ -329,7 +369,7 @@ def get_duplicate_segment_ids(self):
for segment_id in other_ids:
state_numbers = other_ids[segment_id]
if len(state_numbers) > 1:
- print('%d in %r' % (segment_id, state_numbers))
+ print("%d in %r" % (segment_id, state_numbers))
def _grab_viewer_state(self):
if self.state_index is not None:
@@ -337,14 +377,19 @@ def _grab_viewer_state(self):
def save(self):
self._grab_viewer_state()
- tmp_filename = self.filename + '.tmp'
- with open(tmp_filename, 'wb') as f:
+ tmp_filename = self.filename + ".tmp"
+ with open(tmp_filename, "wb") as f:
f.write(
json.dumps(
- dict(states=[s.to_json() for s in self.states], state_index=self.state_index),
- default=json_encoder_default))
+ dict(
+ states=[s.to_json() for s in self.states],
+ state_index=self.state_index,
+ ),
+ default=json_encoder_default,
+ )
+ )
os.rename(tmp_filename, self.filename)
- print('Saved state to: %s' % (self.filename, ))
+ print(f"Saved state to: {self.filename}")
def get_state_segment_ids(self, state):
return get_segmentation_layer(state.layers).segments
@@ -367,7 +412,7 @@ def add_segments_from_state(self, base_state):
for segment_id in segment_ids:
if segment_id in existing_segment_ids:
- print('Skipping redundant segment id %d' % segment_id)
+ print("Skipping redundant segment id %d" % segment_id)
continue
self.states.append(self.make_initial_state(segment_id, base_state))
@@ -393,7 +438,7 @@ def remove_zero_segments(self):
def set_combined_state(self):
state = self.make_combined_state()
if state is None:
- print('No states')
+ print("No states")
else:
self.set_state_index(None)
self.viewer.set_state(state)
@@ -430,21 +475,26 @@ def get_sets(self):
return sets
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
- ap.add_argument('filename', type=str)
- ap.add_argument('--add-segments-from-url', type=str, nargs='*', default=[])
+ ap.add_argument("filename", type=str)
+ ap.add_argument("--add-segments-from-url", type=str, nargs="*", default=[])
+ ap.add_argument(
+ "-n", "--no-webbrowser", action="store_true", help="Don't open the webbrowser."
+ )
ap.add_argument(
- '-n', '--no-webbrowser', action='store_true', help='Don\'t open the webbrowser.')
- ap.add_argument('--print-sets', action='store_true', help='Print the sets of supervoxels.')
+ "--print-sets", action="store_true", help="Print the sets of supervoxels."
+ )
ap.add_argument(
- '--print-combined-state',
- action='store_true',
- help='Prints a neuroglancer link for the combined state.')
+ "--print-combined-state",
+ action="store_true",
+ help="Prints a neuroglancer link for the combined state.",
+ )
ap.add_argument(
- '--print-summary',
- action='store_true',
- help='Prints a neuroglancer link for the combined state.')
+ "--print-summary",
+ action="store_true",
+ help="Prints a neuroglancer link for the combined state.",
+ )
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
@@ -461,12 +511,14 @@ def get_sets(self):
print(neuroglancer.to_url(anno.make_combined_state()))
if args.print_summary:
- print('')
- print('%s' % args.filename)
+ print("")
+ print("%s" % args.filename)
print(
- '<a href="%s">Neuroglancer</a>' % neuroglancer.to_url(anno.make_combined_state()))
+ '<a href="%s">Neuroglancer</a>'
+ % neuroglancer.to_url(anno.make_combined_state())
+ )
print(repr(anno.get_sets()))
- print('')
+ print("")
else:
print(anno.get_viewer_url())
diff --git a/python/neuroglancer/tool/save_meshes.py b/python/neuroglancer/tool/save_meshes.py
index 580af684a6..8c02ca7e34 100755
--- a/python/neuroglancer/tool/save_meshes.py
+++ b/python/neuroglancer/tool/save_meshes.py
@@ -30,54 +30,63 @@
try:
import cloudvolume
except ImportError:
- print('cloud-volume package is required: pip install cloud-volume')
+ print("cloud-volume package is required: pip install cloud-volume")
sys.exit(1)
def save_meshes(state, output_dir, output_format, lod):
for layer in state.layers:
- if not isinstance(layer.layer, neuroglancer.SegmentationLayer): continue
- if not layer.visible: return False
+ if not isinstance(layer.layer, neuroglancer.SegmentationLayer):
+ continue
+ if not layer.visible:
+ return False
for source in layer.source:
- if not source.url.startswith('precomputed://'):
+ if not source.url.startswith("precomputed://"):
continue
vol = cloudvolume.CloudVolume(source.url, parallel=True, progress=True)
- if len(layer.segments) == 0: continue
+ if len(layer.segments) == 0:
+ continue
get_mesh_kwargs = {}
if lod != 0:
get_mesh_kwargs.update(lod=lod)
for segment in layer.segments:
- output_path = os.path.join(output_dir, '%d.%s' % (segment, output_format))
- print('Saving layer %r object %s -> %s' % (layer.name, segment, output_path))
+ output_path = os.path.join(
+ output_dir, "%d.%s" % (segment, output_format)
+ )
+ print(f"Saving layer {layer.name!r} object {segment} -> {output_path}")
os.makedirs(output_dir, exist_ok=True)
mesh = vol.mesh.get(segment, **get_mesh_kwargs)
if isinstance(mesh, dict):
mesh = list(mesh.values())[0]
- if output_format == 'obj':
+ if output_format == "obj":
data = mesh.to_obj()
- elif output_format == 'ply':
+ elif output_format == "ply":
data = mesh.to_ply()
- elif output_format == 'precomputed':
+ elif output_format == "precomputed":
data = mesh.to_precomputed()
- with open(output_path, 'wb') as f:
+ with open(output_path, "wb") as f:
f.write(data)
return
- print('No segmentation layer found')
+ print("No segmentation layer found")
sys.exit(1)
def main(args=None):
ap = argparse.ArgumentParser()
neuroglancer.cli.add_state_arguments(ap, required=True)
- ap.add_argument('--format', choices=['obj', 'ply'], default='obj')
- ap.add_argument('--lod', type=int, default=0, help='Mesh level of detail to download')
- ap.add_argument('--output-dir', default='.')
+ ap.add_argument("--format", choices=["obj", "ply"], default="obj")
+ ap.add_argument(
+ "--lod", type=int, default=0, help="Mesh level of detail to download"
+ )
+ ap.add_argument("--output-dir", default=".")
parsed_args = ap.parse_args()
- save_meshes(state=parsed_args.state,
- output_dir=parsed_args.output_dir,
- output_format=parsed_args.format,
- lod=parsed_args.lod)
+ save_meshes(
+ state=parsed_args.state,
+ output_dir=parsed_args.output_dir,
+ output_format=parsed_args.format,
+ lod=parsed_args.lod,
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/python/neuroglancer/tool/screenshot.py b/python/neuroglancer/tool/screenshot.py
index 1c3fd02c8d..2b39975226 100755
--- a/python/neuroglancer/tool/screenshot.py
+++ b/python/neuroglancer/tool/screenshot.py
@@ -72,19 +72,18 @@
"""
import argparse
-import collections
import contextlib
import copy
import datetime
import itertools
-import numbers
import os
import threading
import time
-from typing import NamedTuple, Tuple, Callable, Iterator, List, Optional
+from collections.abc import Iterator
+from typing import Callable, NamedTuple, Optional
-import PIL
import numpy as np
+import PIL
import neuroglancer
import neuroglancer.cli
@@ -111,7 +110,9 @@ def _calculate_num_shards(state, segment_shard_size):
def _get_sharded_states(state, segment_shard_size, reverse_bits):
if reverse_bits:
- sort_key = lambda x: int('{:064b}'.format(x)[::-1], 2)
+
+ def sort_key(x):
+ return int(f"{x:064b}"[::-1], 2)
else:
sort_key = None
num_shards = _calculate_num_shards(state, segment_shard_size)
@@ -136,8 +137,12 @@ class TileGenerator:
def __init__(self, shape, tile_shape):
self.tile_shape = tuple(tile_shape)
self.shape = tuple(shape)
- self.tile_grid_shape = tuple(-(-self.shape[i] // self.tile_shape[i]) for i in range(2))
- self.tile_shape = tuple(-(-self.shape[i] // self.tile_grid_shape[i]) for i in range(2))
+ self.tile_grid_shape = tuple(
+ -(-self.shape[i] // self.tile_shape[i]) for i in range(2)
+ )
+ self.tile_shape = tuple(
+ -(-self.shape[i] // self.tile_grid_shape[i]) for i in range(2)
+ )
self.num_tiles = self.tile_grid_shape[0] * self.tile_grid_shape[1]
def get_tile_states(self, state):
@@ -149,23 +154,25 @@ def get_tile_states(self, state):
tile_height = min(self.tile_shape[1], self.shape[1] - y_offset)
new_state = copy.deepcopy(state)
new_state.partial_viewport = [
- x_offset / self.shape[0], y_offset / self.shape[1], tile_width / self.shape[0],
- tile_height / self.shape[1]
+ x_offset / self.shape[0],
+ y_offset / self.shape[1],
+ tile_width / self.shape[0],
+ tile_height / self.shape[1],
]
params = {
- 'tile_x': tile_x,
- 'tile_y': tile_y,
- 'x_offset': x_offset,
- 'y_offset': y_offset,
- 'tile_width': tile_width,
- 'tile_height': tile_height,
+ "tile_x": tile_x,
+ "tile_y": tile_y,
+ "x_offset": x_offset,
+ "y_offset": y_offset,
+ "tile_width": tile_width,
+ "tile_height": tile_height,
}
yield params, new_state
class ShardedTileGenerator(TileGenerator):
def __init__(self, state, segment_shard_size, reverse_bits, **kwargs):
- super(ShardedTileGenerator, self).__init__(**kwargs)
+ super().__init__(**kwargs)
self.state = state
self.reverse_bits = reverse_bits
self.total_segments = _get_total_segments(self.state)
@@ -175,11 +182,12 @@ def __init__(self, state, segment_shard_size, reverse_bits, **kwargs):
def get_states(self):
for shard_i, state in enumerate(
- _get_sharded_states(self.state,
- self.segment_shard_size,
- reverse_bits=self.reverse_bits)):
+ _get_sharded_states(
+ self.state, self.segment_shard_size, reverse_bits=self.reverse_bits
+ )
+ ):
for params, state in self.get_tile_states(state):
- params['segment_shard'] = shard_i
+ params["segment_shard"] = shard_i
yield params, state
@@ -187,7 +195,9 @@ class CaptureScreenshotRequest(NamedTuple):
state: neuroglancer.ViewerState
description: str
config_callback: Callable[[neuroglancer.viewer_config_state.ConfigState], None]
- response_callback: neuroglancer.viewer_config_state.ScreenshotReply
+ response_callback: Callable[
+ [neuroglancer.viewer_config_state.ScreenshotReply], None
+ ]
include_depth: bool = False
@@ -195,16 +205,18 @@ def buffered_iterator(base_iter, lock, buffer_size):
while True:
with lock:
buffered_items = list(itertools.islice(base_iter, buffer_size))
- if not buffered_items: break
- for item in buffered_items:
- yield item
-
-
-def capture_screenshots(viewer: neuroglancer.Viewer,
- request_iter: Iterator[CaptureScreenshotRequest],
- refresh_browser_callback: Callable[[], None],
- refresh_browser_timeout: int,
- num_to_prefetch: int = 1) -> None:
+ if not buffered_items:
+ break
+ yield from buffered_items
+
+
+def capture_screenshots(
+ viewer: neuroglancer.Viewer,
+ request_iter: Iterator[CaptureScreenshotRequest],
+ refresh_browser_callback: Callable[[], None],
+ refresh_browser_timeout: int,
+ num_to_prefetch: int = 1,
+) -> None:
prefetch_buffer = list(itertools.islice(request_iter, num_to_prefetch + 1))
while prefetch_buffer:
with viewer.config_state.txn() as s:
@@ -213,14 +225,19 @@ def capture_screenshots(viewer: neuroglancer.Viewer,
del s.prefetch[:]
for i, request in enumerate(prefetch_buffer[1:]):
s.prefetch.append(
- neuroglancer.PrefetchState(state=request.state, priority=num_to_prefetch - i))
+ neuroglancer.PrefetchState(
+ state=request.state, priority=num_to_prefetch - i
+ )
+ )
request = prefetch_buffer[0]
request.config_callback(s)
viewer.set_state(request.state)
- print('%s [%s] Requesting screenshot' % (
- datetime.datetime.now().strftime('%Y-%m-%dT%H:%M%S.%f'),
- request.description,
- ))
+ print(
+ "{} [{}] Requesting screenshot".format(
+ datetime.datetime.now().strftime("%Y-%m-%dT%H:%M%S.%f"),
+ request.description,
+ )
+ )
last_statistics_time = time.time()
def statistics_callback(statistics):
@@ -228,15 +245,16 @@ def statistics_callback(statistics):
last_statistics_time = time.time()
total = statistics.total
print(
- '%s [%s] Screenshot in progress: %6d/%6d chunks loaded (%10d bytes), %3d downloading'
+ "%s [%s] Screenshot in progress: %6d/%6d chunks loaded (%10d bytes), %3d downloading"
% (
- datetime.datetime.now().strftime('%Y-%m-%dT%H:%M%S.%f'),
+ datetime.datetime.now().strftime("%Y-%m-%dT%H:%M%S.%f"),
request.description,
total.visible_chunks_gpu_memory,
total.visible_chunks_total,
total.visible_gpu_memory,
total.visible_chunks_downloading,
- ))
+ )
+ )
event = threading.Event()
screenshot = None
@@ -262,6 +280,7 @@ def get_timeout():
continue
last_statistics_time = time.time()
refresh_browser_callback()
+ assert screenshot is not None
request.response_callback(screenshot)
del prefetch_buffer[0]
next_request = next(request_iter, None)
@@ -269,11 +288,14 @@ def get_timeout():
prefetch_buffer.append(next_request)
-def capture_screenshots_in_parallel(viewers: List[Tuple[neuroglancer.Viewer, Callable[[], None]]],
- request_iter: Iterator[CaptureScreenshotRequest],
- refresh_browser_timeout: numbers.Number, num_to_prefetch: int,
- total_requests: Optional[int] = None,
- buffer_size: Optional[int] = None):
+def capture_screenshots_in_parallel(
+ viewers: list[tuple[neuroglancer.Viewer, Callable[[], None]]],
+ request_iter: Iterator[CaptureScreenshotRequest],
+ refresh_browser_timeout: float,
+ num_to_prefetch: int,
+ total_requests: Optional[int] = None,
+ buffer_size: Optional[int] = None,
+):
if buffer_size is None:
if total_requests is None:
copy_of_requests = list(request_iter)
@@ -286,9 +308,9 @@ def capture_screenshots_in_parallel(viewers: List[Tuple[neuroglancer.Viewer, Cal
for viewer, refresh_browser_callback in viewers:
def capture_func(viewer, refresh_browser_callback):
- viewer_request_iter = buffered_iterator(base_iter=request_iter,
- lock=buffer_lock,
- buffer_size=buffer_size)
+ viewer_request_iter = buffered_iterator(
+ base_iter=request_iter, lock=buffer_lock, buffer_size=buffer_size
+ )
capture_screenshots(
viewer=viewer,
request_iter=viewer_request_iter,
@@ -297,7 +319,9 @@ def capture_func(viewer, refresh_browser_callback):
refresh_browser_callback=refresh_browser_callback,
)
- t = threading.Thread(target=capture_func, args=(viewer, refresh_browser_callback))
+ t = threading.Thread(
+ target=capture_func, args=(viewer, refresh_browser_callback)
+ )
t.start()
threads.append(t)
for t in threads:
@@ -305,21 +329,23 @@ def capture_func(viewer, refresh_browser_callback):
class MultiCapturer:
- def __init__(self,
- shape,
- include_depth,
- output,
- config_callback,
- num_to_prefetch,
- checkpoint_interval=60):
+ def __init__(
+ self,
+ shape,
+ include_depth,
+ output,
+ config_callback,
+ num_to_prefetch,
+ checkpoint_interval=60,
+ ):
self.include_depth = include_depth
self.checkpoint_interval = checkpoint_interval
self.config_callback = config_callback
self.num_to_prefetch = num_to_prefetch
self.output = output
self._processed = set()
- self.state_file = output + '.npz'
- self.temp_state_file = self.state_file + '.tmp'
+ self.state_file = output + ".npz"
+ self.temp_state_file = self.state_file + ".tmp"
self.image_array = np.zeros((shape[1], shape[0], 4), dtype=np.uint8)
if self.include_depth:
self.depth_array = np.zeros((shape[1], shape[0]), dtype=np.float32)
@@ -336,32 +362,32 @@ def _load_state(self):
return
with np.load(self.state_file, allow_pickle=True) as f:
if self.include_depth:
- self.depth_array = f['depth']
- self.image_array = f['image']
- self._processed = set(f['processed'].ravel()[0])
+ self.depth_array = f["depth"]
+ self.image_array = f["image"]
+ self._processed = set(f["processed"].ravel()[0])
def _save_state(self, save_image=False):
with self._add_image_lock:
processed = set(self._processed)
- with open(self.temp_state_file, 'wb') as f:
+ with open(self.temp_state_file, "wb") as f:
save_arrays = {
- 'image': self.image_array,
- 'processed': processed,
+ "image": self.image_array,
+ "processed": processed,
}
if self.include_depth:
- save_arrays['depth'] = self.depth_array
+ save_arrays["depth"] = self.depth_array
np.savez_compressed(f, **save_arrays)
os.replace(self.temp_state_file, self.state_file)
if save_image:
self._save_image()
def _save_state_async(self, save_image=False):
- print('Starting checkpointing')
+ print("Starting checkpointing")
def func():
try:
self._save_state()
- print('Done checkpointing')
+ print("Done checkpointing")
finally:
self._save_state_in_progress.set()
@@ -374,13 +400,17 @@ def _save_image(self):
def _add_image(self, params, screenshot):
with self._add_image_lock:
tile_image = screenshot.image_pixels
- tile_selector = np.s_[params['y_offset']:params['y_offset'] + params['tile_height'],
- params['x_offset']:params['x_offset'] + params['tile_width']]
+ tile_selector = np.s_[
+ params["y_offset"] : params["y_offset"] + params["tile_height"],
+ params["x_offset"] : params["x_offset"] + params["tile_width"],
+ ]
if self.include_depth:
tile_depth = screenshot.depth_array
depth_array_part = self.depth_array[tile_selector]
- mask = np.logical_and(np.logical_or(tile_depth != 0, depth_array_part == 0),
- tile_depth >= depth_array_part)
+ mask = np.logical_and(
+ np.logical_or(tile_depth != 0, depth_array_part == 0),
+ tile_depth >= depth_array_part,
+ )
depth_array_part[mask] = tile_depth[mask]
else:
mask = Ellipsis
@@ -388,11 +418,18 @@ def _add_image(self, params, screenshot):
self._processed.add(self._get_description(params))
self._num_states_processed += 1
elapsed = time.time() - self._start_time
- print('%4d tiles rendered in %5d seconds: %.1f seconds/tile' %
- (self._num_states_processed, elapsed, elapsed / self._num_states_processed))
+ print(
+ "%4d tiles rendered in %5d seconds: %.1f seconds/tile"
+ % (
+ self._num_states_processed,
+ elapsed,
+ elapsed / self._num_states_processed,
+ )
+ )
def _maybe_save_state(self):
- if not self._save_state_in_progress.is_set(): return
+ if not self._save_state_in_progress.is_set():
+ return
with self._add_image_lock:
if self._last_save_time + self.checkpoint_interval < time.time():
self._last_save_time = time.time()
@@ -400,46 +437,58 @@ def _maybe_save_state(self):
self._save_state_async(save_image=False)
def _get_description(self, params):
- segment_shard = params.get('segment_shard')
+ segment_shard = params.get("segment_shard")
if segment_shard is not None:
- prefix = 'segment_shard=%d ' % (segment_shard, )
+ prefix = "segment_shard=%d " % (segment_shard,)
else:
- prefix = ''
- return '%stile_x=%d tile_y=%d' % (prefix, params['tile_x'], params['tile_y'])
+ prefix = ""
+ return "%stile_x=%d tile_y=%d" % (prefix, params["tile_x"], params["tile_y"])
def _make_capture_request(self, params, state):
description = self._get_description(params)
- if description in self._processed: return None
+ if description in self._processed:
+ return None
def config_callback(s):
- s.viewer_size = (params['tile_width'], params['tile_height'])
+ s.viewer_size = (params["tile_width"], params["tile_height"])
self.config_callback(s)
def response_callback(screenshot):
self._add_image(params, screenshot)
self._maybe_save_state()
- return CaptureScreenshotRequest(state=state,
- description=self._get_description(params),
- config_callback=config_callback,
- response_callback=response_callback,
- include_depth=self.include_depth)
+ return CaptureScreenshotRequest(
+ state=state,
+ description=self._get_description(params),
+ config_callback=config_callback,
+ response_callback=response_callback,
+ include_depth=self.include_depth,
+ )
def _get_capture_screenshot_request_iter(self, state_iter):
for params, state in state_iter:
request = self._make_capture_request(params, state)
- if request is not None: yield request
-
- def capture(self, viewers, state_iter, refresh_browser_timeout: int, save_depth: bool, total_requests: int):
+ if request is not None:
+ yield request
+
+ def capture(
+ self,
+ viewers,
+ state_iter,
+ refresh_browser_timeout: int,
+ save_depth: bool,
+ total_requests: int,
+ ):
capture_screenshots_in_parallel(
viewers=viewers,
request_iter=self._get_capture_screenshot_request_iter(state_iter),
refresh_browser_timeout=refresh_browser_timeout,
num_to_prefetch=self.num_to_prefetch,
- total_requests=total_requests)
+ total_requests=total_requests,
+ )
if not self._save_state_in_progress.is_set():
- print('Waiting for previous save state to complete')
+ print("Waiting for previous save state to complete")
self._save_state_in_progress.wait()
if save_depth:
self._save_state()
@@ -458,11 +507,15 @@ def config_callback(s):
shape=(args.width, args.height),
tile_shape=(args.tile_width, args.tile_height),
)
- if segment_shard_size is not None and _should_shard_segments(state, segment_shard_size):
- gen = ShardedTileGenerator(state=state,
- segment_shard_size=segment_shard_size,
- reverse_bits=args.sort_segments_by_reversed_bits,
- **tile_parameters)
+ if segment_shard_size is not None and _should_shard_segments(
+ state, segment_shard_size
+ ):
+ gen = ShardedTileGenerator(
+ state=state,
+ segment_shard_size=segment_shard_size,
+ reverse_bits=args.sort_segments_by_reversed_bits,
+ **tile_parameters,
+ )
num_states = gen.num_tiles
state_iter = gen.get_states()
include_depth = True
@@ -473,7 +526,7 @@ def config_callback(s):
include_depth = False
capturer = MultiCapturer(
- shape=tile_parameters['shape'],
+ shape=tile_parameters["shape"],
include_depth=include_depth,
output=args.output,
config_callback=config_callback,
@@ -483,26 +536,37 @@ def config_callback(s):
num_output_shards = args.num_output_shards
tiles_per_output_shard = args.tiles_per_output_shard
output_shard = args.output_shard
- if (output_shard is None) != (num_output_shards is None and tiles_per_output_shard is None):
+ if (output_shard is None) != (
+ num_output_shards is None and tiles_per_output_shard is None
+ ):
raise ValueError(
- '--output-shard must be specified in combination with --num-output-shards or --tiles-per-output-shard'
+ "--output-shard must be specified in combination with --num-output-shards or --tiles-per-output-shard"
)
if output_shard is not None:
if num_output_shards is not None:
if num_output_shards < 1:
- raise ValueError('Invalid --num-output-shards: %d' % (num_output_shards, ))
+ raise ValueError(
+ "Invalid --num-output-shards: %d" % (num_output_shards,)
+ )
states_per_shard = -(-num_states // num_output_shards)
else:
if tiles_per_output_shard < 1:
- raise ValueError('Invalid --tiles-per-output-shard: %d' %
- (tiles_per_output_shard, ))
+ raise ValueError(
+ "Invalid --tiles-per-output-shard: %d" % (tiles_per_output_shard,)
+ )
num_output_shards = -(-num_states // tiles_per_output_shard)
states_per_shard = tiles_per_output_shard
if output_shard < 0 or output_shard >= num_output_shards:
- raise ValueError('Invalid --output-shard: %d' % (output_shard, ))
- print('Total states: %d, Number of output shards: %d' % (num_states, num_output_shards))
- state_iter = itertools.islice(state_iter, states_per_shard * output_shard,
- states_per_shard * (output_shard + 1))
+ raise ValueError("Invalid --output-shard: %d" % (output_shard,))
+ print(
+ "Total states: %d, Number of output shards: %d"
+ % (num_states, num_output_shards)
+ )
+ state_iter = itertools.islice(
+ state_iter,
+ states_per_shard * output_shard,
+ states_per_shard * (output_shard + 1),
+ )
else:
states_per_shard = num_states
capturer.capture(
@@ -515,34 +579,52 @@ def config_callback(s):
def define_state_modification_args(ap: argparse.ArgumentParser):
- ap.add_argument('--hide-axis-lines',
- dest='show_axis_lines',
- action='store_false',
- help='Override showAxisLines setting in state.')
- ap.add_argument('--hide-default-annotations',
- action='store_false',
- dest='show_default_annotations',
- help='Override showDefaultAnnotations setting in state.')
- ap.add_argument('--projection-scale-multiplier',
- type=float,
- help='Multiply projection view scale by specified factor.')
- ap.add_argument('--system-memory-limit',
- type=int,
- default=3 * 1024 * 1024 * 1024,
- help='System memory limit')
- ap.add_argument('--gpu-memory-limit',
- type=int,
- default=3 * 1024 * 1024 * 1024,
- help='GPU memory limit')
- ap.add_argument('--concurrent-downloads', type=int, default=32, help='Concurrent downloads')
- ap.add_argument('--layout', type=str, help='Override layout setting in state.')
- ap.add_argument('--cross-section-background-color',
- type=str,
- help='Background color for cross sections.')
- ap.add_argument('--scale-bar-scale', type=float, help='Scale factor for scale bar', default=1)
-
-
-def apply_state_modifications(state: neuroglancer.ViewerState, args: argparse.Namespace):
+ ap.add_argument(
+ "--hide-axis-lines",
+ dest="show_axis_lines",
+ action="store_false",
+ help="Override showAxisLines setting in state.",
+ )
+ ap.add_argument(
+ "--hide-default-annotations",
+ action="store_false",
+ dest="show_default_annotations",
+ help="Override showDefaultAnnotations setting in state.",
+ )
+ ap.add_argument(
+ "--projection-scale-multiplier",
+ type=float,
+ help="Multiply projection view scale by specified factor.",
+ )
+ ap.add_argument(
+ "--system-memory-limit",
+ type=int,
+ default=3 * 1024 * 1024 * 1024,
+ help="System memory limit",
+ )
+ ap.add_argument(
+ "--gpu-memory-limit",
+ type=int,
+ default=3 * 1024 * 1024 * 1024,
+ help="GPU memory limit",
+ )
+ ap.add_argument(
+ "--concurrent-downloads", type=int, default=32, help="Concurrent downloads"
+ )
+ ap.add_argument("--layout", type=str, help="Override layout setting in state.")
+ ap.add_argument(
+ "--cross-section-background-color",
+ type=str,
+ help="Background color for cross sections.",
+ )
+ ap.add_argument(
+ "--scale-bar-scale", type=float, help="Scale factor for scale bar", default=1
+ )
+
+
+def apply_state_modifications(
+ state: neuroglancer.ViewerState, args: argparse.Namespace
+):
state.selected_layer.visible = False
state.statistics.visible = False
if args.layout is not None:
@@ -562,70 +644,82 @@ def apply_state_modifications(state: neuroglancer.ViewerState, args: argparse.Na
def define_viewer_args(ap: argparse.ArgumentParser):
- ap.add_argument('--browser', choices=['chrome', 'firefox'], default='chrome')
- ap.add_argument('--no-webdriver',
- action='store_true',
- help='Do not open browser automatically via webdriver.')
- ap.add_argument('--no-headless',
- dest='headless',
- action='store_false',
- help='Use non-headless webdriver.')
- ap.add_argument('--docker-chromedriver',
- action='store_true',
- help='Run Chromedriver with options suitable for running inside docker')
- ap.add_argument('--debug-chromedriver',
- action='store_true',
- help='Enable debug logging in Chromedriver')
- ap.add_argument('--jobs',
- '-j',
- type=int,
- default=1,
- help='Number of browsers to use concurrently. '
- 'This may improve performance at the cost of greater memory usage. '
- 'On a 64GiB 16 hyperthread machine, --jobs=6 works well.')
+ ap.add_argument("--browser", choices=["chrome", "firefox"], default="chrome")
+ ap.add_argument(
+ "--no-webdriver",
+ action="store_true",
+ help="Do not open browser automatically via webdriver.",
+ )
+ ap.add_argument(
+ "--no-headless",
+ dest="headless",
+ action="store_false",
+ help="Use non-headless webdriver.",
+ )
+ ap.add_argument(
+ "--docker-chromedriver",
+ action="store_true",
+ help="Run Chromedriver with options suitable for running inside docker",
+ )
+ ap.add_argument(
+ "--debug-chromedriver",
+ action="store_true",
+ help="Enable debug logging in Chromedriver",
+ )
+ ap.add_argument(
+ "--jobs",
+ "-j",
+ type=int,
+ default=1,
+ help="Number of browsers to use concurrently. "
+ "This may improve performance at the cost of greater memory usage. "
+ "On a 64GiB 16 hyperthread machine, --jobs=6 works well.",
+ )
def define_size_args(ap: argparse.ArgumentParser):
- ap.add_argument('--width', type=int, default=3840, help='Width in pixels of image.')
- ap.add_argument('--height', type=int, default=2160, help='Height in pixels of image.')
+ ap.add_argument("--width", type=int, default=3840, help="Width in pixels of image.")
+ ap.add_argument(
+ "--height", type=int, default=2160, help="Height in pixels of image."
+ )
def define_tile_args(ap: argparse.ArgumentParser):
ap.add_argument(
- '--tile-width',
+ "--tile-width",
type=int,
default=4096,
- help=
- 'Width in pixels of single tile. If total width is larger, the screenshot will be captured as multiple tiles.'
+ help="Width in pixels of single tile. If total width is larger, the screenshot will be captured as multiple tiles.",
)
ap.add_argument(
- '--tile-height',
+ "--tile-height",
type=int,
default=4096,
- help=
- 'Height in pixels of single tile. If total height is larger, the screenshot will be captured as multiple tiles.'
+ help="Height in pixels of single tile. If total height is larger, the screenshot will be captured as multiple tiles.",
+ )
+ ap.add_argument(
+ "--segment-shard-size",
+ type=int,
+ help="Maximum number of segments to render simultaneously. "
+ "If the number of selected segments exceeds this number, "
+ "multiple passes will be used (transparency not supported).",
)
- ap.add_argument('--segment-shard-size',
- type=int,
- help='Maximum number of segments to render simultaneously. '
- 'If the number of selected segments exceeds this number, '
- 'multiple passes will be used (transparency not supported).')
ap.add_argument(
- '--sort-segments-by-reversed-bits',
- action='store_true',
- help=
- 'When --segment-shard-size is also specified, normally segment ids are ordered numerically before being partitioned into shards. If segment ids are spatially correlated, then this can lead to slower and more memory-intensive rendering. If --sort-segments-by-reversed-bits is specified, segment ids are instead ordered by their bit reversed values, which may avoid the spatial correlation.'
+ "--sort-segments-by-reversed-bits",
+ action="store_true",
+ help="When --segment-shard-size is also specified, normally segment ids are ordered numerically before being partitioned into shards. If segment ids are spatially correlated, then this can lead to slower and more memory-intensive rendering. If --sort-segments-by-reversed-bits is specified, segment ids are instead ordered by their bit reversed values, which may avoid the spatial correlation.",
)
def define_capture_args(ap: argparse.ArgumentParser):
- ap.add_argument('--prefetch', type=int, default=1, help='Number of states to prefetch.')
ap.add_argument(
- '--refresh-browser-timeout',
+ "--prefetch", type=int, default=1, help="Number of states to prefetch."
+ )
+ ap.add_argument(
+ "--refresh-browser-timeout",
type=int,
default=60,
- help=
- 'Number of seconds without receiving statistics while capturing a screenshot before browser is considered unresponsive.'
+ help="Number of seconds without receiving statistics while capturing a screenshot before browser is considered unresponsive.",
)
@@ -633,12 +727,12 @@ def define_capture_args(ap: argparse.ArgumentParser):
def get_viewers(args: argparse.Namespace):
if args.no_webdriver:
viewers = [neuroglancer.Viewer() for _ in range(args.jobs)]
- print('Open the following URLs to begin rendering')
+ print("Open the following URLs to begin rendering")
for viewer in viewers:
print(viewer)
def refresh_browser_callback():
- print('Browser unresponsive, consider reloading')
+ print("Browser unresponsive, consider reloading")
yield [(viewer, refresh_browser_callback) for viewer in viewers]
else:
@@ -652,20 +746,22 @@ def _make_webdriver():
)
def refresh_browser_callback():
- print('Browser unresponsive, reloading')
+ print("Browser unresponsive, reloading")
webdriver.reload_browser()
return webdriver, refresh_browser_callback
webdrivers = [_make_webdriver() for _ in range(args.jobs)]
try:
- yield [(webdriver.viewer, refresh_browser_callback)
- for webdriver, refresh_browser_callback in webdrivers]
+ yield [
+ (webdriver.viewer, refresh_browser_callback)
+ for webdriver, refresh_browser_callback in webdrivers
+ ]
finally:
for webdriver, _ in webdrivers:
try:
webdriver.__exit__()
- except:
+ except Exception:
pass
@@ -682,20 +778,22 @@ def main(args=None):
neuroglancer.cli.add_server_arguments(ap)
neuroglancer.cli.add_state_arguments(ap, required=True)
- ap.add_argument('output', help='Output path of screenshot file in PNG format.')
+ ap.add_argument("output", help="Output path of screenshot file in PNG format.")
- ap.add_argument('--output-shard', type=int, help='Output shard to write.')
+ ap.add_argument("--output-shard", type=int, help="Output shard to write.")
output_shard_group = ap.add_mutually_exclusive_group(required=False)
- output_shard_group.add_argument('--num-output-shards',
- type=int,
- help='Number of output shards.')
- output_shard_group.add_argument('--tiles-per-output-shard',
- type=int,
- help='Number of tiles per output shard.')
- ap.add_argument('--checkpoint-interval',
- type=float,
- default=60,
- help='Interval in seconds at which to save checkpoints.')
+ output_shard_group.add_argument(
+ "--num-output-shards", type=int, help="Number of output shards."
+ )
+ output_shard_group.add_argument(
+ "--tiles-per-output-shard", type=int, help="Number of tiles per output shard."
+ )
+ ap.add_argument(
+ "--checkpoint-interval",
+ type=float,
+ default=60,
+ help="Interval in seconds at which to save checkpoints.",
+ )
define_state_modification_args(ap)
define_viewer_args(ap)
@@ -706,5 +804,5 @@ def main(args=None):
run(ap.parse_args(args))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/python/neuroglancer/tool/video_tool.py b/python/neuroglancer/tool/video_tool.py
index bb061caed9..8871758675 100755
--- a/python/neuroglancer/tool/video_tool.py
+++ b/python/neuroglancer/tool/video_tool.py
@@ -58,7 +58,6 @@
- the width and height are based on the browser size.
"""
-from __future__ import print_function, division
import argparse
import bisect
@@ -67,17 +66,16 @@
import os
import threading
import time
-from typing import Tuple
import webbrowser
import atomicwrites
+
import neuroglancer
import neuroglancer.cli
import neuroglancer.tool.screenshot
-class PlaybackManager(object):
-
+class PlaybackManager:
def __init__(self, keypoints, frames_per_second):
self.keypoints = keypoints
self.frames_per_second = frames_per_second
@@ -86,11 +84,13 @@ def __init__(self, keypoints, frames_per_second):
self.keypoint_start_frame = []
self.keypoint_end_frame = []
for k in keypoints[:-1]:
- duration = k['transition_duration']
+ duration = k["transition_duration"]
if duration == 0:
cur_frames = 0
else:
- cur_frames = max(1, int(round(k['transition_duration'] * frames_per_second)))
+ cur_frames = max(
+ 1, int(round(k["transition_duration"] * frames_per_second))
+ )
self.keypoint_start_frame.append(self.total_frames)
self.total_frames += cur_frames
self.keypoint_end_frame.append(self.total_frames)
@@ -108,12 +108,12 @@ def get_frame_from_elapsed_time(self, elapsed_time):
def get_frame(self, frame_i):
start_keypoint = self.get_keypoint_from_frame(frame_i)
- a = self.keypoints[start_keypoint]['state']
+ a = self.keypoints[start_keypoint]["state"]
if start_keypoint == len(self.keypoints) - 1:
return a
else:
end_keypoint = start_keypoint + 1
- b = self.keypoints[end_keypoint]['state']
+ b = self.keypoints[end_keypoint]["state"]
start_frame = self.keypoint_start_frame[start_keypoint]
end_frame = self.keypoint_end_frame[start_keypoint]
t = (frame_i - start_frame) / (end_frame - start_frame)
@@ -132,24 +132,29 @@ def set_state(self, viewer, frame_i, prefetch_frames):
del s.prefetch[:]
for i, state in enumerate(states[1:]):
s.prefetch.append(
- neuroglancer.PrefetchState(state=state, priority=prefetch_frames - i))
-
+ neuroglancer.PrefetchState(
+ state=state, priority=prefetch_frames - i
+ )
+ )
-class EditorPlaybackManager(object):
+class EditorPlaybackManager:
def __init__(self, script_editor, playing=True, frames_per_second=5):
self.script_editor = script_editor
self.frames_per_second = frames_per_second
- self.playback_manager = PlaybackManager(script_editor.keypoints,
- frames_per_second=self.frames_per_second)
+ self.playback_manager = PlaybackManager(
+ script_editor.keypoints, frames_per_second=self.frames_per_second
+ )
self.current_keypoint_index = max(1, script_editor.keypoint_index)
self.script_editor._set_keypoint_index(self.current_keypoint_index)
self.playing = playing
script_editor.playback_manager = self
- self.current_frame = self.playback_manager.keypoint_start_frame[self.current_keypoint_index
- - 1]
- self.start_time = (time.time() -
- self.current_frame / self.playback_manager.frames_per_second)
+ self.current_frame = self.playback_manager.keypoint_start_frame[
+ self.current_keypoint_index - 1
+ ]
+ self.start_time = (
+ time.time() - self.current_frame / self.playback_manager.frames_per_second
+ )
t = threading.Thread(target=self._thread_func)
t.daemon = True
t.start()
@@ -158,18 +163,31 @@ def __init__(self, script_editor, playing=True, frames_per_second=5):
def _update_current_frame(self):
elapsed_time = time.time() - self.start_time
- self.current_frame = self.playback_manager.get_frame_from_elapsed_time(elapsed_time)
+ self.current_frame = self.playback_manager.get_frame_from_elapsed_time(
+ elapsed_time
+ )
def _display_frame(self):
frame_i = self.current_frame
- keypoint_index = self.playback_manager.get_keypoint_from_frame(
- min(frame_i, self.playback_manager.total_frames - 1)) + 1
- current_duration = self.script_editor.keypoints[keypoint_index - 1]['transition_duration']
- transition_time = (frame_i - self.playback_manager.keypoint_start_frame[keypoint_index - 1]
- ) / self.playback_manager.frames_per_second
- self.playback_status = '%s frame %d/%d transition %.1f/%g' % (
- 'PLAYING' if self.playing else 'PAUSED', frame_i, self.playback_manager.total_frames,
- transition_time, current_duration)
+ keypoint_index = (
+ self.playback_manager.get_keypoint_from_frame(
+ min(frame_i, self.playback_manager.total_frames - 1)
+ )
+ + 1
+ )
+ current_duration = self.script_editor.keypoints[keypoint_index - 1][
+ "transition_duration"
+ ]
+ transition_time = (
+ frame_i - self.playback_manager.keypoint_start_frame[keypoint_index - 1]
+ ) / self.playback_manager.frames_per_second
+ self.playback_status = "%s frame %d/%d transition %.1f/%g" % (
+ "PLAYING" if self.playing else "PAUSED",
+ frame_i,
+ self.playback_manager.total_frames,
+ transition_time,
+ current_duration,
+ )
if keypoint_index != self.current_keypoint_index:
self.script_editor._set_keypoint_index(keypoint_index)
self.current_keypoint_index = keypoint_index
@@ -178,12 +196,15 @@ def _display_frame(self):
self.script_editor._update_status()
self.should_stop.set()
return
- self.playback_manager.set_state(self.script_editor.viewer, frame_i, prefetch_frames=10)
+ self.playback_manager.set_state(
+ self.script_editor.viewer, frame_i, prefetch_frames=10
+ )
self.script_editor._update_status()
def reload(self):
- self.playback_manager = PlaybackManager(self.script_editor.keypoints,
- frames_per_second=self.frames_per_second)
+ self.playback_manager = PlaybackManager(
+ self.script_editor.keypoints, frames_per_second=self.frames_per_second
+ )
self.current_keypoint_index = None
self.seek_frame(0)
@@ -191,8 +212,10 @@ def pause(self):
if self.playing:
self.seek_frame(0)
else:
- self.start_time = time.time(
- ) - self.current_frame / self.playback_manager.frames_per_second
+ self.start_time = (
+ time.time()
+ - self.current_frame / self.playback_manager.frames_per_second
+ )
self.playing = True
def seek_frame(self, amount):
@@ -200,7 +223,9 @@ def seek_frame(self, amount):
self.playing = False
self._update_current_frame()
self.current_frame += amount
- self.current_frame = max(0, min(self.current_frame, self.playback_manager.total_frames - 1))
+ self.current_frame = max(
+ 0, min(self.current_frame, self.playback_manager.total_frames - 1)
+ )
self._display_frame()
def _thread_func(self):
@@ -222,7 +247,7 @@ def _update(self):
def load_script(script_path, transition_duration=1):
keypoints = []
- with open(script_path, 'r') as f:
+ with open(script_path) as f:
while True:
url = f.readline()
if not url:
@@ -232,20 +257,19 @@ def load_script(script_path, transition_duration=1):
duration = transition_duration
else:
duration = float(line)
- keypoints.append({
- 'state': neuroglancer.parse_url(url),
- 'transition_duration': duration
- })
+ keypoints.append(
+ {"state": neuroglancer.parse_url(url), "transition_duration": duration}
+ )
return keypoints
def save_script(script_path, keypoints):
- temp_path = script_path + '.tmp'
- with open(temp_path, 'w') as f:
+ temp_path = script_path + ".tmp"
+ with open(temp_path, "w") as f:
for x in keypoints:
- f.write(neuroglancer.to_url(x['state']) + '\n')
- f.write(str(x['transition_duration']) + '\n')
- if hasattr(os, 'replace'):
+ f.write(neuroglancer.to_url(x["state"]) + "\n")
+ f.write(str(x["transition_duration"]) + "\n")
+ if hasattr(os, "replace"):
# Only available on Python3
os.replace(temp_path, script_path)
else:
@@ -253,10 +277,16 @@ def save_script(script_path, keypoints):
os.rename(temp_path, script_path)
-class ScriptEditor(object):
-
- def __init__(self, script_path, transition_duration, fullscreen_width, fullscreen_height,
- fullscreen_scale_bar_scale, frames_per_second):
+class ScriptEditor:
+ def __init__(
+ self,
+ script_path,
+ transition_duration,
+ fullscreen_width,
+ fullscreen_height,
+ fullscreen_scale_bar_scale,
+ frames_per_second,
+ ):
self.script_path = script_path
self.viewer = neuroglancer.Viewer()
self.frames_per_second = frames_per_second
@@ -276,47 +306,49 @@ def __init__(self, script_path, transition_duration, fullscreen_width, fullscree
self.is_dirty = True
self.is_fullscreen = False
keybindings = [
- ('keyk', 'add-keypoint'),
- ('bracketleft', 'prev-keypoint'),
- ('bracketright', 'next-keypoint'),
- ('backspace', 'delete-keypoint'),
- ('shift+bracketleft', 'decrease-duration'),
- ('shift+bracketright', 'increase-duration'),
- ('home', 'first-keypoint'),
- ('end', 'last-keypoint'),
- ('keyq', 'quit'),
- ('enter', 'toggle-play'),
- ('keyf', 'toggle-fullscreen'),
- ('keyj', 'revert-script'),
- ('comma', 'prev-frame'),
- ('period', 'next-frame'),
+ ("keyk", "add-keypoint"),
+ ("bracketleft", "prev-keypoint"),
+ ("bracketright", "next-keypoint"),
+ ("backspace", "delete-keypoint"),
+ ("shift+bracketleft", "decrease-duration"),
+ ("shift+bracketright", "increase-duration"),
+ ("home", "first-keypoint"),
+ ("end", "last-keypoint"),
+ ("keyq", "quit"),
+ ("enter", "toggle-play"),
+ ("keyf", "toggle-fullscreen"),
+ ("keyj", "revert-script"),
+ ("comma", "prev-frame"),
+ ("period", "next-frame"),
]
with self.viewer.config_state.txn() as s:
for k, a in keybindings:
s.input_event_bindings.viewer[k] = a
s.input_event_bindings.slice_view[k] = a
s.input_event_bindings.perspective_view[k] = a
- self._keybinding_message = ' '.join('%s=%s' % x for x in keybindings)
- self.viewer.actions.add('add-keypoint', self._add_keypoint)
- self.viewer.actions.add('prev-keypoint', self._prev_keypoint)
- self.viewer.actions.add('next-keypoint', self._next_keypoint)
- self.viewer.actions.add('delete-keypoint', self._delete_keypoint)
- self.viewer.actions.add('increase-duration', self._increase_duration)
- self.viewer.actions.add('decrease-duration', self._decrease_duration)
- self.viewer.actions.add('first-keypoint', self._first_keypoint)
- self.viewer.actions.add('last-keypoint', self._last_keypoint)
- self.viewer.actions.add('quit', self._quit)
- self.viewer.actions.add('toggle-play', self._toggle_play)
- self.viewer.actions.add('toggle-fullscreen', self._toggle_fullscreen)
- self.viewer.actions.add('revert-script', self._revert_script)
- self.viewer.actions.add('next-frame', self._next_frame)
- self.viewer.actions.add('prev-frame', self._prev_frame)
+ self._keybinding_message = " ".join("{}={}".format(*x) for x in keybindings)
+ self.viewer.actions.add("add-keypoint", self._add_keypoint)
+ self.viewer.actions.add("prev-keypoint", self._prev_keypoint)
+ self.viewer.actions.add("next-keypoint", self._next_keypoint)
+ self.viewer.actions.add("delete-keypoint", self._delete_keypoint)
+ self.viewer.actions.add("increase-duration", self._increase_duration)
+ self.viewer.actions.add("decrease-duration", self._decrease_duration)
+ self.viewer.actions.add("first-keypoint", self._first_keypoint)
+ self.viewer.actions.add("last-keypoint", self._last_keypoint)
+ self.viewer.actions.add("quit", self._quit)
+ self.viewer.actions.add("toggle-play", self._toggle_play)
+ self.viewer.actions.add("toggle-fullscreen", self._toggle_fullscreen)
+ self.viewer.actions.add("revert-script", self._revert_script)
+ self.viewer.actions.add("next-frame", self._next_frame)
+ self.viewer.actions.add("prev-frame", self._prev_frame)
self.playback_manager = None
self._set_keypoint_index(1)
def _revert_script(self, s):
if os.path.exists(self.script_path):
- self.keypoints = load_script(self.script_path, self.default_transition_duration)
+ self.keypoints = load_script(
+ self.script_path, self.default_transition_duration
+ )
if self.playback_manager is not None:
self.playback_manager.reload()
else:
@@ -338,19 +370,23 @@ def _toggle_fullscreen(self, s):
def _next_frame(self, s):
if self.playback_manager is None:
- EditorPlaybackManager(self, playing=False, frames_per_second=self.frames_per_second)
+ EditorPlaybackManager(
+ self, playing=False, frames_per_second=self.frames_per_second
+ )
self.playback_manager.seek_frame(1)
def _prev_frame(self, s):
if self.playback_manager is None:
- EditorPlaybackManager(self, playing=False, frames_per_second=self.frames_per_second)
+ EditorPlaybackManager(
+ self, playing=False, frames_per_second=self.frames_per_second
+ )
self.playback_manager.seek_frame(-1)
def _add_keypoint(self, s):
- self.keypoints.insert(self.keypoint_index, {
- 'state': s.viewer_state,
- 'transition_duration': self.transition_duration
- })
+ self.keypoints.insert(
+ self.keypoint_index,
+ {"state": s.viewer_state, "transition_duration": self.transition_duration},
+ )
self.keypoint_index += 1
self.is_dirty = False
self.save()
@@ -369,7 +405,7 @@ def _set_transition_duration(self, value):
self._stop_playback()
self.transition_duration = value
if self.keypoint_index > 0:
- self.keypoints[self.keypoint_index - 1]['transition_duration'] = value
+ self.keypoints[self.keypoint_index - 1]["transition_duration"] = value
self.save()
self._update_status()
@@ -385,7 +421,7 @@ def _decrease_duration(self, s):
def _get_is_dirty(self):
if self.keypoint_index == 0:
return True
- state = self.keypoints[self.keypoint_index - 1]['state']
+ state = self.keypoints[self.keypoint_index - 1]["state"]
return state.to_json() != self.viewer.state.to_json()
def _viewer_state_changed(self):
@@ -410,8 +446,10 @@ def _set_keypoint_index(self, index):
self.keypoint_index = index
state_index = max(0, index - 1)
if len(self.keypoints) > 0:
- self.viewer.set_state(self.keypoints[state_index]['state'])
- self.transition_duration = self.keypoints[state_index]['transition_duration']
+ self.viewer.set_state(self.keypoints[state_index]["state"])
+ self.transition_duration = self.keypoints[state_index][
+ "transition_duration"
+ ]
self.is_dirty = False
else:
self.is_dirty = True
@@ -440,11 +478,11 @@ def _update_status(self):
if self.playback_manager is not None:
dirty_message = self.playback_manager.playback_status
elif self.is_dirty:
- dirty_message = ' [ CHANGED ]'
+ dirty_message = " [ CHANGED ]"
else:
- dirty_message = ''
+ dirty_message = ""
- status = '[ Keypoint %d/%d ]%s [ transition duration %g s ] %s' % (
+ status = "[ Keypoint %d/%d ]%s [ transition duration %g s ] %s" % (
self.keypoint_index,
len(self.keypoints),
dirty_message,
@@ -452,36 +490,39 @@ def _update_status(self):
self._keybinding_message,
)
with self.viewer.config_state.txn() as s:
- s.status_messages['status'] = status
+ s.status_messages["status"] = status
def _quit(self, s):
self.quit_event.set()
def run_edit(args):
- editor = ScriptEditor(script_path=args.script,
- transition_duration=args.duration,
- fullscreen_width=args.width,
- fullscreen_height=args.height,
- fullscreen_scale_bar_scale=args.scale_bar_scale,
- frames_per_second=args.fps)
+ editor = ScriptEditor(
+ script_path=args.script,
+ transition_duration=args.duration,
+ fullscreen_width=args.width,
+ fullscreen_height=args.height,
+ fullscreen_scale_bar_scale=args.scale_bar_scale,
+ frames_per_second=args.fps,
+ )
print(editor.viewer)
if args.browser:
webbrowser.open_new(editor.viewer.get_viewer_url())
editor.quit_event.wait()
-def _get_states_to_capture(keypoints, fps, resume, output_directory):
-
+def _get_states_to_capture(
+ keypoints, fps, resume, output_directory
+) -> list[tuple[int, float, neuroglancer.ViewerState, str]]:
def get_output_path(frame_number: int) -> str:
- return os.path.join(args.output_directory, '%07d.png' % frame_number)
+ return os.path.join(args.output_directory, "%07d.png" % frame_number)
- states_to_capture = []
+ states_to_capture: list[tuple[int, float, neuroglancer.ViewerState, str]] = []
frame_number = 0
for i in range(len(keypoints) - 1):
- a = keypoints[i]['state']
- b = keypoints[i + 1]['state']
- duration = keypoints[i]['transition_duration']
+ a = keypoints[i]["state"]
+ b = keypoints[i + 1]["state"]
+ duration = keypoints[i]["transition_duration"]
num_frames = max(1, int(duration * fps))
for frame_i in range(num_frames):
t = frame_i / num_frames
@@ -496,39 +537,38 @@ def get_output_path(frame_number: int) -> str:
def run_render(args):
keypoints = load_script(args.script)
for keypoint in keypoints:
- neuroglancer.tool.screenshot.apply_state_modifications(keypoint['state'], args)
+ neuroglancer.tool.screenshot.apply_state_modifications(keypoint["state"], args)
fps = args.fps
- total_frames = sum(max(1, k['transition_duration'] * fps) for k in keypoints[:-1])
+ total_frames = sum(max(1, k["transition_duration"] * fps) for k in keypoints[:-1])
os.makedirs(args.output_directory, exist_ok=True)
- states_to_capture = _get_states_to_capture(keypoints,
- fps=fps,
- resume=args.resume,
- output_directory=args.output_directory)
+ states_to_capture = _get_states_to_capture(
+ keypoints, fps=fps, resume=args.resume, output_directory=args.output_directory
+ )
num_frames_written = [total_frames - len(states_to_capture)]
lock = threading.Lock()
- def make_request(
- state_to_capture: Tuple[int, int, neuroglancer.ViewerState]
- ) -> neuroglancer.tool.screenshot.CaptureScreenshotRequest:
+ def make_request(state_to_capture: tuple[int, int, neuroglancer.ViewerState, str]):
frame_number, t, state, path = state_to_capture
def config_callback(s):
s.viewer_size = (args.width, args.height)
s.scale_bar_options.scale_factor = args.scale_bar_scale
- frame_desc = '%.3f/%5d' % (t, len(keypoints))
+ frame_desc = "%.3f/%5d" % (t, len(keypoints))
def response_callback(screenshot):
- with atomicwrites.atomic_write(path, mode='wb', overwrite=True) as f:
+ with atomicwrites.atomic_write(path, mode="wb", overwrite=True) as f:
f.write(screenshot.image)
with lock:
num_frames_written[0] += 1
cur_num_frames_written = num_frames_written[0]
- print('[%07d/%07d] keypoint %s: %s' %
- (cur_num_frames_written, total_frames, frame_desc, path))
+ print(
+ "[%07d/%07d] keypoint %s: %s"
+ % (cur_num_frames_written, total_frames, frame_desc, path)
+ )
return neuroglancer.tool.screenshot.CaptureScreenshotRequest(
config_callback=config_callback,
@@ -540,14 +580,18 @@ def response_callback(screenshot):
with neuroglancer.tool.screenshot.get_viewers(args) as viewers:
neuroglancer.tool.screenshot.capture_screenshots_in_parallel(
viewers=viewers,
- request_iter=[make_request(state_to_capture) for state_to_capture in states_to_capture],
+ request_iter=[
+ make_request(state_to_capture) for state_to_capture in states_to_capture
+ ],
refresh_browser_timeout=args.refresh_browser_timeout,
num_to_prefetch=args.prefetch,
- total_requests=len(states_to_capture))
+ total_requests=len(states_to_capture),
+ )
def run_pan(args):
from scipy.spatial.transform import Rotation
+
state = args.state
duration = args.duration
@@ -565,40 +609,52 @@ def run_pan(args):
save_script(args.script, keypoints)
-if __name__ == '__main__':
+if __name__ == "__main__":
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
- sub_aps = ap.add_subparsers(help='command to run')
- ap_edit = sub_aps.add_parser('edit', help='Create or edit a script.')
+ sub_aps = ap.add_subparsers(help="command to run")
+ ap_edit = sub_aps.add_parser("edit", help="Create or edit a script.")
ap_edit.set_defaults(func=run_edit)
- ap_render = sub_aps.add_parser('render', help='Render a script.')
+ ap_render = sub_aps.add_parser("render", help="Render a script.")
ap_render.set_defaults(func=run_render)
- ap_pan = sub_aps.add_parser('pan', help='Create 360 degree pan script.')
+ ap_pan = sub_aps.add_parser("pan", help="Create 360 degree pan script.")
ap_pan.set_defaults(func=run_pan)
neuroglancer.cli.add_state_arguments(ap_pan, required=True)
- ap_pan.add_argument('-d', '--duration', type=float, help="Pan duration in seconds.", default=4)
+ ap_pan.add_argument(
+ "-d", "--duration", type=float, help="Pan duration in seconds.", default=4
+ )
for ap_sub in [ap_edit, ap_render, ap_pan]:
- ap_sub.add_argument('script', help='Path to script file to read and write.')
-
- ap_edit.add_argument('-d',
- '--duration',
- type=float,
- help='Default transition duration in seconds.',
- default=1)
- ap_edit.add_argument('--browser', action='store_true', help='Open web browser automatically.')
- ap_edit.add_argument('--width', type=int, help='Frame width', default=1920)
- ap_edit.add_argument('--height', type=int, help='Frame height', default=1080)
- ap_edit.add_argument('--scale-bar-scale',
- type=float,
- help='Scale factor for scale bar',
- default=1)
- ap_edit.add_argument('-f', '--fps', type=float, help='Frames per second.', default=5)
-
- ap_render.add_argument('output_directory',
- help='Directory in which to write screenshot frames.')
- ap_render.add_argument('-f', '--fps', type=float, help='Frames per second.', default=24)
- ap_render.add_argument('--resume', action='store_true', help='Skip already rendered frames.')
+ ap_sub.add_argument("script", help="Path to script file to read and write.")
+
+ ap_edit.add_argument(
+ "-d",
+ "--duration",
+ type=float,
+ help="Default transition duration in seconds.",
+ default=1,
+ )
+ ap_edit.add_argument(
+ "--browser", action="store_true", help="Open web browser automatically."
+ )
+ ap_edit.add_argument("--width", type=int, help="Frame width", default=1920)
+ ap_edit.add_argument("--height", type=int, help="Frame height", default=1080)
+ ap_edit.add_argument(
+ "--scale-bar-scale", type=float, help="Scale factor for scale bar", default=1
+ )
+ ap_edit.add_argument(
+ "-f", "--fps", type=float, help="Frames per second.", default=5
+ )
+
+ ap_render.add_argument(
+ "output_directory", help="Directory in which to write screenshot frames."
+ )
+ ap_render.add_argument(
+ "-f", "--fps", type=float, help="Frames per second.", default=24
+ )
+ ap_render.add_argument(
+ "--resume", action="store_true", help="Skip already rendered frames."
+ )
neuroglancer.tool.screenshot.define_state_modification_args(ap_render)
neuroglancer.tool.screenshot.define_viewer_args(ap_render)
neuroglancer.tool.screenshot.define_size_args(ap_render)
diff --git a/python/neuroglancer/trackable_state.py b/python/neuroglancer/trackable_state.py
index 6e92e0f808..8c393abfe6 100644
--- a/python/neuroglancer/trackable_state.py
+++ b/python/neuroglancer/trackable_state.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import contextlib
import copy
@@ -24,7 +23,8 @@
class ConcurrentModificationError(RuntimeError):
pass
-class ChangeNotifier(object):
+
+class ChangeNotifier:
def __init__(self):
self.__changed_callbacks = set()
self.change_count = 0
@@ -48,28 +48,36 @@ def _dispatch_changed_callbacks(self):
for callback in self.__changed_callbacks:
callback()
+
class TrackableState(ChangeNotifier):
def __init__(self, wrapper_type, transform_state=None):
- super(TrackableState, self).__init__()
+ super().__init__()
self._raw_state = {}
self._lock = threading.RLock()
self._generation = make_random_token()
self._wrapped_state = None
self._wrapper_type = wrapper_type
if transform_state is None:
+
def transform_state_function(new_state):
if isinstance(new_state, wrapper_type):
return new_state.to_json()
return new_state
+
transform_state = transform_state_function
self._transform_state = transform_state
def set_state(self, new_state, generation=None, existing_generation=None):
with self._lock:
- if existing_generation is not None and self._generation != existing_generation:
+ if (
+ existing_generation is not None
+ and self._generation != existing_generation
+ ):
raise ConcurrentModificationError
new_state = self._transform_state(new_state)
- if new_state != self._raw_state or (generation is not None and generation != self._generation):
+ if new_state != self._raw_state or (
+ generation is not None and generation != self._generation
+ ):
if generation is None:
generation = make_random_token()
self._raw_state = new_state
@@ -101,7 +109,9 @@ def state(self):
with self._lock:
wrapped_state = self._wrapped_state
if wrapped_state is None:
- wrapped_state = self._wrapped_state = self._wrapper_type(self._raw_state, _readonly=True)
+ wrapped_state = self._wrapped_state = self._wrapper_type(
+ self._raw_state, _readonly=True
+ )
return wrapped_state
@contextlib.contextmanager
@@ -131,4 +141,4 @@ def retry_txn(self, func, retries=10, lock=False):
raise
def __repr__(self):
- return u'%s(%r)' % (type(self).__name__, self.state)
+ return f"{type(self).__name__}({self.state!r})"
diff --git a/python/neuroglancer/url_state.py b/python/neuroglancer/url_state.py
index 29fff16b8d..262663df58 100644
--- a/python/neuroglancer/url_state.py
+++ b/python/neuroglancer/url_state.py
@@ -12,26 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-import collections
import json
import re
-
-from six.moves import urllib
+import urllib.parse
from . import viewer_state
from .json_utils import json_encoder_default
from .json_wrappers import to_json
-SINGLE_QUOTE_STRING_PATTERN = u'(\'(?:[^\'\\\\]|(?:\\\\.))*\')'
-DOUBLE_QUOTE_STRING_PATTERN = u'("(?:[^"\\\\]|(?:\\\\.))*")'
-SINGLE_OR_DOUBLE_QUOTE_STRING_PATTERN = SINGLE_QUOTE_STRING_PATTERN + u'|' + DOUBLE_QUOTE_STRING_PATTERN
-DOUBLE_OR_SINGLE_QUOTE_STRING_PATTERN = DOUBLE_QUOTE_STRING_PATTERN + u'|' + SINGLE_QUOTE_STRING_PATTERN
+SINGLE_QUOTE_STRING_PATTERN = "('(?:[^'\\\\]|(?:\\\\.))*')"
+DOUBLE_QUOTE_STRING_PATTERN = '("(?:[^"\\\\]|(?:\\\\.))*")'
+SINGLE_OR_DOUBLE_QUOTE_STRING_PATTERN = (
+ SINGLE_QUOTE_STRING_PATTERN + "|" + DOUBLE_QUOTE_STRING_PATTERN
+)
+DOUBLE_OR_SINGLE_QUOTE_STRING_PATTERN = (
+ DOUBLE_QUOTE_STRING_PATTERN + "|" + SINGLE_QUOTE_STRING_PATTERN
+)
-DOUBLE_QUOTE_PATTERN = u'^((?:[^"\'\\\\]|(?:\\\\.))*)"'
-SINGLE_QUOTE_PATTERN = u'^((?:[^"\'\\\\]|(?:\\\\.))*)\''
+DOUBLE_QUOTE_PATTERN = '^((?:[^"\'\\\\]|(?:\\\\.))*)"'
+SINGLE_QUOTE_PATTERN = "^((?:[^\"'\\\\]|(?:\\\\.))*)'"
def _convert_string_literal(x, quote_initial, quote_replace, quote_search):
@@ -44,37 +45,39 @@ def _convert_string_literal(x, quote_initial, quote_replace, quote_search):
s += inner
break
s += m.group(1)
- s += u'\\'
+ s += "\\"
s += quote_replace
- inner = inner[m.end():]
+ inner = inner[m.end() :]
s += quote_replace
return s
return x
def _convert_json_helper(x, desired_comma_char, desired_quote_char):
- comma_search = u'[&_,]'
- if desired_quote_char == u'"':
- quote_initial = u'\''
+ comma_search = "[&_,]"
+ if desired_quote_char == '"':
+ quote_initial = "'"
quote_search = DOUBLE_QUOTE_PATTERN
string_literal_pattern = SINGLE_OR_DOUBLE_QUOTE_STRING_PATTERN
else:
- quote_initial = u'"'
+ quote_initial = '"'
quote_search = SINGLE_QUOTE_PATTERN
string_literal_pattern = DOUBLE_OR_SINGLE_QUOTE_STRING_PATTERN
- s = u''
+ s = ""
while x:
m = re.search(string_literal_pattern, x)
if m is None:
before = x
- x = u''
- replacement = u''
+ x = ""
+ replacement = ""
else:
- before = x[:m.start()]
- x = x[m.end():]
+ before = x[: m.start()]
+ x = x[m.end() :]
original_string = m.group(1)
if original_string is not None:
- replacement = _convert_string_literal(original_string, quote_initial, desired_quote_char, quote_search)
+ replacement = _convert_string_literal(
+ original_string, quote_initial, desired_quote_char, quote_search
+ )
else:
replacement = m.group(2)
s += re.sub(comma_search, desired_comma_char, before)
@@ -83,38 +86,48 @@ def _convert_json_helper(x, desired_comma_char, desired_quote_char):
def url_safe_to_json(x):
- return _convert_json_helper(x, u',', u'"')
+ return _convert_json_helper(x, ",", '"')
+
def json_to_url_safe(x):
- return _convert_json_helper(x, u'_', u'\'')
+ return _convert_json_helper(x, "_", "'")
+
def url_fragment_to_json(fragment_value):
unquoted = urllib.parse.unquote(fragment_value)
- if unquoted.startswith('!'):
+ if unquoted.startswith("!"):
unquoted = unquoted[1:]
return url_safe_to_json(unquoted)
def parse_url_fragment(fragment_value):
json_string = url_fragment_to_json(fragment_value)
- return viewer_state.ViewerState(
- json.loads(json_string, object_pairs_hook=collections.OrderedDict))
+ return viewer_state.ViewerState(json.loads(json_string))
def parse_url(url):
result = urllib.parse.urlparse(url)
return parse_url_fragment(result.fragment)
+
def to_url_fragment(state):
- json_string = json.dumps(to_json(state), separators=(u',', u':'), default=json_encoder_default)
- return urllib.parse.quote(json_string, safe=u'~@#$&()*!+=:;,.?/\'')
+ json_string = json.dumps(
+ to_json(state), separators=(",", ":"), default=json_encoder_default
+ )
+ return urllib.parse.quote(json_string, safe="~@#$&()*!+=:;,.?/'")
+
+
+default_neuroglancer_url = "https://neuroglancer-demo.appspot.com"
-default_neuroglancer_url = u'https://neuroglancer-demo.appspot.com'
def to_url(state, prefix=default_neuroglancer_url):
- return u'%s#!%s' % (prefix, to_url_fragment(state))
+ return f"{prefix}#!{to_url_fragment(state)}"
+
def to_json_dump(state, indent=None, separators=None):
- return json.dumps(to_json(state), separators=separators,
- indent=indent,
- default=json_encoder_default)
+ return json.dumps(
+ to_json(state),
+ separators=separators,
+ indent=indent,
+ default=json_encoder_default,
+ )
diff --git a/python/neuroglancer/viewer.py b/python/neuroglancer/viewer.py
index f03cf807bb..d4a310e01a 100644
--- a/python/neuroglancer/viewer.py
+++ b/python/neuroglancer/viewer.py
@@ -12,16 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
from . import server, url_state, viewer_base
-class _ViewerHelper(object):
+class _ViewerHelper:
"""Mixin for implementing viewers based on the built-in server."""
def __init__(self, **kwargs):
- super(_ViewerHelper, self).__init__(**kwargs)
+ super().__init__(**kwargs)
server.register_viewer(self)
def defer_callback(self, callback, *args, **kwargs):
@@ -39,17 +38,19 @@ def get_viewer_url(self):
class Viewer(viewer_base.ViewerBase, _ViewerHelper):
def __init__(self, **kwargs):
- super(Viewer, self).__init__(**kwargs)
+ super().__init__(**kwargs)
server.register_viewer(self)
def get_viewer_url(self):
- return '%s/v/%s/' % (server.get_server_url(), self.token)
+ return f"{server.get_server_url()}/v/{self.token}/"
class UnsynchronizedViewer(viewer_base.UnsynchronizedViewerBase, _ViewerHelper):
def __init__(self, **kwargs):
- super(UnsynchronizedViewer, self).__init__(**kwargs)
+ super().__init__(**kwargs)
server.register_viewer(self)
def get_viewer_url(self):
- return url_state.to_url(self.raw_state, '%s/v/%s/' % (server.get_server_url(), self.token))
+ return url_state.to_url(
+ self.raw_state, f"{server.get_server_url()}/v/{self.token}/"
+ )
diff --git a/python/neuroglancer/viewer_base.py b/python/neuroglancer/viewer_base.py
index 763ad0bd01..f3068b60eb 100644
--- a/python/neuroglancer/viewer_base.py
+++ b/python/neuroglancer/viewer_base.py
@@ -12,20 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import collections
-from concurrent.futures import Future
import contextlib
import json
import re
import threading
+from concurrent.futures import Future
from typing import Optional
import numpy as np
-from . import coordinate_space, local_volume, trackable_state, viewer_config_state, viewer_state
-from . import skeleton
+from . import (
+ coordinate_space,
+ local_volume,
+ skeleton,
+ trackable_state,
+ viewer_config_state,
+ viewer_state,
+)
from .json_utils import decode_json, encode_json, json_encoder_default
from .random_token import make_random_token
@@ -36,9 +41,8 @@
class LocalVolumeManager(trackable_state.ChangeNotifier):
-
def __init__(self, token_prefix):
- super(LocalVolumeManager, self).__init__()
+ super().__init__()
self.volumes = dict()
self.__token_prefix = token_prefix
@@ -47,16 +51,16 @@ def register_volume(self, v):
self.volumes[v.token] = v
self._dispatch_changed_callbacks()
if isinstance(v, local_volume.LocalVolume):
- source_type = 'volume'
+ source_type = "volume"
else:
- source_type = 'skeleton'
- return 'python://%s/%s' % (source_type, self.get_volume_key(v))
+ source_type = "skeleton"
+ return f"python://{source_type}/{self.get_volume_key(v)}"
def get_volume_key(self, v):
return self.__token_prefix + v.token
def update(self, json_str):
- pattern = '|'.join(self.volumes)
+ pattern = "|".join(self.volumes)
present_tokens = set()
for m in re.finditer(pattern, json_str):
present_tokens.add(m.group(0))
@@ -70,8 +74,7 @@ def update(self, json_str):
self._dispatch_changed_callbacks()
-class ViewerCommonBase(object):
-
+class ViewerCommonBase:
def __init__(self, token=None, allow_credentials=None):
if token is None:
token = make_random_token()
@@ -82,10 +85,11 @@ def __init__(self, token=None, allow_credentials=None):
allow_credentials = False
self.allow_credentials = allow_credentials
self.token = token
- self.config_state = trackable_state.TrackableState(viewer_config_state.ConfigState)
+ self.config_state = trackable_state.TrackableState(
+ viewer_config_state.ConfigState
+ )
def set_actions(actions):
-
def func(s):
s.actions = actions
@@ -93,7 +97,7 @@ def func(s):
self.actions = viewer_config_state.Actions(set_actions)
- self.volume_manager = LocalVolumeManager(self.token + '.')
+ self.volume_manager = LocalVolumeManager(self.token + ".")
self.__watched_volumes = dict()
@@ -101,16 +105,18 @@ def func(s):
self._next_screenshot_id = 0
self._screenshot_callbacks = {}
- self._volume_info_promises: Dict[str, "Future[viewer_config_state.VolumeInfo]"] = {}
- self._volume_chunk_promises: Dict[str, "Future[np.ndarray]"] = {}
- self.actions.add('screenshot', self._handle_screenshot_reply)
- self.actions.add('screenshotStatistics', self._handle_screenshot_statistics)
+ self._volume_info_promises: dict[
+ str, "Future[viewer_config_state.VolumeInfo]"
+ ] = {}
+ self._volume_chunk_promises: dict[str, Future[np.ndarray]] = {}
+ self.actions.add("screenshot", self._handle_screenshot_reply)
+ self.actions.add("screenshotStatistics", self._handle_screenshot_statistics)
def async_screenshot(self, callback, include_depth=False, statistics_callback=None):
"""Captures a screenshot asynchronously."""
screenshot_id = str(self._next_screenshot_id)
if include_depth:
- screenshot_id = screenshot_id + '_includeDepth'
+ screenshot_id = screenshot_id + "_includeDepth"
self._next_screenshot_id += 1
def set_screenshot_id(s):
@@ -147,12 +153,16 @@ def handler(s):
result[0] = s
event.set()
- self.async_screenshot(handler,
- include_depth=include_depth,
- statistics_callback=statistics_callback)
+ self.async_screenshot(
+ handler,
+ include_depth=include_depth,
+ statistics_callback=statistics_callback,
+ )
event.wait()
- if size is not None and (result[0].screenshot.width != size[0]
- or result[0].screenshot.height != size[1]):
+ if size is not None and (
+ result[0].screenshot.width != size[0]
+ or result[0].screenshot.height != size[1]
+ ):
continue
break
if size is not None:
@@ -160,7 +170,6 @@ def handler(s):
return result[0]
def _handle_screenshot_reply(self, s):
-
def set_screenshot_id(s):
s.screenshot = None
@@ -173,24 +182,29 @@ def set_screenshot_id(s):
def _handle_screenshot_statistics(self, s):
screenshot_id = s.screenshot_statistics.id
callback = self._screenshot_callbacks.get(screenshot_id)
- if callback is None or callback[1] is None: return
+ if callback is None or callback[1] is None:
+ return
callback[1](s.screenshot_statistics)
- def volume_info(self,
- layer: str,
- *,
- dimensions: Optional[coordinate_space.CoordinateSpace] = None
- ) -> "Future[ts.TensorStore]":
+ def volume_info(
+ self,
+ layer: str,
+ *,
+ dimensions: Optional[coordinate_space.CoordinateSpace] = None,
+    ) -> "Future[viewer_config_state.VolumeInfo]":
request_id = make_random_token()
- future = Future()
+        future: Future[viewer_config_state.VolumeInfo] = Future()
self._volume_info_promises[request_id] = future
def add_request(s):
s.volume_requests.append(
- viewer_config_state.VolumeRequest(kind='volume_info',
- id=request_id,
- layer=layer,
- dimensions=dimensions))
+ viewer_config_state.VolumeRequest(
+ kind="volume_info",
+ id=request_id,
+ layer=layer,
+ dimensions=dimensions,
+ )
+ )
try:
self.config_state.retry_txn(add_request, lock=True)
@@ -203,16 +217,18 @@ def volume(
self,
layer: str,
*,
- dimensions: Optional[coordinate_space.CoordinateSpace] = None
+ dimensions: Optional[coordinate_space.CoordinateSpace] = None,
) -> "Future[ts.TensorStore]":
- future = Future()
+ future: Future[ts.TensorStore] = Future()
def info_done(info_future):
try:
info = info_future.result()
dimension_units = [
- '%s %s' % (scale, unit)
- for scale, unit in zip(info.dimensions.scales, info.dimensions.units)
+ f"{scale} {unit}"
+ for scale, unit in zip(
+ info.dimensions.scales, info.dimensions.units
+ )
]
def read_function(domain: ts.IndexDomain, array, params):
@@ -220,8 +236,10 @@ def read_function(domain: ts.IndexDomain, array, params):
origin = domain.origin
grid_origin = info.grid_origin
chunk_shape = info.chunk_shape
- chunk_pos = [(origin[i] - grid_origin[i]) / chunk_shape[i]
- for i in range(domain.rank)]
+ chunk_pos = [
+ (origin[i] - grid_origin[i]) / chunk_shape[i]
+ for i in range(domain.rank)
+ ]
def chunk_done(chunk_future):
try:
@@ -231,7 +249,9 @@ def chunk_done(chunk_future):
except Exception as e:
read_promise.set_exception(e)
- self._volume_chunk(layer, info, chunk_pos).add_done_callback(chunk_done)
+ self._volume_chunk(layer, info, chunk_pos).add_done_callback(
+ chunk_done
+ )
return read_future
future.set_result(
@@ -239,12 +259,15 @@ def chunk_done(chunk_future):
read_function=read_function,
rank=info.rank,
dtype=ts.dtype(info.data_type),
- domain=ts.IndexDomain(labels=info.dimensions.names,
- inclusive_min=info.lower_bound,
- exclusive_max=info.upper_bound),
+ domain=ts.IndexDomain(
+ labels=info.dimensions.names,
+ inclusive_min=info.lower_bound,
+ exclusive_max=info.upper_bound,
+ ),
dimension_units=dimension_units,
chunk_layout=ts.ChunkLayout(read_chunk_shape=info.chunk_shape),
- ))
+ )
+ )
except Exception as e:
future.set_exception(e)
@@ -253,20 +276,28 @@ def chunk_done(chunk_future):
return future
def _volume_chunk(
- self, layer: str, info: viewer_config_state.VolumeInfo,
- chunk_grid_position: collections.abc.Sequence[int]) -> "ts.Future[np.ndarray]":
+ self,
+ layer: str,
+ info: viewer_config_state.VolumeInfo,
+ chunk_grid_position: collections.abc.Sequence[int],
+ ) -> Future[np.ndarray]:
request_id = make_random_token()
- #promise, future = ts.Promise.new()
+ # promise, future = ts.Promise.new()
+ future: Future[np.ndarray]
+ promise: Future[np.ndarray]
future = promise = Future()
self._volume_chunk_promises[request_id] = promise
def add_request(s):
s.volume_requests.append(
- viewer_config_state.VolumeRequest(kind='volume_chunk',
- id=request_id,
- layer=layer,
- volume_info=info,
- chunk_grid_position=chunk_grid_position))
+ viewer_config_state.VolumeRequest(
+ kind="volume_chunk",
+ id=request_id,
+ layer=layer,
+ volume_info=info,
+ chunk_grid_position=chunk_grid_position,
+ )
+ )
try:
self.config_state.retry_txn(add_request, lock=True)
@@ -276,7 +307,6 @@ def add_request(s):
return future
def _handle_volume_info_reply(self, request_id, reply):
-
def remove_request(s):
s.volume_requests = [r for r in s.volume_requests if r.id != request_id]
@@ -284,15 +314,15 @@ def remove_request(s):
promise = self._volume_info_promises.pop(request_id, None)
if promise is None:
return
- if not isinstance(reply, dict): return
- error = reply.get('error')
+ if not isinstance(reply, dict):
+ return
+ error = reply.get("error")
if error is not None:
promise.set_exception(ValueError(error))
else:
promise.set_result(viewer_config_state.VolumeInfo(reply))
def _handle_volume_chunk_reply(self, request_id, params, data):
-
def remove_request(s):
s.volume_requests = [r for r in s.volume_requests if r.id != request_id]
@@ -300,19 +330,20 @@ def remove_request(s):
promise = self._volume_chunk_promises.pop(request_id, None)
if promise is None:
return
- if not isinstance(params, dict): return
- error = params.get('error')
+ if not isinstance(params, dict):
+ return
+ error = params.get("error")
if error is not None:
promise.set_exception(ValueError(error))
return
- array = np.frombuffer(data, dtype=np.dtype(params['dtype']))
- order = params['order']
+ array = np.frombuffer(data, dtype=np.dtype(params["dtype"]))
+ order = params["order"]
rank = len(order)
- shape = params['chunkDataSize']
+ shape = params["chunkDataSize"]
inverse_order = [0] * rank
for physical_dim, logical_dim in enumerate(order):
inverse_order[logical_dim] = physical_dim
- if params.get('isFillValue'):
+ if params.get("isFillValue"):
array = np.broadcast_to(array.reshape([]), shape[::-1])
else:
array = array.reshape(shape[::-1])
@@ -332,7 +363,6 @@ def _handle_volumes_changed(self):
volume.remove_changed_callback(self._update_source_generations)
def _update_source_generations(self):
-
def func(s):
volume_manager = self.volume_manager
s.source_generations = {
@@ -359,13 +389,14 @@ def txn(self):
class ViewerBase(ViewerCommonBase):
-
def __init__(self, **kwargs):
- super(ViewerBase, self).__init__(**kwargs)
- self.shared_state = trackable_state.TrackableState(viewer_state.ViewerState,
- self._transform_viewer_state)
+ super().__init__(**kwargs)
+ self.shared_state = trackable_state.TrackableState(
+ viewer_state.ViewerState, self._transform_viewer_state
+ )
self.shared_state.add_changed_callback(
- lambda: self.volume_manager.update(encode_json(self.shared_state.raw_state)))
+ lambda: self.volume_manager.update(encode_json(self.shared_state.raw_state))
+ )
@property
def state(self):
@@ -382,9 +413,8 @@ def retry_txn(self, *args, **kwargs):
class UnsynchronizedViewerBase(ViewerCommonBase):
-
def __init__(self, **kwargs):
- super(UnsynchronizedViewerBase, self).__init__(**kwargs)
+ super().__init__(**kwargs)
self.state = viewer_state.ViewerState()
@property
diff --git a/python/neuroglancer/viewer_config_state.py b/python/neuroglancer/viewer_config_state.py
index 569e8f7719..a5b58b8009 100644
--- a/python/neuroglancer/viewer_config_state.py
+++ b/python/neuroglancer/viewer_config_state.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import base64
import collections
@@ -21,39 +20,50 @@
import traceback
import numpy as np
-import six
from . import viewer_state
-from .json_wrappers import (JsonObjectWrapper, array_wrapper, optional, text_type, typed_list,
- typed_set, typed_string_map, wrapped_property)
-
-_uint64_keys = frozenset(['t', 'v'])
-_map_entry_keys = frozenset(['key', 'value'])
-
-
-class SegmentIdMapEntry(collections.namedtuple('SegmentIdMapEntry', ['key', 'value', 'label'])):
+from .json_wrappers import (
+ JsonObjectWrapper,
+ array_wrapper,
+ optional,
+ text_type,
+ typed_list,
+ typed_set,
+ typed_string_map,
+ wrapped_property,
+)
+
+_uint64_keys = frozenset(["t", "v"])
+_map_entry_keys = frozenset(["key", "value"])
+
+
+class SegmentIdMapEntry(
+ collections.namedtuple("SegmentIdMapEntry", ["key", "value", "label"])
+):
def __new__(cls, key, value=None, label=None):
- return super(SegmentIdMapEntry, cls).__new__(cls, key, value, label)
+ return super().__new__(cls, key, value, label)
def layer_selected_value(x):
if isinstance(x, numbers.Number):
return x
- if isinstance(x, six.string_types):
+ if isinstance(x, str):
return int(x)
if isinstance(x, dict):
- value = x.get('value')
+ value = x.get("value")
if value is not None:
value = int(value)
- return SegmentIdMapEntry(int(x['key']), value, x.get('label'))
+ return SegmentIdMapEntry(int(x["key"]), value, x.get("label"))
return None
class LayerSelectionState(JsonObjectWrapper):
__slots__ = ()
supports_validation = True
- local_position = wrapped_property('localPosition', optional(array_wrapper(np.float32)))
- value = wrapped_property('value', optional(layer_selected_value))
+ local_position = wrapped_property(
+ "localPosition", optional(array_wrapper(np.float32))
+ )
+ value = wrapped_property("value", optional(layer_selected_value))
LayerSelectedValues = typed_string_map(LayerSelectionState)
@@ -61,65 +71,80 @@ class LayerSelectionState(JsonObjectWrapper):
class ScreenshotReply(JsonObjectWrapper):
__slots__ = ()
- id = wrapped_property('id', text_type)
- image = wrapped_property('image', base64.b64decode)
- width = wrapped_property('width', int)
- height = wrapped_property('height', int)
- image_type = imageType = wrapped_property('imageType', text_type)
- depth_data = depthData = wrapped_property('depthData', optional(base64.b64decode))
+ id = wrapped_property("id", text_type)
+ image = wrapped_property("image", base64.b64decode)
+ width = wrapped_property("width", int)
+ height = wrapped_property("height", int)
+ image_type = imageType = wrapped_property("imageType", text_type)
+ depth_data = depthData = wrapped_property("depthData", optional(base64.b64decode))
@property
def image_pixels(self):
"""Returns the screenshot image as a numpy array of pixel values."""
import PIL
+
return np.asarray(PIL.Image.open(io.BytesIO(self.image)))
@property
def depth_array(self):
"""Returns the depth data as a numpy float32 array."""
depth_data = self.depth_data
- if depth_data is None: return None
- return np.frombuffer(depth_data, dtype=' bool:
+ def __contains__(self, segment_id) -> bool:
return segment_id in self._data
def keys(self):
@@ -620,28 +667,28 @@ def __eq__(self, other):
return self._data == other._data
return self._data == other
- def add(self, segment_id: numbers.Integral) -> None:
+ def add(self, segment_id: int) -> None:
if self._readonly:
raise AttributeError
self.setdefault(segment_id, True)
- def get(self, segment_id: numbers.Integral, default_value=None) -> bool:
+ def get(self, segment_id: int, default_value=None) -> bool:
return self._data.get(segment_id, default_value)
- def __getitem__(self, segment_id: numbers.Integral) -> bool:
+ def __getitem__(self, segment_id: int) -> bool:
return self._data[segment_id]
- def remove(self, segment_id: numbers.Integral) -> None:
+ def remove(self, segment_id: int) -> None:
if self._readonly:
raise AttributeError
del self._data[segment_id]
self._visible.pop(segment_id)
- def discard(self, segment_id: numbers.Integral) -> None:
+ def discard(self, segment_id: int) -> None:
self._data.pop(segment_id, None)
self._visible.pop(segment_id, None)
- def __setitem__(self, segment_id: numbers.Integral, visible: bool) -> None:
+ def __setitem__(self, segment_id: int, visible: bool) -> None:
if self._readonly:
raise AttributeError
self._data[segment_id] = visible
@@ -650,7 +697,7 @@ def __setitem__(self, segment_id: numbers.Integral, visible: bool) -> None:
else:
self._visible.pop(segment_id, None)
- def __delitem__(self, segment_id: numbers.Integral) -> None:
+ def __delitem__(self, segment_id: int) -> None:
if self._readonly:
raise AttributeError
del self._data[segment_id]
@@ -663,7 +710,7 @@ def clear(self):
self._visible.clear()
def __repr__(self):
- return f'StarredSegments({self._data}!r)'
+        return f"StarredSegments({self._data!r})"
def update(self, other):
if self._readonly:
@@ -671,8 +718,10 @@ def update(self, other):
self._update(other)
def to_json(self):
- return [f"{segment}" if visible else f"!{segment}"
- for segment, visible in self.items()]
+ return [
+ f"{segment}" if visible else f"!{segment}"
+ for segment, visible in self.items()
+ ]
def __iter__(self):
return iter(self._data)
@@ -682,20 +731,19 @@ def visible(self):
return VisibleSegments(self)
@visible.setter
- def visible(self, segments: typing.Iterable[numbers.Integral]):
+ def visible(self, segments: collections.abc.Iterable[int]):
new_dict = {}
for k in segments:
num_k = np.uint64(k)
if num_k != k:
- raise ValueError(f'Invalid uint64 value: {k}')
- new_dict[num_k] = True
+ raise ValueError(f"Invalid uint64 value: {k}")
+ new_dict[int(num_k)] = True
self._data = new_dict
self._visible = new_dict.copy()
@export
-class VisibleSegments(collections.abc.MutableSet):
-
+class VisibleSegments(collections.abc.MutableSet[int]):
def __init__(self, starred_segments: StarredSegments):
self._starred_segments = starred_segments
self._visible = self._starred_segments._visible
@@ -706,13 +754,13 @@ def __len__(self):
def clear(self):
self._starred_segments.clear()
- def __contains__(self, segment_id: numbers.Integral):
+ def __contains__(self, segment_id):
return segment_id in self._visible
- def add(self, segment_id: numbers.Integral) -> None:
+ def add(self, segment_id: int) -> None:
self._starred_segments[segment_id] = True
- def discard(self, segment_id: numbers.Integral) -> None:
+ def discard(self, segment_id) -> None:
self._starred_segments.discard(segment_id)
def __iter__(self):
@@ -726,7 +774,7 @@ def copy(self):
return VisibleSegments(new_starred_segments)
def __repr__(self):
- return f'VisibleSegments({list(self)!r})'
+ return f"VisibleSegments({list(self)!r})"
@export
@@ -734,10 +782,10 @@ class SegmentationLayer(Layer, _AnnotationLayerOptions):
__slots__ = ()
def __init__(self, *args, **kwargs):
- super(SegmentationLayer, self).__init__(*args, type='segmentation', **kwargs)
+ super().__init__(*args, type="segmentation", **kwargs)
- source = wrapped_property('source', LayerDataSources)
- starred_segments = wrapped_property('segments', StarredSegments)
+ source = wrapped_property("source", LayerDataSources)
+ starred_segments = wrapped_property("segments", StarredSegments)
@property
def visible_segments(self):
@@ -749,19 +797,30 @@ def visible_segments(self, segments):
segments = visible_segments
- equivalences = wrapped_property('equivalences', uint64_equivalence_map)
- hide_segment_zero = hideSegmentZero = wrapped_property('hideSegmentZero', optional(bool, True))
- hover_highlight = hoverHighlight = wrapped_property('hoverHighlight', optional(bool, True))
- base_segment_coloring = baseSegmentColoring = wrapped_property('baseSegmentColoring',
- optional(bool, False))
- selected_alpha = selectedAlpha = wrapped_property('selectedAlpha', optional(float, 0.5))
- not_selected_alpha = notSelectedAlpha = wrapped_property('notSelectedAlpha', optional(float, 0))
- object_alpha = objectAlpha = wrapped_property('objectAlpha', optional(float, 1.0))
- saturation = wrapped_property('saturation', optional(float, 1.0))
- ignore_null_visible_set = ignoreNullVisibleSet = wrapped_property('ignoreNullVisibleSet',
- optional(bool, True))
- skeleton_rendering = skeletonRendering = wrapped_property('skeletonRendering',
- SkeletonRenderingOptions)
+ equivalences = wrapped_property("equivalences", uint64_equivalence_map)
+ hide_segment_zero = hideSegmentZero = wrapped_property(
+ "hideSegmentZero", optional(bool, True)
+ )
+ hover_highlight = hoverHighlight = wrapped_property(
+ "hoverHighlight", optional(bool, True)
+ )
+ base_segment_coloring = baseSegmentColoring = wrapped_property(
+ "baseSegmentColoring", optional(bool, False)
+ )
+ selected_alpha = selectedAlpha = wrapped_property(
+ "selectedAlpha", optional(float, 0.5)
+ )
+ not_selected_alpha = notSelectedAlpha = wrapped_property(
+ "notSelectedAlpha", optional(float, 0)
+ )
+ object_alpha = objectAlpha = wrapped_property("objectAlpha", optional(float, 1.0))
+ saturation = wrapped_property("saturation", optional(float, 1.0))
+ ignore_null_visible_set = ignoreNullVisibleSet = wrapped_property(
+ "ignoreNullVisibleSet", optional(bool, True)
+ )
+ skeleton_rendering = skeletonRendering = wrapped_property(
+ "skeletonRendering", SkeletonRenderingOptions
+ )
@property
def skeleton_shader(self):
@@ -773,17 +832,23 @@ def skeleton_shader(self, shader):
skeletonShader = skeleton_shader
- color_seed = colorSeed = wrapped_property('colorSeed', optional(int, 0))
+ color_seed = colorSeed = wrapped_property("colorSeed", optional(int, 0))
cross_section_render_scale = crossSectionRenderScale = wrapped_property(
- 'crossSectionRenderScale', optional(float, 1))
- mesh_render_scale = meshRenderScale = wrapped_property('meshRenderScale', optional(float, 10))
+ "crossSectionRenderScale", optional(float, 1)
+ )
+ mesh_render_scale = meshRenderScale = wrapped_property(
+ "meshRenderScale", optional(float, 10)
+ )
mesh_silhouette_rendering = meshSilhouetteRendering = wrapped_property(
- 'meshSilhouetteRendering', optional(float, 0))
- segment_query = segmentQuery = wrapped_property('segmentQuery', optional(text_type))
+ "meshSilhouetteRendering", optional(float, 0)
+ )
+ segment_query = segmentQuery = wrapped_property("segmentQuery", optional(text_type))
segment_colors = segmentColors = wrapped_property(
- 'segmentColors', typed_map(key_type=np.uint64, value_type=text_type))
- segment_default_color = segmentDefaultColor = wrapped_property('segmentDefaultColor',
- optional(text_type))
+ "segmentColors", typed_map(key_type=np.uint64, value_type=text_type)
+ )
+ segment_default_color = segmentDefaultColor = wrapped_property(
+ "segmentDefaultColor", optional(text_type)
+ )
@property
def segment_html_color_dict(self):
@@ -793,20 +858,23 @@ def segment_html_color_dict(self):
"""
d = {}
for segment in self.segments:
- hex_string = segment_colors.hex_string_from_segment_id(color_seed=self.color_seed,
- segment_id=segment)
+ hex_string = segment_colors.hex_string_from_segment_id(
+ color_seed=self.color_seed, segment_id=segment
+ )
d[segment] = hex_string
return d
linked_segmentation_group = linkedSegmentationGroup = wrapped_property(
- 'linkedSegmentationGroup', optional(text_type))
+ "linkedSegmentationGroup", optional(text_type)
+ )
linked_segmentation_color_group = linkedSegmentationColorGroup = wrapped_property(
- 'linkedSegmentationColorGroup', optional(_linked_segmentation_color_group_value))
+ "linkedSegmentationColorGroup", optional(_linked_segmentation_color_group_value)
+ )
@staticmethod
def interpolate(a, b, t):
c = Layer.interpolate(a, b, t)
- for k in ['selected_alpha', 'not_selected_alpha', 'object_alpha']:
+ for k in ["selected_alpha", "not_selected_alpha", "object_alpha"]:
setattr(c, k, interpolate_linear(getattr(a, k), getattr(b, k), t))
return c
@@ -816,24 +884,26 @@ class SingleMeshLayer(Layer):
__slots__ = ()
def __init__(self, *args, **kwargs):
- super(SingleMeshLayer, self).__init__(*args, type='mesh', **kwargs)
+ super().__init__(*args, type="mesh", **kwargs)
- source = wrapped_property('source', LayerDataSources)
+ source = wrapped_property("source", LayerDataSources)
vertex_attribute_sources = vertexAttributeSources = wrapped_property(
- 'vertexAttributeSources', optional(typed_list(text_type)))
- shader = wrapped_property('shader', text_type)
+ "vertexAttributeSources", optional(typed_list(text_type))
+ )
+ shader = wrapped_property("shader", text_type)
vertex_attribute_names = vertexAttributeNames = wrapped_property(
- 'vertexAttributeNames', optional(typed_list(optional(text_type))))
+ "vertexAttributeNames", optional(typed_list(optional(text_type)))
+ )
class AnnotationBase(JsonObjectWrapper):
__slots__ = ()
- id = wrapped_property('id', optional(text_type)) # pylint: disable=invalid-name
- type = wrapped_property('type', text_type)
- description = wrapped_property('description', optional(text_type))
- segments = wrapped_property('segments', optional(typed_list(typed_list(np.uint64))))
- props = wrapped_property('props', optional(typed_list(number_or_string)))
+ id = wrapped_property("id", optional(text_type)) # pylint: disable=invalid-name
+ type = wrapped_property("type", text_type)
+ description = wrapped_property("description", optional(text_type))
+ segments = wrapped_property("segments", optional(typed_list(typed_list(np.uint64))))
+ props = wrapped_property("props", optional(typed_list(number_or_string)))
@export
@@ -841,9 +911,9 @@ class PointAnnotation(AnnotationBase):
__slots__ = ()
def __init__(self, *args, **kwargs):
- super(PointAnnotation, self).__init__(*args, type='point', **kwargs)
+ super().__init__(*args, type="point", **kwargs)
- point = wrapped_property('point', array_wrapper(np.float32))
+ point = wrapped_property("point", array_wrapper(np.float32))
@export
@@ -851,10 +921,10 @@ class LineAnnotation(AnnotationBase):
__slots__ = ()
def __init__(self, *args, **kwargs):
- super(LineAnnotation, self).__init__(*args, type='line', **kwargs)
+ super().__init__(*args, type="line", **kwargs)
- point_a = pointA = wrapped_property('pointA', array_wrapper(np.float32))
- point_b = pointB = wrapped_property('pointB', array_wrapper(np.float32))
+ point_a = pointA = wrapped_property("pointA", array_wrapper(np.float32))
+ point_b = pointB = wrapped_property("pointB", array_wrapper(np.float32))
@export
@@ -862,12 +932,10 @@ class AxisAlignedBoundingBoxAnnotation(AnnotationBase):
__slots__ = ()
def __init__(self, *args, **kwargs):
- super(AxisAlignedBoundingBoxAnnotation, self).__init__(*args,
- type='axis_aligned_bounding_box',
- **kwargs)
+ super().__init__(*args, type="axis_aligned_bounding_box", **kwargs)
- point_a = pointA = wrapped_property('pointA', array_wrapper(np.float32))
- point_b = pointB = wrapped_property('pointB', array_wrapper(np.float32))
+ point_a = pointA = wrapped_property("pointA", array_wrapper(np.float32))
+ point_b = pointB = wrapped_property("pointB", array_wrapper(np.float32))
@export
@@ -875,17 +943,17 @@ class EllipsoidAnnotation(AnnotationBase):
__slots__ = ()
def __init__(self, *args, **kwargs):
- super(EllipsoidAnnotation, self).__init__(*args, type='ellipsoid', **kwargs)
+ super().__init__(*args, type="ellipsoid", **kwargs)
- center = wrapped_property('center', array_wrapper(np.float32))
- radii = wrapped_property('radii', array_wrapper(np.float32))
+ center = wrapped_property("center", array_wrapper(np.float32))
+ radii = wrapped_property("radii", array_wrapper(np.float32))
annotation_types = {
- 'point': PointAnnotation,
- 'line': LineAnnotation,
- 'axis_aligned_bounding_box': AxisAlignedBoundingBoxAnnotation,
- 'ellipsoid': EllipsoidAnnotation,
+ "point": PointAnnotation,
+ "line": LineAnnotation,
+ "axis_aligned_bounding_box": AxisAlignedBoundingBoxAnnotation,
+ "ellipsoid": EllipsoidAnnotation,
}
@@ -894,22 +962,24 @@ def annotation(obj, _readonly=False):
obj = obj.to_json()
elif not isinstance(obj, dict):
raise TypeError
- t = obj.get('type')
+ t = obj.get("type")
return annotation_types[t](obj, _readonly=_readonly)
-annotation.supports_readonly = True
+annotation.supports_readonly = True # type: ignore[attr-defined]
@export
class AnnotationPropertySpec(JsonObjectWrapper):
__slots__ = ()
- id = wrapped_property('id', text_type)
- type = wrapped_property('type', text_type)
- description = wrapped_property('description', optional(text_type))
- default = wrapped_property('default', optional(number_or_string))
- enum_values = wrapped_property('enum_values', optional(typed_list(number_or_string)))
- enum_labels = wrapped_property('enum_labels', optional(typed_list(text_type)))
+ id = wrapped_property("id", text_type)
+ type = wrapped_property("type", text_type)
+ description = wrapped_property("description", optional(text_type))
+ default = wrapped_property("default", optional(number_or_string))
+ enum_values = wrapped_property(
+ "enum_values", optional(typed_list(number_or_string))
+ )
+ enum_labels = wrapped_property("enum_labels", optional(typed_list(text_type)))
@export
@@ -917,22 +987,29 @@ class AnnotationLayer(Layer, _AnnotationLayerOptions):
__slots__ = ()
def __init__(self, *args, **kwargs):
- super(AnnotationLayer, self).__init__(*args, type='annotation', **kwargs)
+ super().__init__(*args, type="annotation", **kwargs)
- source = wrapped_property('source', LayerDataSources)
- annotations = wrapped_property('annotations', typed_list(annotation))
+ source = wrapped_property("source", LayerDataSources)
+ annotations = wrapped_property("annotations", typed_list(annotation))
annotation_properties = annotationProperties = wrapped_property(
- 'annotationProperties', typed_list(AnnotationPropertySpec))
+ "annotationProperties", typed_list(AnnotationPropertySpec)
+ )
annotation_relationships = annotationRelationships = wrapped_property(
- 'annotationRelationships', typed_list(text_type))
+ "annotationRelationships", typed_list(text_type)
+ )
linked_segmentation_layer = linkedSegmentationLayer = wrapped_property(
- 'linkedSegmentationLayer', typed_string_map(text_type))
- filter_by_segmentation = filterBySegmentation = wrapped_property('filterBySegmentation',
- typed_list(text_type))
+ "linkedSegmentationLayer", typed_string_map(text_type)
+ )
+ filter_by_segmentation = filterBySegmentation = wrapped_property(
+ "filterBySegmentation", typed_list(text_type)
+ )
ignore_null_segment_filter = ignoreNullSegmentFilter = wrapped_property(
- 'ignoreNullSegmentFilter', optional(bool, True))
- shader = wrapped_property('shader', text_type)
- shader_controls = shaderControls = wrapped_property('shaderControls', ShaderControls)
+ "ignoreNullSegmentFilter", optional(bool, True)
+ )
+ shader = wrapped_property("shader", text_type)
+ shader_controls = shaderControls = wrapped_property(
+ "shaderControls", ShaderControls
+ )
@staticmethod
def interpolate(a, b, t):
@@ -946,19 +1023,22 @@ class LocalAnnotationLayer(AnnotationLayer):
__slots__ = ()
def __init__(self, dimensions, *args, **kwargs):
- super(LocalAnnotationLayer, self).__init__(
+ super().__init__(
*args,
- source=LayerDataSource(url='local://annotations',
- transform=CoordinateSpaceTransform(outputDimensions=dimensions)),
- **kwargs)
+ source=LayerDataSource(
+ url="local://annotations",
+ transform=CoordinateSpaceTransform(outputDimensions=dimensions),
+ ),
+ **kwargs,
+ )
layer_types = {
- 'image': ImageLayer,
- 'segmentation': SegmentationLayer,
- 'pointAnnotation': PointAnnotationLayer,
- 'annotation': AnnotationLayer,
- 'mesh': SingleMeshLayer,
+ "image": ImageLayer,
+ "segmentation": SegmentationLayer,
+ "pointAnnotation": PointAnnotationLayer,
+ "annotation": AnnotationLayer,
+ "mesh": SingleMeshLayer,
}
@@ -972,7 +1052,7 @@ def make_layer(json_data, _readonly=False):
if not isinstance(json_data, dict):
raise TypeError
- type_name = json_data.get('type')
+ type_name = json_data.get("type")
layer_type = layer_types.get(type_name)
if layer_type is not None:
return layer_type(json_data, _readonly=_readonly)
@@ -982,7 +1062,7 @@ def make_layer(json_data, _readonly=False):
@export
class ManagedLayer(JsonObjectWrapper):
- __slots__ = ('name', 'layer')
+ __slots__ = ("name", "layer")
def __init__(self, name, layer=None, _readonly=False, **kwargs):
if isinstance(name, ManagedLayer):
@@ -991,25 +1071,25 @@ def __init__(self, name, layer=None, _readonly=False, **kwargs):
layer = name.to_json()
name = name.name
- object.__setattr__(self, 'name', name)
+ object.__setattr__(self, "name", name)
if isinstance(layer, Layer):
- json_data = collections.OrderedDict()
+ json_data = {}
elif isinstance(layer, local_volume.LocalVolume):
- json_data = collections.OrderedDict()
+ json_data = {}
layer = make_layer(layer, _readonly=_readonly)
else:
if layer is None:
- json_data = collections.OrderedDict()
+ json_data = {}
else:
json_data = layer
layer = make_layer(json_data, _readonly=_readonly)
- object.__setattr__(self, 'layer', layer)
- super(ManagedLayer, self).__init__(json_data, _readonly=_readonly, **kwargs)
+ object.__setattr__(self, "layer", layer)
+ super().__init__(json_data, _readonly=_readonly, **kwargs)
- _visible = wrapped_property('visible', optional(bool))
- archived = wrapped_property('archived', optional(bool, False))
+ _visible = wrapped_property("visible", optional(bool))
+ archived = wrapped_property("archived", optional(bool, False))
@property
def visible(self):
@@ -1025,28 +1105,30 @@ def __getattr__(self, key):
def __setattr__(self, key, value):
if self._readonly:
raise AttributeError
- if key in ['name', '_visible', 'visible', 'archived', 'layer']:
+ if key in ["name", "_visible", "visible", "archived", "layer"]:
object.__setattr__(self, key, value)
else:
return setattr(self.layer, key, value)
def __repr__(self):
- return u'ManagedLayer(%s,%s)' % (encode_json_for_repr(
- self.name), encode_json_for_repr(self.to_json()))
+ return "ManagedLayer({},{})".format(
+ encode_json_for_repr(self.name),
+ encode_json_for_repr(self.to_json()),
+ )
def to_json(self):
r = self.layer.to_json()
- r['name'] = self.name
+ r["name"] = self.name
archived = self.archived
if not archived:
- r.pop('archived', None)
+ r.pop("archived", None)
else:
- r['archived'] = True
+ r["archived"] = True
visible = self.visible
if visible or archived:
- r.pop('visible', None)
+ r.pop("visible", None)
else:
- r['visible'] = False
+ r["visible"] = False
return r
def __deepcopy__(self, memo):
@@ -1054,26 +1136,31 @@ def __deepcopy__(self, memo):
@export
-class Layers(object):
- __slots__ = ('_layers', '_readonly')
+class Layers:
+ __slots__ = ("_layers", "_readonly")
supports_readonly = True
def __init__(self, json_data, _readonly=False):
if json_data is None:
- json_data = collections.OrderedDict()
+ json_data = {}
self._layers = []
self._readonly = _readonly
if isinstance(json_data, collections.abc.Mapping):
- for k, v in six.iteritems(json_data):
+ for k, v in json_data.items():
self._layers.append(ManagedLayer(k, v, _readonly=_readonly))
else:
# layers property can also be an array in JSON now. each layer has a name property
for layer in json_data:
if isinstance(layer, ManagedLayer):
- self._layers.append(ManagedLayer(layer.name, layer, _readonly=_readonly))
+ self._layers.append(
+ ManagedLayer(layer.name, layer, _readonly=_readonly)
+ )
elif isinstance(layer, dict):
self._layers.append(
- ManagedLayer(text_type(layer['name']), layer, _readonly=_readonly))
+ ManagedLayer(
+ text_type(layer["name"]), layer, _readonly=_readonly
+ )
+ )
else:
raise TypeError
@@ -1088,14 +1175,14 @@ def __contains__(self, k):
def __getitem__(self, k):
"""Indexes into the list of layers by index, slice, or layer name."""
- if isinstance(k, six.string_types):
+ if isinstance(k, str):
return self._layers[self.index(k)]
return self._layers[k]
def __setitem__(self, k, v):
if self._readonly:
raise AttributeError
- if isinstance(k, six.string_types):
+ if isinstance(k, str):
i = self.index(k)
if isinstance(v, Layer):
v = ManagedLayer(k, v)
@@ -1126,7 +1213,7 @@ def __delitem__(self, k):
"""Deletes a layer by index, slice, or name."""
if self._readonly:
raise AttributeError
- if isinstance(k, six.string_types):
+ if isinstance(k, str):
k = self.index(k)
del self._layers[k]
@@ -1168,61 +1255,108 @@ def interpolate(a, b, t):
if index == -1:
continue
other_layer = b[index]
- if type(other_layer.layer) is not type(layer.layer): # pylint: disable=unidiomatic-typecheck
+ if type(other_layer.layer) is not type(layer.layer): # pylint: disable=unidiomatic-typecheck # noqa: E721
continue
- layer.layer = type(layer.layer).interpolate(layer.layer, other_layer.layer, t)
+ layer.layer = type(layer.layer).interpolate(
+ layer.layer, other_layer.layer, t
+ )
return c
def navigation_link_type(x):
- x = six.text_type(x)
+ x = str(x)
x = x.lower()
- if x not in [u'linked', u'unlinked', u'relative']:
- raise ValueError('Invalid navigation link type: %r' % x)
+ if x not in ["linked", "unlinked", "relative"]:
+ raise ValueError("Invalid navigation link type: %r" % x)
return x
-def make_linked_navigation_type(value_type, interpolate_function=None):
+_T = typing.TypeVar("_T")
+
+
+class LinkedType(typing.Generic[_T], JsonObjectWrapper):
+ __slots__ = ()
+ link = wrapped_property("link", optional(navigation_link_type, "linked"))
+ value: _T
+
+ interpolate_function: typing.ClassVar[
+ typing.Callable[[typing.Any, typing.Any, float], typing.Any]
+ ]
+
+ @classmethod
+ def interpolate(cls, a, b, t):
+ c = copy.deepcopy(a)
+ c.link = a.link
+ if a.link == b.link and a.link != "linked":
+ c.value = cls.interpolate_function(a.value, b.value, t)
+ return c
+ return c
+
+
+def make_linked_navigation_type(
+ value_type: typing.Callable[[typing.Any], _T], interpolate_function=None
+) -> type[LinkedType[_T]]:
if interpolate_function is None:
- interpolate_function = value_type.interpolate
+ _interpolate_function = value_type.interpolate # type: ignore[attr-defined]
+ else:
+ _interpolate_function = interpolate_function
+
+ _value_type = value_type
- class LinkedType(JsonObjectWrapper):
+ class Linked(LinkedType):
__slots__ = ()
- link = wrapped_property('link', optional(navigation_link_type, u'linked'))
- value = wrapped_property('value', optional(value_type))
-
- @staticmethod
- def interpolate(a, b, t):
- c = copy.deepcopy(a)
- c.link = a.link
- if a.link == b.link and a.link != 'linked':
- c.value = interpolate_function(a.value, b.value, t)
- return c
- return c
+ value_type = _value_type
+ interpolate_function = _interpolate_function
+ value = wrapped_property("value", optional(value_type))
+
+ return Linked
- return LinkedType
+
+if typing.TYPE_CHECKING:
+ _LinkedPositionBase = LinkedType[np.typing.NDArray[np.float32]]
+else:
+ _LinkedPositionBase = make_linked_navigation_type(
+ array_wrapper(np.float32), interpolate_linear_optional_vectors
+ )
@export
-class LinkedPosition(
- make_linked_navigation_type(array_wrapper(np.float32),
- interpolate_linear_optional_vectors)):
+class LinkedPosition(_LinkedPositionBase):
__slots__ = ()
+if typing.TYPE_CHECKING:
+ _LinkedZoomFactorBase = LinkedType[float]
+else:
+ _LinkedZoomFactorBase = make_linked_navigation_type(float, interpolate_zoom)
+
+
@export
-class LinkedZoomFactor(make_linked_navigation_type(float, interpolate_zoom)):
+class LinkedZoomFactor(_LinkedZoomFactorBase):
__slots__ = ()
+if typing.TYPE_CHECKING:
+ _LinkedDepthRangeBase = LinkedType[float]
+else:
+ _LinkedDepthRangeBase = make_linked_navigation_type(float, interpolate_zoom)
+
+
@export
-class LinkedDepthRange(make_linked_navigation_type(float, interpolate_zoom)):
+class LinkedDepthRange(_LinkedDepthRangeBase):
__slots__ = ()
+if typing.TYPE_CHECKING:
+ _LinkedOrientationStateBase = LinkedType[np.typing.NDArray[np.float32]]
+else:
+ _LinkedOrientationStateBase = make_linked_navigation_type(
+ array_wrapper(np.float32, 4), quaternion_slerp
+ )
+
+
@export
-class LinkedOrientationState(
- make_linked_navigation_type(array_wrapper(np.float32, 4), quaternion_slerp)):
+class LinkedOrientationState(_LinkedOrientationStateBase):
__slots__ = ()
@@ -1230,11 +1364,11 @@ class LinkedOrientationState(
class CrossSection(JsonObjectWrapper):
__slots__ = ()
supports_validation = True
- width = wrapped_property('width', optional(int, 1000))
- height = wrapped_property('height', optional(int, 1000))
- position = wrapped_property('position', LinkedPosition)
- orientation = wrapped_property('orientation', LinkedOrientationState)
- scale = wrapped_property('scale', LinkedZoomFactor)
+ width = wrapped_property("width", optional(int, 1000))
+ height = wrapped_property("height", optional(int, 1000))
+ position = wrapped_property("position", LinkedPosition)
+ orientation = wrapped_property("orientation", LinkedOrientationState)
+ scale = wrapped_property("scale", LinkedZoomFactor)
@staticmethod
def interpolate(a, b, t):
@@ -1242,14 +1376,21 @@ def interpolate(a, b, t):
c.width = interpolate_linear(a.width, b.width, t)
c.height = interpolate_linear(a.height, b.height, t)
c.position = LinkedPosition.interpolate(a.position, b.position, t)
- c.orientation = LinkedOrientationState.interpolate(a.orientation, b.orientation, t)
+ c.orientation = LinkedOrientationState.interpolate(
+ a.orientation, b.orientation, t
+ )
c.scale = LinkedZoomFactor.interpolate(a.scale, b.scale, t)
return c
-@export
-class CrossSectionMap(typed_string_map(CrossSection)):
+if typing.TYPE_CHECKING:
+ _CrossSectionMapBase = TypedStringMap[CrossSection]
+else:
+ _CrossSectionMapBase = typed_string_map(CrossSection)
+
+@export
+class CrossSectionMap(_CrossSectionMapBase):
@staticmethod
def interpolate(a, b, t):
c = copy.deepcopy(a)
@@ -1262,70 +1403,74 @@ def interpolate(a, b, t):
@export
class DataPanelLayout(JsonObjectWrapper):
__slots__ = ()
- type = wrapped_property('type', text_type)
- cross_sections = crossSections = wrapped_property('crossSections', CrossSectionMap)
+ type = wrapped_property("type", text_type)
+ cross_sections = crossSections = wrapped_property("crossSections", CrossSectionMap)
orthographic_projection = orthographicProjection = wrapped_property(
- 'orthographicProjection', optional(bool, False))
+ "orthographicProjection", optional(bool, False)
+ )
def __init__(self, json_data=None, _readonly=False, **kwargs):
- if isinstance(json_data, six.string_types):
- json_data = {'type': six.text_type(json_data)}
- super(DataPanelLayout, self).__init__(json_data, _readonly=_readonly, **kwargs)
+ if isinstance(json_data, str):
+ json_data = {"type": str(json_data)}
+ super().__init__(json_data, _readonly=_readonly, **kwargs)
def to_json(self):
if len(self.cross_sections) == 0 and not self.orthographic_projection:
return self.type
- return super(DataPanelLayout, self).to_json()
+ return super().to_json()
@staticmethod
def interpolate(a, b, t):
if a.type != b.type or len(a.cross_sections) == 0:
return a
c = copy.deepcopy(a)
- c.cross_sections = CrossSectionMap.interpolate(a.cross_sections, b.cross_sections, t)
+ c.cross_sections = CrossSectionMap.interpolate(
+ a.cross_sections, b.cross_sections, t
+ )
return c
-def data_panel_layout_wrapper(default_value='xy'):
-
+def data_panel_layout_wrapper(default_value="xy"):
def wrapper(x, _readonly=False):
if x is None:
x = default_value
- if isinstance(x, six.string_types):
- x = {'type': six.text_type(x)}
+ if isinstance(x, str):
+ x = {"type": str(x)}
return DataPanelLayout(x, _readonly=_readonly)
wrapper.supports_readonly = True
return wrapper
-data_panel_layout_types = frozenset(['xy', 'yz', 'xz', 'xy-3d', 'yz-3d', 'xz-3d', '4panel', '3d'])
+data_panel_layout_types = frozenset(
+ ["xy", "yz", "xz", "xy-3d", "yz-3d", "xz-3d", "4panel", "3d"]
+)
def layout_specification(x, _readonly=False):
if x is None:
- x = '4panel'
- if isinstance(x, six.string_types):
- x = {'type': six.text_type(x)}
+ x = "4panel"
+ if isinstance(x, str):
+ x = {"type": str(x)}
if isinstance(x, (StackLayout, LayerGroupViewer, DataPanelLayout)):
return type(x)(x.to_json(), _readonly=_readonly)
if not isinstance(x, dict):
raise ValueError
- layout_type = layout_types.get(x.get('type'))
+ layout_type = layout_types.get(x.get("type"))
if layout_type is None:
raise ValueError
return layout_type(x, _readonly=_readonly)
-layout_specification.supports_readonly = True
+layout_specification.supports_readonly = True # type: ignore[attr-defined]
@export
class StackLayout(JsonObjectWrapper):
__slots__ = ()
- type = wrapped_property('type', text_type)
- flex = wrapped_property('flex', optional(float, 1))
- children = wrapped_property('children', typed_list(layout_specification))
+ type = wrapped_property("type", text_type)
+ flex = wrapped_property("flex", optional(float, 1))
+ children = wrapped_property("children", typed_list(layout_specification))
def __getitem__(self, key):
return self.children[key]
@@ -1356,12 +1501,12 @@ def interpolate(a, b, t):
@export
def row_layout(children):
- return StackLayout(type='row', children=children)
+ return StackLayout(type="row", children=children)
@export
def column_layout(children):
- return StackLayout(type='column', children=children)
+ return StackLayout(type="column", children=children)
def interpolate_layout(a, b, t):
@@ -1373,40 +1518,56 @@ def interpolate_layout(a, b, t):
@export
class LayerGroupViewer(JsonObjectWrapper):
__slots__ = ()
- type = wrapped_property('type', text_type)
- flex = wrapped_property('flex', optional(float, 1))
- layers = wrapped_property('layers', typed_list(text_type))
- layout = wrapped_property('layout', data_panel_layout_wrapper('xy'))
- position = wrapped_property('position', LinkedPosition)
- velocity = wrapped_property('velocity',
- typed_map(key_type=text_type, value_type=DimensionPlaybackVelocity))
+ type = wrapped_property("type", text_type)
+ flex = wrapped_property("flex", optional(float, 1))
+ layers = wrapped_property("layers", typed_list(text_type))
+ layout = wrapped_property("layout", data_panel_layout_wrapper("xy"))
+ position = wrapped_property("position", LinkedPosition)
+ velocity = wrapped_property(
+ "velocity", typed_map(key_type=text_type, value_type=DimensionPlaybackVelocity)
+ )
cross_section_orientation = crossSectionOrientation = wrapped_property(
- 'crossSectionOrientation', LinkedOrientationState)
- cross_section_scale = crossSectionScale = wrapped_property('crossSectionScale',
- LinkedZoomFactor)
- cross_section_depth = crossSectionDepth = wrapped_property('crossSectionDepth',
- LinkedDepthRange)
- projection_orientation = projectionOrientation = wrapped_property('projectionOrientation',
- LinkedOrientationState)
- projection_scale = projectionScale = wrapped_property('projectionScale', LinkedZoomFactor)
- projection_depth = projectionDepth = wrapped_property('projectionDepth', LinkedDepthRange)
- tool_bindings = toolBindings = wrapped_property('toolBindings',
- typed_map(key_type=text_type, value_type=tool))
+ "crossSectionOrientation", LinkedOrientationState
+ )
+ cross_section_scale = crossSectionScale = wrapped_property(
+ "crossSectionScale", LinkedZoomFactor
+ )
+ cross_section_depth = crossSectionDepth = wrapped_property(
+ "crossSectionDepth", LinkedDepthRange
+ )
+ projection_orientation = projectionOrientation = wrapped_property(
+ "projectionOrientation", LinkedOrientationState
+ )
+ projection_scale = projectionScale = wrapped_property(
+ "projectionScale", LinkedZoomFactor
+ )
+ projection_depth = projectionDepth = wrapped_property(
+ "projectionDepth", LinkedDepthRange
+ )
+ tool_bindings = toolBindings = wrapped_property(
+ "toolBindings", typed_map(key_type=text_type, value_type=tool)
+ )
def __init__(self, *args, **kwargs):
- super(LayerGroupViewer, self).__init__(*args, **kwargs)
- self.type = 'viewer'
+ super().__init__(*args, **kwargs)
+ self.type = "viewer"
def __repr__(self):
j = self.to_json()
- j.pop('type', None)
- return u'%s(%s)' % (type(self).__name__, encode_json_for_repr(j))
+ j.pop("type", None)
+ return f"{type(self).__name__}({encode_json_for_repr(j)})"
@staticmethod
def interpolate(a, b, t):
c = copy.deepcopy(a)
- for k in ('layout', 'position', 'cross_section_orientation', 'cross_section_zoom',
- 'perspective_orientation', 'perspective_zoom'):
+ for k in (
+ "layout",
+ "position",
+ "cross_section_orientation",
+ "cross_section_zoom",
+ "perspective_orientation",
+ "perspective_zoom",
+ ):
a_attr = getattr(a, k)
b_attr = getattr(b, k)
setattr(c, k, type(a_attr).interpolate(a_attr, b_attr, t))
@@ -1414,9 +1575,9 @@ def interpolate(a, b, t):
layout_types = {
- 'row': StackLayout,
- 'column': StackLayout,
- 'viewer': LayerGroupViewer,
+ "row": StackLayout,
+ "column": StackLayout,
+ "viewer": LayerGroupViewer,
}
@@ -1431,59 +1592,100 @@ def add_data_panel_layout_types():
@export
class ViewerState(JsonObjectWrapper):
__slots__ = ()
- title = wrapped_property('title', optional(text_type))
- dimensions = wrapped_property('dimensions', CoordinateSpace)
+ title = wrapped_property("title", optional(text_type))
+ dimensions = wrapped_property("dimensions", CoordinateSpace)
relative_display_scales = relativeDisplayScales = wrapped_property(
- 'relativeDisplayScales', optional(typed_string_map(float)))
- display_dimensions = displayDimensions = wrapped_property('displayDimensions',
- optional(typed_list(text_type)))
- position = voxel_coordinates = wrapped_property('position', optional(array_wrapper(np.float32)))
- velocity = wrapped_property('velocity',
- typed_map(key_type=text_type, value_type=DimensionPlaybackVelocity))
+ "relativeDisplayScales", optional(typed_string_map(float))
+ )
+ display_dimensions = displayDimensions = wrapped_property(
+ "displayDimensions", optional(typed_list(text_type))
+ )
+ position = voxel_coordinates = wrapped_property(
+ "position", optional(array_wrapper(np.float32))
+ )
+ velocity = wrapped_property(
+ "velocity", typed_map(key_type=text_type, value_type=DimensionPlaybackVelocity)
+ )
cross_section_orientation = crossSectionOrientation = wrapped_property(
- 'crossSectionOrientation', optional(array_wrapper(np.float32, 4)))
- cross_section_scale = crossSectionScale = wrapped_property('crossSectionScale', optional(float))
- cross_section_depth = crossSectionDepth = wrapped_property('crossSectionDepth', optional(float))
- projection_scale = projectionScale = wrapped_property('projectionScale', optional(float))
- projection_depth = projectionDepth = wrapped_property('projectionDepth', optional(float))
- projection_orientation = projectionOrientation = perspectiveOrientation = perspective_orientation = wrapped_property(
- 'projectionOrientation', optional(array_wrapper(np.float32, 4)))
- show_slices = showSlices = wrapped_property('showSlices', optional(bool, True))
- show_axis_lines = showAxisLines = wrapped_property('showAxisLines', optional(bool, True))
- show_scale_bar = showScaleBar = wrapped_property('showScaleBar', optional(bool, True))
+ "crossSectionOrientation", optional(array_wrapper(np.float32, 4))
+ )
+ cross_section_scale = crossSectionScale = wrapped_property(
+ "crossSectionScale", optional(float)
+ )
+ cross_section_depth = crossSectionDepth = wrapped_property(
+ "crossSectionDepth", optional(float)
+ )
+ projection_scale = projectionScale = wrapped_property(
+ "projectionScale", optional(float)
+ )
+ projection_depth = projectionDepth = wrapped_property(
+ "projectionDepth", optional(float)
+ )
+ projection_orientation = (
+ projectionOrientation
+ ) = perspectiveOrientation = perspective_orientation = wrapped_property(
+ "projectionOrientation", optional(array_wrapper(np.float32, 4))
+ )
+ show_slices = showSlices = wrapped_property("showSlices", optional(bool, True))
+ show_axis_lines = showAxisLines = wrapped_property(
+ "showAxisLines", optional(bool, True)
+ )
+ show_scale_bar = showScaleBar = wrapped_property(
+ "showScaleBar", optional(bool, True)
+ )
show_default_annotations = showDefaultAnnotations = wrapped_property(
- 'showDefaultAnnotations', optional(bool, True))
- gpu_memory_limit = gpuMemoryLimit = wrapped_property('gpuMemoryLimit', optional(int))
- system_memory_limit = systemMemoryLimit = wrapped_property('systemMemoryLimit', optional(int))
- concurrent_downloads = concurrentDownloads = wrapped_property('concurrentDownloads',
- optional(int))
- prefetch = wrapped_property('prefetch', optional(bool, True))
- layers = wrapped_property('layers', Layers)
- layout = wrapped_property('layout', layout_specification)
+ "showDefaultAnnotations", optional(bool, True)
+ )
+ gpu_memory_limit = gpuMemoryLimit = wrapped_property(
+ "gpuMemoryLimit", optional(int)
+ )
+ system_memory_limit = systemMemoryLimit = wrapped_property(
+ "systemMemoryLimit", optional(int)
+ )
+ concurrent_downloads = concurrentDownloads = wrapped_property(
+ "concurrentDownloads", optional(int)
+ )
+ prefetch = wrapped_property("prefetch", optional(bool, True))
+ layers = wrapped_property("layers", Layers)
+ layout = wrapped_property("layout", layout_specification)
cross_section_background_color = crossSectionBackgroundColor = wrapped_property(
- 'crossSectionBackgroundColor', optional(text_type))
+ "crossSectionBackgroundColor", optional(text_type)
+ )
projection_background_color = projectionBackgroundColor = wrapped_property(
- 'projectionBackgroundColor', optional(text_type))
- selected_layer = selectedLayer = wrapped_property('selectedLayer', SelectedLayerState)
- statistics = wrapped_property('statistics', StatisticsDisplayState)
- help_panel = helpPanel = wrapped_property('helpPanel', HelpPanelState)
- layer_list_panel = layerListPanel = wrapped_property('layerListPanel', LayerListPanelState)
+ "projectionBackgroundColor", optional(text_type)
+ )
+ selected_layer = selectedLayer = wrapped_property(
+ "selectedLayer", SelectedLayerState
+ )
+ statistics = wrapped_property("statistics", StatisticsDisplayState)
+ help_panel = helpPanel = wrapped_property("helpPanel", HelpPanelState)
+ layer_list_panel = layerListPanel = wrapped_property(
+ "layerListPanel", LayerListPanelState
+ )
partial_viewport = partialViewport = wrapped_property(
- 'partialViewport',
- optional(array_wrapper(np.float64, 4), np.array([0, 0, 1, 1], dtype=np.float64)))
- tool_bindings = toolBindings = wrapped_property('toolBindings',
- typed_map(key_type=text_type, value_type=tool))
+ "partialViewport",
+ optional(
+ array_wrapper(np.float64, 4), np.array([0, 0, 1, 1], dtype=np.float64)
+ ),
+ )
+ tool_bindings = toolBindings = wrapped_property(
+ "toolBindings", typed_map(key_type=text_type, value_type=tool)
+ )
@staticmethod
def interpolate(a, b, t):
c = copy.deepcopy(a)
c.position = interpolate_linear_optional_vectors(a.position, b.position, t)
c.projection_scale = interpolate_zoom(a.projection_scale, b.projection_scale, t)
- c.projection_orientation = quaternion_slerp(a.projection_orientation,
- b.projection_orientation, t)
- c.cross_section_scale = interpolate_zoom(a.cross_section_scale, b.cross_section_scale, t)
- c.cross_section_orientation = quaternion_slerp(a.cross_section_orientation,
- b.cross_section_orientation, t)
+ c.projection_orientation = quaternion_slerp(
+ a.projection_orientation, b.projection_orientation, t
+ )
+ c.cross_section_scale = interpolate_zoom(
+ a.cross_section_scale, b.cross_section_scale, t
+ )
+ c.cross_section_orientation = quaternion_slerp(
+ a.cross_section_orientation, b.cross_section_orientation, t
+ )
c.layers = Layers.interpolate(a.layers, b.layers, t)
c.layout = interpolate_layout(a.layout, b.layout, t)
return c
diff --git a/python/neuroglancer/webdriver.py b/python/neuroglancer/webdriver.py
index f24abe641e..1df8350971 100644
--- a/python/neuroglancer/webdriver.py
+++ b/python/neuroglancer/webdriver.py
@@ -1,4 +1,3 @@
-# coding=utf-8
# @license
# Copyright 2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,25 +13,38 @@
# limitations under the License.
"""Interface for controlling a browser that runs Neuroglancer."""
-from typing import Sequence, Optional
-
-import tempfile
-import time
+import contextlib
+import re
+import sys
import threading
+import time
+from collections.abc import Sequence
+from typing import Callable, NamedTuple, Optional
+
+
+class LogMessage(NamedTuple):
+ message: str
+ level: Optional[str]
+
+
+LogListener = Callable[[LogMessage], None]
class Webdriver:
- def __init__(self,
- viewer=None,
- headless=True,
- browser='chrome',
- window_size=(1920, 1080),
- debug=False,
- docker=False,
- print_logs=True,
- extra_command_line_args: Optional[Sequence[str]] = None):
+ def __init__(
+ self,
+ viewer=None,
+ headless=True,
+ browser="chrome",
+ window_size=(1920, 1080),
+ debug=False,
+ docker=False,
+ print_logs=True,
+ extra_command_line_args: Optional[Sequence[str]] = None,
+ ):
if viewer is None:
from .viewer import Viewer
+
viewer = Viewer()
self.viewer = viewer
self.headless = headless
@@ -40,123 +52,131 @@ def __init__(self,
self.window_size = window_size
self.headless = headless
self.docker = docker
- self.extra_command_line_args = list(extra_command_line_args) if extra_command_line_args else []
+ self.extra_command_line_args = (
+ list(extra_command_line_args) if extra_command_line_args else []
+ )
self.debug = debug
- self._logfile = None
- if browser == 'firefox':
- self._logfile = tempfile.NamedTemporaryFile(suffix='neuroglancer-geckodriver.log')
self._init_driver()
- self._pending_logs = []
- self._pending_logs_to_print = []
- self._logs_lock = threading.Lock()
- self._closed = False
+ self._log_listeners_lock = threading.Lock()
+ self._log_listeners: dict[LogListener, None] = {}
- def print_log_handler():
- while True:
- logs_to_print = self._get_logs_to_print()
- if logs_to_print:
- print('\n'.join(x['message'] for x in logs_to_print))
- if self._closed:
- break
- time.sleep(1)
+ if print_logs:
+ self.add_log_listener(
+ lambda log: print(
+ f"console.{log.level}: {log.message}", file=sys.stderr
+ )
+ )
- t = threading.Thread(target=print_log_handler)
- t.daemon = True
- t.start()
+ self._closed = False
def _init_chrome(self):
import selenium.webdriver
- import selenium.webdriver.chrome.options
- import selenium.webdriver.common.service
- import selenium.webdriver.chrome.service
- import selenium.webdriver.common.desired_capabilities
- chrome_options = selenium.webdriver.chrome.options.Options()
+ chrome_options = selenium.webdriver.ChromeOptions()
if self.headless:
- chrome_options.add_argument('--headless')
- chrome_options.add_experimental_option("excludeSwitches", ['enable-automation'])
+ chrome_options.add_argument("--headless")
+ chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
if self.docker:
# https://www.intricatecloud.io/2019/05/running-webdriverio-tests-using-headless-chrome-inside-a-container/
- chrome_options.add_argument('--no-sandbox')
- chrome_options.add_argument('--disable-gpu')
- chrome_options.add_argument('--disable-setuid-sandbox')
- chrome_options.add_argument('--disable-dev-shm-usage')
- chrome_options.add_argument('--window_size=%dx%d' %
- (self.window_size[0], self.window_size[1]))
+ chrome_options.add_argument("--no-sandbox")
+ chrome_options.add_argument("--disable-gpu")
+ chrome_options.add_argument("--disable-setuid-sandbox")
+ chrome_options.add_argument("--disable-dev-shm-usage")
+        # Chromium's switch is "--window-size" with a comma separator;
+        # "--window_size=WxH" is silently ignored by Chrome.
+        chrome_options.add_argument("--window-size=%d,%d" % self.window_size)
for arg in self.extra_command_line_args:
chrome_options.add_argument(arg)
- caps = chrome_options._caps
- caps['goog:loggingPrefs'] = {'browser': 'ALL'}
self.driver = selenium.webdriver.Chrome(options=chrome_options)
def _init_firefox(self):
import selenium.webdriver
- import selenium.webdriver.firefox.firefox_binary
- profile = selenium.webdriver.FirefoxProfile()
- profile.set_preference('devtools.console.stdout.content', True)
- binary = selenium.webdriver.firefox.firefox_binary.FirefoxBinary()
- for arg in self.extra_command_line_args:
- binary.add_command_line_options(arg)
- self.driver = selenium.webdriver.Firefox(firefox_profile=profile,
- firefox_binary=binary,
- service_log_path=self._logfile.name)
+
+ options = selenium.webdriver.FirefoxOptions()
+ options.arguments.extend(self.extra_command_line_args)
+ self.driver = selenium.webdriver.Firefox(
+ options=options,
+ )
def _init_driver(self):
- if self.browser == 'chrome':
+ import trio
+
+ if self.browser == "chrome":
self._init_chrome()
- elif self.browser == 'firefox':
+ elif self.browser == "firefox":
self._init_firefox()
else:
- raise ValueError('unsupported browser: %s, must be "chrome" or "firefox"' %
- (self.browser, ))
+ raise ValueError(
+ f'unsupported browser: {self.browser}, must be "chrome" or "firefox"'
+ )
+
+ def log_handler(driver):
+ async def start_listening(listener):
+ async for event in listener:
+ message = LogMessage(message=event.args[0].value, level=event.type_)
+                    with self._log_listeners_lock:
+                        for log_callback in self._log_listeners:
+                            log_callback(message)
+
+ async def run():
+ async with self.driver.bidi_connection() as connection:
+ session, devtools = connection.session, connection.devtools
+ await session.execute(devtools.page.enable())
+ await session.execute(devtools.runtime.enable())
+ listener = session.listen(devtools.runtime.ConsoleAPICalled)
+ with trio.CancelScope() as cancel_scope:
+ async with trio.open_nursery() as nursery:
+ nursery.start_soon(start_listening, listener)
+ while True:
+ await trio.sleep(2)
+ if not driver.service.is_connectable():
+ cancel_scope.cancel()
+
+ trio.run(run)
+
+ t = threading.Thread(target=log_handler, args=(self.driver,))
+ t.daemon = True
+ t.start()
+
self.driver.get(self.viewer.get_viewer_url())
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
- if self._logfile is not None:
- self._logfile.file.close()
- self._logfile = None
self.driver.quit()
self._closed = True
- def _get_new_logs(self):
- if self.browser == 'chrome':
- new_logs = self.driver.get_log('browser')
- else:
- cur_offset = self._logfile.file.tell()
- new_data = self._logfile.file.read()
- # rfind may return -1, still works
- end_within_data = new_data.rfind(b'\n') + 1
- new_data = new_data[:end_within_data]
- self._logfile.file.seek(cur_offset + end_within_data)
- new_logs = []
- for msg in new_data.decode().split('\n'):
- msg = msg.strip()
- if not msg: continue
- if (not msg.startswith('console.log: ') and not msg.startswith('JavaScript ')):
- continue
- new_logs.append({'message': msg})
- self._pending_logs.extend(new_logs)
- self._pending_logs_to_print.extend(new_logs)
-
- def _get_logs_to_print(self):
- with self._logs_lock:
- self._get_new_logs()
- new_logs = self._pending_logs_to_print
- self._pending_logs_to_print = []
- return new_logs
-
- def get_log(self):
- with self._logs_lock:
- self._get_new_logs()
- new_logs = self._pending_logs
- self._pending_logs = []
- return new_logs
-
- def get_log_messages(self):
- return '\n'.join(x['message'] for x in self.get_log())
+ def add_log_listener(self, listener: LogListener):
+ with self._log_listeners_lock:
+ self._log_listeners[listener] = None
+
+ def remove_log_listener(self, listener: LogListener):
+ with self._log_listeners_lock:
+ return self._log_listeners.pop(listener, True) is None
+
+ @contextlib.contextmanager
+ def log_listener(self, listener: LogListener):
+ try:
+ self.add_log_listener(listener)
+ yield
+ finally:
+ self.remove_log_listener(listener)
+
+ @contextlib.contextmanager
+ def wait_for_log_message(self, pattern: str, timeout: Optional[float] = None):
+ event = threading.Event()
+
+ def handle_message(msg):
+ if event.is_set():
+ return
+ if re.fullmatch(pattern, msg.message):
+ event.set()
+
+ with self.log_listener(handle_message):
+ yield
+ if not event.wait(timeout):
+ raise TimeoutError
def sync(self):
"""Wait until client is ready."""
@@ -170,17 +190,17 @@ def sync(self):
def reload_browser(self):
"""Reloads the browser (useful if it crashes/becomes unresponsive)."""
- with self._logs_lock:
- try:
- self.driver.quit()
- except:
- pass
- self._init_driver()
+ try:
+ self.driver.quit()
+ except Exception:
+ pass
+ self._init_driver()
@property
def root_element(self):
- return self.driver.find_element('xpath', '//body')
+ return self.driver.find_element("xpath", "//body")
def action_chain(self):
import selenium.webdriver
+
return selenium.webdriver.common.action_chains.ActionChains(self.driver)
diff --git a/python/neuroglancer/write_annotations.py b/python/neuroglancer/write_annotations.py
index 2408b5d766..ffcb31f748 100644
--- a/python/neuroglancer/write_annotations.py
+++ b/python/neuroglancer/write_annotations.py
@@ -14,16 +14,17 @@
least one file written per annotation.
"""
-from . import coordinate_space
-from typing import List, Sequence, NamedTuple, Optional
-from typing_extensions import Literal
-from . import viewer_state
-import numbers
-import io
import json
+import numbers
import os
-import numpy as np
+import pathlib
import struct
+from collections.abc import Sequence
+from typing import Literal, NamedTuple, Optional, Union, cast
+
+import numpy as np
+
+from . import coordinate_space, viewer_state
class Annotation(NamedTuple):
@@ -32,27 +33,31 @@ class Annotation(NamedTuple):
relationships: Sequence[Sequence[int]]
-_PROPERTY_DTYPES = {
- 'uint8': (('|u1', ), 1),
- 'uint16': ((' Iterator[Callable[[pathlib.Path], str]]:
+ servers: list[neuroglancer.static_file_server.StaticFileServer] = []
- servers: neuroglancer.static_file_server.StaticFileServer = []
def serve_path(path: pathlib.Path):
server = neuroglancer.static_file_server.StaticFileServer(str(path))
servers.append(server)
diff --git a/python/tests/context_lost_test.py b/python/tests/context_lost_test.py
index 4a3c599a27..296df2bd71 100644
--- a/python/tests/context_lost_test.py
+++ b/python/tests/context_lost_test.py
@@ -13,43 +13,48 @@
# limitations under the License.
"""Tests WebGL context lose/restore handling."""
-import numpy as np
+
import neuroglancer
-import time
+import numpy as np
+
def test_context_lost(webdriver):
a = np.array([[[255]]], dtype=np.uint8)
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
name="a",
layer=neuroglancer.ImageLayer(
source=neuroglancer.LocalVolume(data=a, dimensions=s.dimensions),
- shader='void main () { emitRGB(vec3(1.0, 0.0, 0.0)); }',
+ shader="void main () { emitRGB(vec3(1.0, 0.0, 0.0)); }",
),
)
- s.layout = 'xy'
+ s.layout = "xy"
s.cross_section_scale = 1e-6
s.position = [0.5, 0.5, 0.5]
s.show_axis_lines = False
screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
- webdriver.driver.execute_script('''
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
+ with webdriver.wait_for_log_message("Lost WebGL context.*", timeout=3):
+ webdriver.driver.execute_script(
+ """
window.webglLoseContext = viewer.gl.getExtension('WEBGL_lose_context');
window.webglLoseContext.loseContext();
-''')
- time.sleep(3) # Wait a few seconds for log messages to be written
- browser_log = webdriver.get_log_messages()
- assert 'Lost WebGL context' in browser_log
- webdriver.driver.execute_script('''
+"""
+ )
+ with webdriver.wait_for_log_message("WebGL context restored.*", timeout=3):
+ webdriver.driver.execute_script(
+ """
window.webglLoseContext.restoreContext();
-''')
- time.sleep(3) # Wait a few seconds for log messages to be written
- browser_log = webdriver.get_log_messages()
- assert 'WebGL context restored' in browser_log
+"""
+ )
screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
diff --git a/python/tests/display_dimensions_test.py b/python/tests/display_dimensions_test.py
index 8d3b1eb7ce..40144c01a8 100644
--- a/python/tests/display_dimensions_test.py
+++ b/python/tests/display_dimensions_test.py
@@ -13,34 +13,40 @@
# limitations under the License.
"""Tests that display_dimensions can be set."""
-import numpy as np
import neuroglancer
+import numpy as np
import pytest
-@pytest.mark.parametrize('display_dimensions,layout,key,expected_position', [
- (['x', 'y', 'z'], 'xy', 'LEFT', [5, 5, 4]),
- (['x', 'y', 'z'], 'xy', 'RIGHT', [5, 5, 6]),
- (['x', 'y', 'z'], 'xy', 'UP', [5, 4, 5]),
- (['x', 'y', 'z'], 'xy', 'DOWN', [5, 6, 5]),
- (['x', 'y', 'z'], 'xy', ',', [4, 5, 5]),
- (['x', 'y', 'z'], 'xy', '.', [6, 5, 5]),
- (['z', 'y', 'x'], 'xy', 'LEFT', [4, 5, 5]),
- (['z', 'y', 'x'], 'xy', 'RIGHT', [6, 5, 5]),
- (['z', 'y', 'x'], 'xy', 'UP', [5, 4, 5]),
- (['z', 'y', 'x'], 'xy', 'DOWN', [5, 6, 5]),
- (['z', 'y', 'x'], 'xy', ',', [5, 5, 4]),
- (['z', 'y', 'x'], 'xy', '.', [5, 5, 6]),
-])
-def test_display_dimensions(webdriver, display_dimensions, layout, key, expected_position):
+@pytest.mark.parametrize(
+ "display_dimensions,layout,key,expected_position",
+ [
+ (["x", "y", "z"], "xy", "LEFT", [5, 5, 4]),
+ (["x", "y", "z"], "xy", "RIGHT", [5, 5, 6]),
+ (["x", "y", "z"], "xy", "UP", [5, 4, 5]),
+ (["x", "y", "z"], "xy", "DOWN", [5, 6, 5]),
+ (["x", "y", "z"], "xy", ",", [4, 5, 5]),
+ (["x", "y", "z"], "xy", ".", [6, 5, 5]),
+ (["z", "y", "x"], "xy", "LEFT", [4, 5, 5]),
+ (["z", "y", "x"], "xy", "RIGHT", [6, 5, 5]),
+ (["z", "y", "x"], "xy", "UP", [5, 4, 5]),
+ (["z", "y", "x"], "xy", "DOWN", [5, 6, 5]),
+ (["z", "y", "x"], "xy", ",", [5, 5, 4]),
+ (["z", "y", "x"], "xy", ".", [5, 5, 6]),
+ ],
+)
+def test_display_dimensions(
+ webdriver, display_dimensions, layout, key, expected_position
+):
from selenium.webdriver.common.keys import Keys
+
if len(key) > 0 and hasattr(Keys, key):
key = getattr(Keys, key)
a = np.zeros((10, 10, 10), dtype=np.uint8)
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["z", "y", "x"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["z", "y", "x"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
name="a",
layer=neuroglancer.ImageLayer(
@@ -51,6 +57,8 @@ def test_display_dimensions(webdriver, display_dimensions, layout, key, expected
s.layout = layout
s.position = [5.5, 5.5, 5.5]
webdriver.sync()
- webdriver.action_chain().move_to_element_with_offset(webdriver.root_element, 100, 100).click().send_keys(key).perform()
+ webdriver.action_chain().move_to_element_with_offset(
+ webdriver.root_element, 100, 100
+ ).click().send_keys(key).perform()
webdriver.sync()
assert np.floor(webdriver.viewer.state.position).tolist() == expected_position
diff --git a/python/tests/equivalence_map_test.py b/python/tests/equivalence_map_test.py
index 0a0f3ec4b7..e5d212b902 100644
--- a/python/tests/equivalence_map_test.py
+++ b/python/tests/equivalence_map_test.py
@@ -13,56 +13,54 @@
# limitations under the License.
"""Tests for equivalence_map.py"""
-from __future__ import absolute_import
from neuroglancer import equivalence_map
def test_basic():
-
m = equivalence_map.EquivalenceMap()
for i in range(24):
- assert 0 == m[i]
+ assert m[i] == 0
assert i + 1 == m[i + 1]
- assert set([i + 1]) == set(m.members(i + 1))
+ assert {i + 1} == set(m.members(i + 1))
result = m.union(i, i + 1)
assert result == 0
- assert 0 == m[i]
- assert 0 == m[i + 1]
+ assert m[i] == 0
+ assert m[i + 1] == 0
assert set(range(i + 2)) == set(m.members(i))
for i in range(25, 49):
- assert 25 == m[i]
+ assert m[i] == 25
assert i + 1 == m[i + 1]
result = m.union(i, i + 1)
assert result == 25
- assert 25 == m[i]
- assert 25 == m[i + 1]
+ assert m[i] == 25
+ assert m[i + 1] == 25
assert set(range(25, i + 2)) == set(m.members(i))
assert m[15] != m[40]
result = m.union(15, 40)
- assert 0 == result
- assert 0 == m[15]
- assert 0 == m[40]
+ assert result == 0
+ assert m[15] == 0
+ assert m[40] == 0
assert set(range(50)) == set(m.members(15))
for i in range(50):
- assert 0 == m[i]
+ assert m[i] == 0
assert set(range(50)) == set(m.members(i))
for i in range(51, 100):
- assert set([i]) == set(m.members(i))
+ assert {i} == set(m.members(i))
def test_init_simple():
m = equivalence_map.EquivalenceMap([[1, 2, 3], [4, 5]])
- assert 1 == m[1]
- assert 1 == m[2]
- assert 1 == m[3]
- assert 4 == m[4]
- assert 4 == m[5]
- assert set([1, 2, 3, 4, 5]) == set(m.keys())
+ assert m[1] == 1
+ assert m[2] == 1
+ assert m[3] == 1
+ assert m[4] == 4
+ assert m[5] == 4
+ assert {1, 2, 3, 4, 5} == set(m.keys())
assert [[1, 2, 3], [4, 5]] == m.to_json()
diff --git a/python/tests/fill_value_test.py b/python/tests/fill_value_test.py
index f11eae6885..6b75146e85 100644
--- a/python/tests/fill_value_test.py
+++ b/python/tests/fill_value_test.py
@@ -13,7 +13,6 @@
# limitations under the License.
import pathlib
-from typing import Tuple
import neuroglancer
import numpy as np
@@ -23,84 +22,83 @@
def check_screenshot_color(webdriver, source, expected_value):
-
with webdriver.viewer.txn() as s:
# s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z", "c"],
# units=["nm", "nm", "nm", ""],
# scales=[1, 1, 1, 1])
- s.layout = 'xy'
+ s.layout = "xy"
# s.cross_section_scale = 1e-6
s.show_axis_lines = False
s.layers.append(
name="a",
- layer=neuroglancer.SegmentationLayer(source=neuroglancer.LayerDataSource(
- url=source,
- # transform=neuroglancer.CoordinateSpaceTransform(input_dimensions=s.dimensions,
- # output_dimensions=s.dimensions)
+ layer=neuroglancer.SegmentationLayer(
+ source=neuroglancer.LayerDataSource(
+ url=source,
+ # transform=neuroglancer.CoordinateSpaceTransform(input_dimensions=s.dimensions,
+ # output_dimensions=s.dimensions)
+ ),
+ hide_segment_zero=False,
+ hover_highlight=False,
+ segment_colors={expected_value: "#ff0000"},
),
- hide_segment_zero=False,
- hover_highlight=False,
- segment_colors={expected_value: "#ff0000"}))
+ )
webdriver.sync()
screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
screenshot = screenshot_response.screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
@pytest.mark.parametrize("driver", ["neuroglancer_precomputed", "zarr", "n5"])
-def test_zero_fill_value(tempdir_server: Tuple[pathlib.Path, str], webdriver, driver: str):
+def test_zero_fill_value(
+ tempdir_server: tuple[pathlib.Path, str], webdriver, driver: str
+):
tmp_path, server_url = tempdir_server
- ts.open({
- "driver": driver,
- "kvstore": {
- "driver": "file",
- "path": str(tmp_path)
- }
- },
- create=True,
- dtype=ts.uint8,
- shape=[100, 200, 300, 1]).result()
+ ts.open(
+ {"driver": driver, "kvstore": {"driver": "file", "path": str(tmp_path)}},
+ create=True,
+ dtype=ts.uint8,
+ shape=[100, 200, 300, 1],
+ ).result()
protocol = driver if driver != "neuroglancer_precomputed" else "precomputed"
check_screenshot_color(webdriver, f"{protocol}://{server_url}", expected_value=0)
-def test_nonzero_fill_value(tempdir_server: Tuple[pathlib.Path, str], webdriver):
+def test_nonzero_fill_value(tempdir_server: tuple[pathlib.Path, str], webdriver):
tmp_path, server_url = tempdir_server
- ts.open({
- "driver": "zarr",
- "kvstore": {
- "driver": "file",
- "path": str(tmp_path)
- }
- },
- create=True,
- fill_value=42,
- dtype=ts.uint8,
- shape=[100, 200, 300, 1]).result()
+ ts.open(
+ {"driver": "zarr", "kvstore": {"driver": "file", "path": str(tmp_path)}},
+ create=True,
+ fill_value=42,
+ dtype=ts.uint8,
+ shape=[100, 200, 300, 1],
+ ).result()
protocol = "zarr"
check_screenshot_color(webdriver, f"{protocol}://{server_url}", expected_value=42)
@pytest.mark.parametrize("dtype", [ts.uint32, ts.uint64])
-def test_compressed_segmentation_fill_value(tempdir_server: Tuple[pathlib.Path, str], webdriver, dtype: ts.dtype):
+def test_compressed_segmentation_fill_value(
+ tempdir_server: tuple[pathlib.Path, str], webdriver, dtype
+):
tmp_path, server_url = tempdir_server
- ts.open({
- "driver": "neuroglancer_precomputed",
- "kvstore": {
- "driver": "file",
- "path": str(tmp_path)
- },
- "scale_metadata": {
- "encoding": "compressed_segmentation",
+ ts.open(
+ {
+ "driver": "neuroglancer_precomputed",
+ "kvstore": {"driver": "file", "path": str(tmp_path)},
+ "scale_metadata": {
+ "encoding": "compressed_segmentation",
+ },
},
- },
- create=True,
- dtype=dtype,
- shape=[100, 200, 300, 1]).result()
+ create=True,
+ dtype=dtype,
+ shape=[100, 200, 300, 1],
+ ).result()
protocol = "precomputed"
check_screenshot_color(webdriver, f"{protocol}://{server_url}", expected_value=0)
diff --git a/python/tests/linked_segment_group_test.py b/python/tests/linked_segment_group_test.py
index 014363b8e8..90009fa6a7 100644
--- a/python/tests/linked_segment_group_test.py
+++ b/python/tests/linked_segment_group_test.py
@@ -13,7 +13,6 @@
# limitations under the License.
"""Tests for linked_segmentation_{,color}group."""
-from __future__ import absolute_import
import neuroglancer
import numpy as np
@@ -23,14 +22,14 @@ def test_linked_segmentation_group(webdriver):
a = np.array([[[42]]], dtype=np.uint8)
b = np.array([[[43]]], dtype=np.uint8)
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
name="a",
layer=neuroglancer.SegmentationLayer(
source=neuroglancer.LocalVolume(data=a, dimensions=s.dimensions),
- segment_default_color='#f00',
+ segment_default_color="#f00",
segments=[43],
),
visible=False,
@@ -39,24 +38,28 @@ def test_linked_segmentation_group(webdriver):
name="b",
layer=neuroglancer.SegmentationLayer(
source=neuroglancer.LocalVolume(data=b, dimensions=s.dimensions),
- linked_segmentation_group='a',
+ linked_segmentation_group="a",
),
)
- s.layout = 'xy'
+ s.layout = "xy"
s.cross_section_scale = 1e-6
s.show_axis_lines = False
s.position = [0.5, 0.5, 0.5]
webdriver.sync()
screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
- assert screenshot_response.viewer_state.layers[0].segment_default_color == '#ff0000'
+ assert screenshot_response.viewer_state.layers[0].segment_default_color == "#ff0000"
screenshot = screenshot_response.screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
with webdriver.viewer.txn() as s:
s.layers[1].linked_segmentation_color_group = False
- s.layers[1].segment_default_color = '#0f0'
+ s.layers[1].segment_default_color = "#0f0"
webdriver.sync()
screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
screenshot = screenshot_response.screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
diff --git a/python/tests/local_volume_test.py b/python/tests/local_volume_test.py
index 58a7c90b94..20d67b5b7e 100644
--- a/python/tests/local_volume_test.py
+++ b/python/tests/local_volume_test.py
@@ -12,32 +12,36 @@ def test_numpy_data():
dimensions = neuroglancer.CoordinateSpace(
names=local_volume.data._default_labels,
units=local_volume.data._default_units,
- scales=local_volume.data._default_scales)
+ scales=local_volume.data._default_scales,
+ )
assert local_volume.dimensions.to_json() == dimensions.to_json()
def test_tensorstore_defaults():
ts = pytest.importorskip("tensorstore")
shape = (8, 16, 32)
- data = ts.open({
- 'driver': 'n5',
- 'kvstore': {
- 'driver': 'memory',
- },
- 'metadata': {
- 'dataType': 'uint8',
- 'dimensions': shape,
- },
- 'create': True,
- 'delete_existing': True,
- }).result()
+ data = ts.open(
+ {
+ "driver": "n5",
+ "kvstore": {
+ "driver": "memory",
+ },
+ "metadata": {
+ "dataType": "uint8",
+ "dimensions": shape,
+ },
+ "create": True,
+ "delete_existing": True,
+ }
+ ).result()
local_volume = neuroglancer.LocalVolume(data)
assert local_volume.rank == len(shape)
assert local_volume.shape == shape
dimensions = neuroglancer.CoordinateSpace(
names=local_volume.data._default_labels,
units=local_volume.data._default_units,
- scales=local_volume.data._default_scales)
+ scales=local_volume.data._default_scales,
+ )
assert local_volume.dimensions.to_json() == dimensions.to_json()
@@ -45,32 +49,33 @@ def test_tensorstore_features():
ts = pytest.importorskip("tensorstore")
shape = (8, 16, 32)
offset = (2, 4, 8)
- labels = ['x', 'y', '']
- units = ['m', 'm', 'm']
- scales = [1., 1., 0.5]
- data = ts.open({
- 'driver': 'n5',
- 'kvstore': {
- 'driver': 'memory',
- },
- 'metadata': {
- 'dataType': 'uint8',
- 'dimensions': shape,
- 'units': units,
- 'resolution': scales,
- },
- 'transform': {
- 'input_labels': labels,
- 'input_inclusive_min': offset,
- },
- 'create': True,
- 'delete_existing': True,
- }).result()
+ labels = ["x", "y", ""]
+ units = ["m", "m", "m"]
+ scales = [1.0, 1.0, 0.5]
+ data = ts.open(
+ {
+ "driver": "n5",
+ "kvstore": {
+ "driver": "memory",
+ },
+ "metadata": {
+ "dataType": "uint8",
+ "dimensions": shape,
+ "units": units,
+ "resolution": scales,
+ },
+ "transform": {
+ "input_labels": labels,
+ "input_inclusive_min": offset,
+ },
+ "create": True,
+ "delete_existing": True,
+ }
+ ).result()
local_volume = neuroglancer.LocalVolume(data)
assert local_volume.rank == len(shape)
- assert local_volume.shape == tuple([s-o for s, o in zip(shape, offset)])
+ assert local_volume.shape == tuple([s - o for s, o in zip(shape, offset)])
dimensions = neuroglancer.CoordinateSpace(
- names=['x', 'y', 'd2'],
- units=units,
- scales=scales)
+ names=["x", "y", "d2"], units=units, scales=scales
+ )
assert local_volume.dimensions.to_json() == dimensions.to_json()
diff --git a/python/tests/managed_layer_test.py b/python/tests/managed_layer_test.py
index b156c4786d..819651b6c3 100644
--- a/python/tests/managed_layer_test.py
+++ b/python/tests/managed_layer_test.py
@@ -1,9 +1,10 @@
import neuroglancer
+
def test_visible():
- layer = neuroglancer.ManagedLayer('a', {'type': 'segmentation', 'visible': False})
- assert layer.name == 'a'
- assert layer.visible == False
- assert layer.to_json() == {'name': 'a', 'type': 'segmentation', 'visible': False}
+ layer = neuroglancer.ManagedLayer("a", {"type": "segmentation", "visible": False})
+ assert layer.name == "a"
+ assert layer.visible is False
+ assert layer.to_json() == {"name": "a", "type": "segmentation", "visible": False}
layer.visible = True
- assert layer.to_json() == {'name': 'a', 'type': 'segmentation'}
+ assert layer.to_json() == {"name": "a", "type": "segmentation"}
diff --git a/python/tests/merge_tool_test.py b/python/tests/merge_tool_test.py
index 4d2f8e5deb..f7e0011245 100644
--- a/python/tests/merge_tool_test.py
+++ b/python/tests/merge_tool_test.py
@@ -13,32 +13,27 @@
# limitations under the License.
"""Tests for merge_tool.py"""
-from __future__ import absolute_import
from neuroglancer.tool import merge_tool
-def test_basic():
+def test_basic():
mask = merge_tool.BlockMask()
mask.add(0, (5, 3, 1))
- assert mask.blocks == [{(5, 3, 1): 1}, {(2, 1, 0): 1}, {(1, 0, 0): 1}, {(0, 0, 0): 1}]
+ assert mask.blocks == [
+ {(5, 3, 1): 1},
+ {(2, 1, 0): 1},
+ {(1, 0, 0): 1},
+ {(0, 0, 0): 1},
+ ]
mask.add(0, (5, 3, 0))
assert mask.blocks == [
- {
- (5, 3, 0): 1,
- (5, 3, 1): 1
- },
- {
- (2, 1, 0): 2
- },
- {
- (1, 0, 0): 2
- },
- {
- (0, 0, 0): 2
- },
+ {(5, 3, 0): 1, (5, 3, 1): 1},
+ {(2, 1, 0): 2},
+ {(1, 0, 0): 2},
+ {(0, 0, 0): 2},
]
mask.add(0, (5, 2, 1))
@@ -55,31 +50,19 @@ def test_basic():
(5, 2, 0): 1,
(5, 2, 1): 1,
(5, 3, 0): 1,
- (5, 3, 1): 1
- },
- {
- (2, 1, 0): 7
- },
- {
- (1, 0, 0): 7
- },
- {
- (0, 0, 0): 7
+ (5, 3, 1): 1,
},
+ {(2, 1, 0): 7},
+ {(1, 0, 0): 7},
+ {(0, 0, 0): 7},
]
mask.add(0, (4, 3, 0))
assert mask.blocks == [
{},
- {
- (2, 1, 0): 8
- },
- {
- (1, 0, 0): 8
- },
- {
- (0, 0, 0): 8
- },
+ {(2, 1, 0): 8},
+ {(1, 0, 0): 8},
+ {(0, 0, 0): 8},
]
mask.remove(0, (4, 3, 0))
@@ -91,17 +74,11 @@ def test_basic():
(5, 2, 0): 1,
(5, 2, 1): 1,
(5, 3, 0): 1,
- (5, 3, 1): 1
- },
- {
- (2, 1, 0): 7
- },
- {
- (1, 0, 0): 7
- },
- {
- (0, 0, 0): 7
+ (5, 3, 1): 1,
},
+ {(2, 1, 0): 7},
+ {(1, 0, 0): 7},
+ {(0, 0, 0): 7},
]
mask.remove(1, (2, 1, 0))
diff --git a/python/tests/on_demand_mesh_generator_test.py b/python/tests/on_demand_mesh_generator_test.py
index fa2378704f..6040bc2302 100644
--- a/python/tests/on_demand_mesh_generator_test.py
+++ b/python/tests/on_demand_mesh_generator_test.py
@@ -12,16 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import os
import numpy as np
-from neuroglancer import local_volume
-from neuroglancer import viewer_state
-from neuroglancer import test_util
+from neuroglancer import local_volume, test_util, viewer_state
-testdata_dir = os.path.join(os.path.dirname(__file__), '..', 'testdata', 'mesh')
+testdata_dir = os.path.join(os.path.dirname(__file__), "..", "testdata", "mesh")
def test_simple_mesh():
@@ -30,16 +27,24 @@ def test_simple_mesh():
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
],
- dtype=np.uint64).transpose()
- data = np.pad(data, 1, 'constant')
- dimensions = viewer_state.CoordinateSpace(names=['x', 'y', 'z'],
- scales=[1, 1, 1],
- units=['m', 'm', 'm'],)
+ dtype=np.uint64,
+ ).transpose()
+ data = np.pad(data, 1, "constant")
+ dimensions = viewer_state.CoordinateSpace(
+ names=["x", "y", "z"],
+ scales=[1, 1, 1],
+ units=["m", "m", "m"],
+ )
vol = local_volume.LocalVolume(
- data, dimensions=dimensions,
+ data,
+ dimensions=dimensions,
mesh_options=dict(
max_quadrics_error=1e6,
),
)
- test_util.check_golden_contents(os.path.join(testdata_dir, 'simple1'), vol.get_object_mesh(1))
- test_util.check_golden_contents(os.path.join(testdata_dir, 'simple2'), vol.get_object_mesh(2))
+ test_util.check_golden_contents(
+ os.path.join(testdata_dir, "simple1"), vol.get_object_mesh(1)
+ )
+ test_util.check_golden_contents(
+ os.path.join(testdata_dir, "simple2"), vol.get_object_mesh(2)
+ )
diff --git a/python/tests/screenshot_test.py b/python/tests/screenshot_test.py
index 0604e5872f..ba28222bbf 100644
--- a/python/tests/screenshot_test.py
+++ b/python/tests/screenshot_test.py
@@ -13,26 +13,29 @@
# limitations under the License.
"""Tests basic screenshot functionality."""
-import numpy as np
import neuroglancer
+import numpy as np
+
def test_screenshot_basic(webdriver):
a = np.array([[[255]]], dtype=np.uint8)
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
name="a",
layer=neuroglancer.ImageLayer(
source=neuroglancer.LocalVolume(data=a, dimensions=s.dimensions),
- shader='void main () { emitRGB(vec3(1.0, 0.0, 0.0)); }',
+ shader="void main () { emitRGB(vec3(1.0, 0.0, 0.0)); }",
),
)
- s.layout = 'xy'
+ s.layout = "xy"
s.cross_section_scale = 1e-6
s.show_axis_lines = False
s.position = [0.5, 0.5, 0.5]
screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
diff --git a/python/tests/segment_colors_test.py b/python/tests/segment_colors_test.py
index e0bdadab15..27f4aad275 100644
--- a/python/tests/segment_colors_test.py
+++ b/python/tests/segment_colors_test.py
@@ -13,11 +13,10 @@
# limitations under the License.
"""Tests for segment_colors and segment_default_color."""
-from __future__ import absolute_import
import neuroglancer
-from neuroglancer.segment_colors import (hash_function, hex_string_from_segment_id)
import numpy as np
+from neuroglancer.segment_colors import hash_function, hex_string_from_segment_id
def test_hash_function():
@@ -47,10 +46,10 @@ def test_hash_function():
def test_hex_string_from_segment_id():
- """ Test that the hex string obtained
+ """Test that the hex string obtained
via the Python implementation is identical to
the value obtained using the javascript implementation
- for a few different color seed/segment id combinations """
+ for a few different color seed/segment id combinations"""
color_seed = 0
segment_id = 39
result = hex_string_from_segment_id(color_seed, segment_id)
@@ -75,46 +74,52 @@ def test_hex_string_from_segment_id():
def test_segment_colors(webdriver):
a = np.array([[[42]]], dtype=np.uint8)
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
name="a",
layer=neuroglancer.SegmentationLayer(
source=neuroglancer.LocalVolume(data=a, dimensions=s.dimensions),
- segment_colors={42: '#f00'},
+ segment_colors={42: "#f00"},
),
)
- s.layout = 'xy'
+ s.layout = "xy"
s.cross_section_scale = 1e-6
s.show_axis_lines = False
s.position = [0.5, 0.5, 0.5]
assert list(s.layers[0].segment_colors.keys()) == [42]
- assert s.layers[0].segment_colors[42] == '#f00'
+ assert s.layers[0].segment_colors[42] == "#f00"
webdriver.sync()
screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
- assert screenshot_response.viewer_state.layers[0].segment_colors[42] == '#ff0000'
+ assert screenshot_response.viewer_state.layers[0].segment_colors[42] == "#ff0000"
screenshot = screenshot_response.screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
with webdriver.viewer.txn() as s:
- s.layers[0].segment_colors[42] = '#0f0'
+ s.layers[0].segment_colors[42] = "#0f0"
webdriver.sync()
screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
- assert screenshot_response.viewer_state.layers[0].segment_colors[42] == '#00ff00'
+ assert screenshot_response.viewer_state.layers[0].segment_colors[42] == "#00ff00"
screenshot = screenshot_response.screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
# Changing segment_default_color does not affect the color since an explicit color is specified.
with webdriver.viewer.txn() as s:
- s.layers[0].segment_default_color = '#fff'
+ s.layers[0].segment_default_color = "#fff"
webdriver.sync()
screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
- assert screenshot_response.viewer_state.layers[0].segment_default_color == '#ffffff'
+ assert screenshot_response.viewer_state.layers[0].segment_default_color == "#ffffff"
screenshot = screenshot_response.screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
# Removing the explicit color causes the default color to be used.
with webdriver.viewer.txn() as s:
@@ -123,5 +128,6 @@ def test_segment_colors(webdriver):
screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
screenshot = screenshot_response.screenshot
np.testing.assert_array_equal(
- screenshot.image_pixels, np.tile(np.array([255, 255, 255, 255], dtype=np.uint8),
- (10, 10, 1)))
+ screenshot.image_pixels,
+ np.tile(np.array([255, 255, 255, 255], dtype=np.uint8), (10, 10, 1)),
+ )
diff --git a/python/tests/selected_values_test.py b/python/tests/selected_values_test.py
index 60f58b84eb..55b6b2c126 100644
--- a/python/tests/selected_values_test.py
+++ b/python/tests/selected_values_test.py
@@ -13,67 +13,85 @@
# limitations under the License.
"""Tests that selected values can be retrieved from actions."""
-import numpy as np
-import neuroglancer
import threading
+
+import neuroglancer
+import numpy as np
import pytest
def setup_viewer(viewer, dtype, value, layer_type):
a = np.array([[[value]]], dtype=dtype)
with viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
name="a",
layer=layer_type(
source=neuroglancer.LocalVolume(data=a, dimensions=s.dimensions),
),
)
- s.layout = 'xy'
+ s.layout = "xy"
s.cross_section_scale = 1e-6
s.show_axis_lines = False
s.position = [0.5, 0.5, 0.5]
+
def get_selected_value(webdriver):
event = threading.Event()
result = [None]
+
def my_action(s):
result[0] = s
event.set()
- webdriver.viewer.actions.add('my-action', my_action)
+
+ webdriver.viewer.actions.add("my-action", my_action)
with webdriver.viewer.config_state.txn() as s:
s.show_ui_controls = False
s.show_panel_borders = False
- s.input_event_bindings.slice_view['click0'] = 'my-action'
+ s.input_event_bindings.slice_view["click0"] = "my-action"
webdriver.sync()
- webdriver.action_chain().move_to_element_with_offset(webdriver.root_element, 300,
- 300).click().perform()
+ webdriver.action_chain().move_to_element_with_offset(
+ webdriver.root_element, 300, 300
+ ).click().perform()
event.wait()
action_state = result[0]
assert action_state is not None
- np.testing.assert_array_equal(np.floor(action_state.mouse_voxel_coordinates), [0, 0, 0])
+ np.testing.assert_array_equal(
+ np.floor(action_state.mouse_voxel_coordinates), [0, 0, 0]
+ )
return action_state
-@pytest.mark.parametrize('dtype,value,layer_type', [
- (np.uint8, 1, neuroglancer.ImageLayer),
- (np.uint32, 2**32 - 1, neuroglancer.ImageLayer),
- (np.uint64, 2**64 - 1, neuroglancer.ImageLayer),
- (np.uint64, 2**64 - 1, neuroglancer.SegmentationLayer),
- (np.float32, 1.5, neuroglancer.ImageLayer),
-])
+
+@pytest.mark.parametrize(
+ "dtype,value,layer_type",
+ [
+ (np.uint8, 1, neuroglancer.ImageLayer),
+ (np.uint32, 2**32 - 1, neuroglancer.ImageLayer),
+ (np.uint64, 2**64 - 1, neuroglancer.ImageLayer),
+ (np.uint64, 2**64 - 1, neuroglancer.SegmentationLayer),
+ (np.float32, 1.5, neuroglancer.ImageLayer),
+ ],
+)
def test_selected_value(webdriver, dtype, value, layer_type):
- setup_viewer(viewer=webdriver.viewer, dtype=dtype, value=value, layer_type=layer_type)
+ setup_viewer(
+ viewer=webdriver.viewer, dtype=dtype, value=value, layer_type=layer_type
+ )
action_state = get_selected_value(webdriver)
- assert action_state.selected_values['a'].value == value
+ assert action_state.selected_values["a"].value == value
+
def test_selected_value_with_equivalences(webdriver):
- setup_viewer(viewer=webdriver.viewer,
- dtype=np.uint64,
- value=2,
- layer_type=neuroglancer.SegmentationLayer)
+ setup_viewer(
+ viewer=webdriver.viewer,
+ dtype=np.uint64,
+ value=2,
+ layer_type=neuroglancer.SegmentationLayer,
+ )
with webdriver.viewer.txn() as s:
s.layers[0].equivalences = [[1, 2]]
action_state = get_selected_value(webdriver)
- assert action_state.selected_values['a'].value == neuroglancer.SegmentIdMapEntry(2, 1)
+ assert action_state.selected_values["a"].value == neuroglancer.SegmentIdMapEntry(
+ 2, 1
+ )
diff --git a/python/tests/shader_controls.py b/python/tests/shader_controls.py
index fccda963d8..ee6bca39c6 100644
--- a/python/tests/shader_controls.py
+++ b/python/tests/shader_controls.py
@@ -18,86 +18,94 @@
def test_invlerp(webdriver):
-
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y"], units="nm", scales=[1, 1]
+ )
s.position = [0.5, 0.5]
s.layers.append(
- name='image',
- layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(
- dimensions=s.dimensions,
- data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=42),
+ name="image",
+ layer=neuroglancer.ImageLayer(
+ source=neuroglancer.LocalVolume(
+ dimensions=s.dimensions,
+ data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=42),
+ ),
),
- ),
visible=True,
shader_controls={
- 'normalized': {
- 'range': [0, 42],
+ "normalized": {
+ "range": [0, 42],
},
},
)
- s.layout = 'xy'
+ s.layout = "xy"
s.cross_section_scale = 1e-6
s.show_axis_lines = False
- control = webdriver.viewer.state.layers['image'].shader_controls['normalized']
+ control = webdriver.viewer.state.layers["image"].shader_controls["normalized"]
assert isinstance(control, neuroglancer.InvlerpParameters)
np.testing.assert_equal(control.range, [0, 42])
def expect_color(color):
webdriver.sync()
screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)),
+ )
expect_color([255, 255, 255, 255])
with webdriver.viewer.txn() as s:
- s.layers['image'].shader_controls = {
- 'normalized': neuroglancer.InvlerpParameters(range=[42, 100]),
+ s.layers["image"].shader_controls = {
+ "normalized": neuroglancer.InvlerpParameters(range=[42, 100]),
}
expect_color([0, 0, 0, 255])
def test_slider(webdriver):
-
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y"], units="nm", scales=[1, 1]
+ )
s.position = [0.5, 0.5]
s.layers.append(
- name='image',
- layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(
- dimensions=s.dimensions,
- data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=42),
+ name="image",
+ layer=neuroglancer.ImageLayer(
+ source=neuroglancer.LocalVolume(
+ dimensions=s.dimensions,
+ data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=42),
+ ),
),
- ),
visible=True,
- shader='''
+ shader="""
#uicontrol float color slider(min=0, max=10)
void main() {
emitGrayscale(color);
}
-''',
+""",
shader_controls={
- 'color': 1,
+ "color": 1,
},
)
- s.layout = 'xy'
+ s.layout = "xy"
s.cross_section_scale = 1e-6
s.show_axis_lines = False
- control = webdriver.viewer.state.layers['image'].shader_controls['color']
+ control = webdriver.viewer.state.layers["image"].shader_controls["color"]
assert control == 1
def expect_color(color):
webdriver.sync()
screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)),
+ )
expect_color([255, 255, 255, 255])
with webdriver.viewer.txn() as s:
- s.layers['image'].shader_controls = {
- 'color': 0,
+ s.layers["image"].shader_controls = {
+ "color": 0,
}
expect_color([0, 0, 0, 255])
diff --git a/python/tests/skeleton_options_test.py b/python/tests/skeleton_options_test.py
index 91a04f8be6..2256af4eb0 100644
--- a/python/tests/skeleton_options_test.py
+++ b/python/tests/skeleton_options_test.py
@@ -13,18 +13,18 @@
# limitations under the License.
"""Tests that skeleton rendering options can be controlled via ViewerState."""
-import numpy as np
import neuroglancer
import neuroglancer.skeleton
-import threading
-import pytest
+import numpy as np
-dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'], units='nm', scales=[1, 1, 1])
+dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+)
class SkeletonSource(neuroglancer.skeleton.SkeletonSource):
def __init__(self):
- super(SkeletonSource, self).__init__(dimensions=dimensions)
+ super().__init__(dimensions=dimensions)
def get_skeleton(self, object_id):
return neuroglancer.skeleton.Skeleton(
@@ -36,37 +36,43 @@ def get_skeleton(self, object_id):
def test_skeleton_options(webdriver):
with webdriver.viewer.txn() as s:
s.dimensions = dimensions
- s.layout = 'xy'
+ s.layout = "xy"
s.layers.append(
- name='a',
+ name="a",
layer=neuroglancer.SegmentationLayer(
source=SkeletonSource(),
segments=[1],
),
)
s.layers[0].skeleton_rendering.line_width2d = 100
- s.layers[0].skeleton_rendering.shader = '''
+ s.layers[0].skeleton_rendering.shader = """
#uicontrol vec3 color color(default="white")
void main () {
emitRGB(color);
}
-'''
- s.layers[0].skeleton_rendering.shader_controls['color'] = '#f00'
+"""
+ s.layers[0].skeleton_rendering.shader_controls["color"] = "#f00"
s.show_axis_lines = False
screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
with webdriver.viewer.txn() as s:
- s.layout = '3d'
+ s.layout = "3d"
s.layers[0].skeleton_rendering.line_width3d = 100
screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
with webdriver.viewer.txn() as s:
- s.layers[0].source[0].subsources['default'] = False
+ s.layers[0].source[0].subsources["default"] = False
screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
- np.testing.assert_array_equal(screenshot.image_pixels,
- np.tile(np.array([0, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
+ np.testing.assert_array_equal(
+ screenshot.image_pixels,
+ np.tile(np.array([0, 0, 0, 255], dtype=np.uint8), (10, 10, 1)),
+ )
diff --git a/python/tests/title_test.py b/python/tests/title_test.py
index 4b391cde9d..2249a5ead0 100644
--- a/python/tests/title_test.py
+++ b/python/tests/title_test.py
@@ -15,36 +15,35 @@
import neuroglancer
import numpy as np
-import pytest
def test_title(webdriver):
a = np.array([[[255]]], dtype=np.uint8)
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
name="a",
- layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(data=a,
- dimensions=s.dimensions),
- ),
+ layer=neuroglancer.ImageLayer(
+ source=neuroglancer.LocalVolume(data=a, dimensions=s.dimensions),
+ ),
)
webdriver.sync()
- assert webdriver.driver.title == 'neuroglancer'
+ assert webdriver.driver.title == "neuroglancer"
with webdriver.viewer.txn() as s:
- s.title = 'the title'
+ s.title = "the title"
webdriver.sync()
- assert webdriver.driver.title == 'the title - neuroglancer'
+ assert webdriver.driver.title == "the title - neuroglancer"
with webdriver.viewer.txn() as s:
s.title = None
webdriver.sync()
- assert webdriver.driver.title == 'neuroglancer'
+ assert webdriver.driver.title == "neuroglancer"
diff --git a/python/tests/url_state_test.py b/python/tests/url_state_test.py
index 2719f5ed0e..76b11ec019 100644
--- a/python/tests/url_state_test.py
+++ b/python/tests/url_state_test.py
@@ -12,30 +12,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
from neuroglancer import url_state
def test_convert_string_literal():
- quote_initial = u'\''
- desired_quote_char = u'"'
+ quote_initial = "'"
+ desired_quote_char = '"'
quote_search = url_state.DOUBLE_QUOTE_PATTERN
- assert url_state._convert_string_literal(
- u"'hello'",
- quote_initial=quote_initial,
- quote_replace=desired_quote_char,
- quote_search=quote_search) == u"\"hello\""
-
- assert url_state._convert_string_literal(
- u"'hello\"foo'",
- quote_initial=quote_initial,
- quote_replace=desired_quote_char,
- quote_search=quote_search) == u"\"hello\\\"foo\""
+ assert (
+ url_state._convert_string_literal(
+ "'hello'",
+ quote_initial=quote_initial,
+ quote_replace=desired_quote_char,
+ quote_search=quote_search,
+ )
+ == '"hello"'
+ )
+
+ assert (
+ url_state._convert_string_literal(
+ "'hello\"foo'",
+ quote_initial=quote_initial,
+ quote_replace=desired_quote_char,
+ quote_search=quote_search,
+ )
+ == '"hello\\"foo"'
+ )
def test_url_safe_to_json():
- assert url_state.url_safe_to_json("""{'a':'b'_'b':'c'}""") == """{"a":"b","b":"c"}"""
+ assert (
+ url_state.url_safe_to_json("""{'a':'b'_'b':'c'}""") == """{"a":"b","b":"c"}"""
+ )
assert url_state.url_safe_to_json("""['a'_true]""") == """["a",true]"""
assert url_state.url_safe_to_json("""['a',true]""") == """["a",true]"""
diff --git a/python/tests/viewer_state_roundtrip_test.py b/python/tests/viewer_state_roundtrip_test.py
index b0920376e1..242a4c00ce 100644
--- a/python/tests/viewer_state_roundtrip_test.py
+++ b/python/tests/viewer_state_roundtrip_test.py
@@ -13,59 +13,61 @@
# limitations under the License.
"""Tests that ViewerState round trips through the Neuroglancer client."""
-import numpy as np
import neuroglancer
-import threading
-import pytest
+import numpy as np
def test_mesh_silhouette(webdriver):
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
- name='a',
- layer=neuroglancer.SegmentationLayer(source=neuroglancer.LocalVolume(
- data=np.zeros((10, 10, 10), dtype=np.uint8), dimensions=s.dimensions),
- mesh_silhouette_rendering=2),
+ name="a",
+ layer=neuroglancer.SegmentationLayer(
+ source=neuroglancer.LocalVolume(
+ data=np.zeros((10, 10, 10), dtype=np.uint8), dimensions=s.dimensions
+ ),
+ mesh_silhouette_rendering=2,
+ ),
)
state = webdriver.sync()
- assert state.layers['a'].mesh_silhouette_rendering == 2
+ assert state.layers["a"].mesh_silhouette_rendering == 2
def test_layer_subsources(webdriver):
with webdriver.viewer.txn() as s:
- s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
- units="nm",
- scales=[1, 1, 1])
+ s.dimensions = neuroglancer.CoordinateSpace(
+ names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
+ )
s.layers.append(
- name='a',
+ name="a",
layer=neuroglancer.SegmentationLayer(
- source=neuroglancer.LayerDataSource(url=neuroglancer.LocalVolume(
- data=np.zeros((10, 10, 10), dtype=np.uint8), dimensions=s.dimensions),
- enable_default_subsources=False,
- subsources={
- 'default': True,
- 'bounds': False,
- 'meshes': False
- })),
+ source=neuroglancer.LayerDataSource(
+ url=neuroglancer.LocalVolume(
+ data=np.zeros((10, 10, 10), dtype=np.uint8),
+ dimensions=s.dimensions,
+ ),
+ enable_default_subsources=False,
+ subsources={"default": True, "bounds": False, "meshes": False},
+ )
+ ),
)
state = webdriver.sync()
- assert state.layers['a'].source[0].subsources['default'].enabled == True
- assert 'bounds' not in state.layers['a'].source[0].subsources
- assert 'meshes' not in state.layers['a'].source[0].subsources
- assert state.layers['a'].source[0].enable_default_subsources == False
+ assert state.layers["a"].source[0].subsources["default"].enabled is True
+ assert "bounds" not in state.layers["a"].source[0].subsources
+ assert "meshes" not in state.layers["a"].source[0].subsources
+ assert state.layers["a"].source[0].enable_default_subsources is False
with webdriver.viewer.txn() as s:
s.layers[0].source[0].enable_default_subsources = True
- s.layers[0].source[0].subsources['bounds'] = False
- s.layers[0].source[0].subsources['meshes'] = False
+ s.layers[0].source[0].subsources["bounds"] = False
+ s.layers[0].source[0].subsources["meshes"] = False
state = webdriver.sync()
- assert state.layers[0].source[0].enable_default_subsources == True
- assert sorted(state.layers[0].source[0].subsources.keys()) == ['bounds', 'meshes']
- assert state.layers[0].source[0].subsources['bounds'].enabled == False
- assert state.layers[0].source[0].subsources['meshes'].enabled == False
+ assert state.layers[0].source[0].enable_default_subsources is True
+ assert sorted(state.layers[0].source[0].subsources.keys()) == ["bounds", "meshes"]
+ assert state.layers[0].source[0].subsources["bounds"].enabled is False
+ assert state.layers[0].source[0].subsources["meshes"].enabled is False
diff --git a/python/tests/viewer_state_test.py b/python/tests/viewer_state_test.py
index c96d12c7ce..1dc21fc0a6 100644
--- a/python/tests/viewer_state_test.py
+++ b/python/tests/viewer_state_test.py
@@ -12,106 +12,92 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-from neuroglancer import viewer_state
-import collections
import numpy as np
import pytest
+from neuroglancer import viewer_state
def test_coordinate_space_from_json():
x = viewer_state.CoordinateSpace(
- collections.OrderedDict([
- ['x', [4e-9, 'm']],
- ['y', [5e-9, 'm']],
- ['z', [6e-9, 'm']],
- ['t', [2, 's']],
- ]))
- assert x.names == ('x', 'y', 'z', 't')
+ {
+ "x": [4e-9, "m"],
+ "y": [5e-9, "m"],
+ "z": [6e-9, "m"],
+ "t": [2, "s"],
+ }
+ )
+ assert x.names == ("x", "y", "z", "t")
np.testing.assert_array_equal(x.scales, [4e-9, 5e-9, 6e-9, 2])
- assert x.units == ('m', 'm', 'm', 's')
+ assert x.units == ("m", "m", "m", "s")
assert x.rank == 4
- assert x[0] == viewer_state.DimensionScale(4e-9, 'm')
+ assert x[0] == viewer_state.DimensionScale(4e-9, "m")
assert x[0:2] == [
- viewer_state.DimensionScale(4e-9, 'm'),
- viewer_state.DimensionScale(5e-9, 'm')
+ viewer_state.DimensionScale(4e-9, "m"),
+ viewer_state.DimensionScale(5e-9, "m"),
]
- assert x['x'] == viewer_state.DimensionScale(4e-9, 'm')
- assert x[1] == viewer_state.DimensionScale(5e-9, 'm')
- assert x['y'] == viewer_state.DimensionScale(5e-9, 'm')
- assert x[2] == viewer_state.DimensionScale(6e-9, 'm')
- assert x['z'] == viewer_state.DimensionScale(6e-9, 'm')
- assert x[3] == viewer_state.DimensionScale(2, 's')
- assert x['t'] == viewer_state.DimensionScale(2, 's')
- assert x.to_json() == collections.OrderedDict([
- ['x', [4e-9, 'm']],
- ['y', [5e-9, 'm']],
- ['z', [6e-9, 'm']],
- ['t', [2, 's']],
- ])
+ assert x["x"] == viewer_state.DimensionScale(4e-9, "m")
+ assert x[1] == viewer_state.DimensionScale(5e-9, "m")
+ assert x["y"] == viewer_state.DimensionScale(5e-9, "m")
+ assert x[2] == viewer_state.DimensionScale(6e-9, "m")
+ assert x["z"] == viewer_state.DimensionScale(6e-9, "m")
+ assert x[3] == viewer_state.DimensionScale(2, "s")
+ assert x["t"] == viewer_state.DimensionScale(2, "s")
+ assert x.to_json() == {
+ "x": [4e-9, "m"],
+ "y": [5e-9, "m"],
+ "z": [6e-9, "m"],
+ "t": [2, "s"],
+ }
def test_coordinate_space_from_split():
- x = viewer_state.CoordinateSpace(names=['x', 'y', 'z', 't'],
- scales=[4, 5, 6, 2],
- units=['nm', 'nm', 'nm', 's'])
- assert x.to_json() == collections.OrderedDict([
- ['x', [4e-9, 'm']],
- ['y', [5e-9, 'm']],
- ['z', [6e-9, 'm']],
- ['t', [2, 's']],
- ])
+ x = viewer_state.CoordinateSpace(
+ names=["x", "y", "z", "t"], scales=[4, 5, 6, 2], units=["nm", "nm", "nm", "s"]
+ )
+ assert x.to_json() == {
+ "x": [4e-9, "m"],
+ "y": [5e-9, "m"],
+ "z": [6e-9, "m"],
+ "t": [2, "s"],
+ }
def test_layers():
layer_json = [
- {
- 'name': 'a',
- 'type': 'segmentation',
- 'visible': False
- },
- {
- 'name': 'b',
- 'type': 'image'
- },
+ {"name": "a", "type": "segmentation", "visible": False},
+ {"name": "b", "type": "image"},
]
layers_ro = viewer_state.Layers(layer_json, _readonly=True)
- assert layers_ro[0].name == 'a'
+ assert layers_ro[0].name == "a"
assert isinstance(layers_ro[0].layer, viewer_state.SegmentationLayer)
- assert layers_ro[0].visible == False
- assert isinstance(layers_ro['a'].layer, viewer_state.SegmentationLayer)
- assert layers_ro[1].name == 'b'
+ assert layers_ro[0].visible is False
+ assert isinstance(layers_ro["a"].layer, viewer_state.SegmentationLayer)
+ assert layers_ro[1].name == "b"
assert isinstance(layers_ro[1].layer, viewer_state.ImageLayer)
- assert layers_ro[1].visible == True
- assert isinstance(layers_ro['b'].layer, viewer_state.ImageLayer)
+ assert layers_ro[1].visible is True
+ assert isinstance(layers_ro["b"].layer, viewer_state.ImageLayer)
with pytest.raises(AttributeError):
- layers_ro['c'] = viewer_state.ImageLayer()
+ layers_ro["c"] = viewer_state.ImageLayer()
with pytest.raises(AttributeError):
del layers_ro[0]
with pytest.raises(AttributeError):
- del layers_ro['a']
+ del layers_ro["a"]
with pytest.raises(AttributeError):
del layers_ro[:]
layers_rw = viewer_state.Layers(layer_json)
del layers_rw[0]
assert layers_rw.to_json() == [
- {
- 'name': 'b',
- 'type': 'image'
- },
+ {"name": "b", "type": "image"},
]
layers_rw = viewer_state.Layers(layer_json)
- del layers_rw['a']
+ del layers_rw["a"]
assert layers_rw.to_json() == [
- {
- 'name': 'b',
- 'type': 'image'
- },
+ {"name": "b", "type": "image"},
]
layers_rw = viewer_state.Layers(layer_json)
diff --git a/python/tests/write_annotations_test.py b/python/tests/write_annotations_test.py
index 82294e2d91..b634fbe827 100644
--- a/python/tests/write_annotations_test.py
+++ b/python/tests/write_annotations_test.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import os
import pathlib
@@ -22,27 +21,26 @@
def test_annotation_writer_axis_aligned_bounding_box(tmp_path: pathlib.Path):
- dim_dict = {'names': ['x', 'y'], 'units': ['m', 'm'], 'scales': [1, 1]}
- coordinate_space = neuroglancer.CoordinateSpace(**dim_dict)
- writer = write_annotations.AnnotationWriter(
- coordinate_space=coordinate_space,
- annotation_type='axis_aligned_bounding_box')
- writer.add_axis_aligned_bounding_box([2, 5], [3, 6])
- writer.write(tmp_path)
- assert os.path.exists(os.path.join(tmp_path, 'info'))
- assert os.path.exists(os.path.join(tmp_path, 'spatial0'))
- assert os.path.exists(os.path.join(tmp_path, 'by_id'))
+ dim_dict = {"names": ["x", "y"], "units": ["m", "m"], "scales": [1, 1]}
+ coordinate_space = neuroglancer.CoordinateSpace(**dim_dict)
+ writer = write_annotations.AnnotationWriter(
+ coordinate_space=coordinate_space, annotation_type="axis_aligned_bounding_box"
+ )
+ writer.add_axis_aligned_bounding_box([2, 5], [3, 6])
+ writer.write(tmp_path)
+ assert os.path.exists(os.path.join(tmp_path, "info"))
+ assert os.path.exists(os.path.join(tmp_path, "spatial0"))
+ assert os.path.exists(os.path.join(tmp_path, "by_id"))
def test_annotation_writer_point(tmp_path: pathlib.Path):
- dim_dict = {'names': ['x', 'y'], 'units': ['m', 'm'], 'scales': [1, 1]}
- coordinate_space = neuroglancer.CoordinateSpace(**dim_dict)
- writer = write_annotations.AnnotationWriter(
- coordinate_space=coordinate_space,
- annotation_type='point')
- writer.add_point([2, 5])
- writer.write(tmp_path)
- assert os.path.exists(os.path.join(tmp_path, 'info'))
- assert os.path.exists(os.path.join(tmp_path, 'spatial0'))
- assert os.path.exists(os.path.join(tmp_path, 'by_id'))
-
+ dim_dict = {"names": ["x", "y"], "units": ["m", "m"], "scales": [1, 1]}
+ coordinate_space = neuroglancer.CoordinateSpace(**dim_dict)
+ writer = write_annotations.AnnotationWriter(
+ coordinate_space=coordinate_space, annotation_type="point"
+ )
+ writer.add_point([2, 5])
+ writer.write(tmp_path)
+ assert os.path.exists(os.path.join(tmp_path, "info"))
+ assert os.path.exists(os.path.join(tmp_path, "spatial0"))
+ assert os.path.exists(os.path.join(tmp_path, "by_id"))
diff --git a/python/tests/zarr_test.py b/python/tests/zarr_test.py
index b459ac70f9..1e9c5ae5fd 100644
--- a/python/tests/zarr_test.py
+++ b/python/tests/zarr_test.py
@@ -14,116 +14,82 @@
"""Tests that shader control parameters can be specified from Python."""
import pathlib
-from typing import Tuple
import neuroglancer
import numpy as np
import pytest
-import tensorstore as ts
TEST_DATA_DIR = pathlib.Path(__file__).parent.parent / "testdata"
-@pytest.mark.parametrize('spec', [
- {
- "driver": "zarr"
- },
- {
- "driver": "zarr",
- "schema": {
- "chunk_layout": {
- "inner_order": [2, 1, 0]
- }
- }
- },
- {
- "driver": "zarr3"
- },
- {
- "driver": "zarr3",
- "schema": {
- "chunk_layout": {
- "inner_order": [2, 1, 0]
- }
- }
- },
- {
- "driver": "zarr3",
- "schema": {
- "chunk_layout": {
- "read_chunk": {
- "shape": [2, 3, 4]
- },
- "write_chunk": {
- "shape": [6, 12, 20]
+@pytest.mark.parametrize(
+ "spec",
+ [
+ {"driver": "zarr"},
+ {"driver": "zarr", "schema": {"chunk_layout": {"inner_order": [2, 1, 0]}}},
+ {"driver": "zarr3"},
+ {"driver": "zarr3", "schema": {"chunk_layout": {"inner_order": [2, 1, 0]}}},
+ {
+ "driver": "zarr3",
+ "schema": {
+ "chunk_layout": {
+ "read_chunk": {"shape": [2, 3, 4]},
+ "write_chunk": {"shape": [6, 12, 20]},
}
- }
- }
- },
- {
- "driver": "zarr3",
- "schema": {
- "chunk_layout": {
- "inner_order": [2, 0, 1],
- "read_chunk": {
- "shape": [2, 3, 4]
- },
- "write_chunk": {
- "shape": [6, 12, 20]
- }
- }
- }
- },
- {
- "driver": "zarr3",
- "schema": {
- "chunk_layout": {
- "write_chunk": {
- "shape": [6, 12, 24]
- }
- }
+ },
},
- "metadata": {
- "codecs": [{
- "name": "transpose",
- "configuration": {
- "order": [0, 2, 1]
+ {
+ "driver": "zarr3",
+ "schema": {
+ "chunk_layout": {
+ "inner_order": [2, 0, 1],
+ "read_chunk": {"shape": [2, 3, 4]},
+ "write_chunk": {"shape": [6, 12, 20]},
}
- }, {
- "name": "sharding_indexed",
- "configuration": {
- "chunk_shape": [2, 3, 4],
- "index_codecs": [{
- "name": "transpose",
- "configuration": {
- "order": [3, 1, 0, 2]
- }
- }, {
- "name": "bytes",
- "configuration": {
- "endian": "little"
- }
- }],
- "codecs": [{
- "name": "transpose",
- "configuration": {
- "order": [2, 1, 0]
- }
- }, {
- "name": "bytes",
+ },
+ },
+ {
+ "driver": "zarr3",
+ "schema": {"chunk_layout": {"write_chunk": {"shape": [6, 12, 24]}}},
+ "metadata": {
+ "codecs": [
+ {"name": "transpose", "configuration": {"order": [0, 2, 1]}},
+ {
+ "name": "sharding_indexed",
"configuration": {
- "endian": "little"
- }
- }, {
- "name": "gzip"
- }]
- }
- }]
- }
- },
-],
- ids=str)
-def test_zarr(tempdir_server: Tuple[pathlib.Path, str], webdriver, spec):
+ "chunk_shape": [2, 3, 4],
+ "index_codecs": [
+ {
+ "name": "transpose",
+ "configuration": {"order": [3, 1, 0, 2]},
+ },
+ {
+ "name": "bytes",
+ "configuration": {"endian": "little"},
+ },
+ ],
+ "codecs": [
+ {
+ "name": "transpose",
+ "configuration": {"order": [2, 1, 0]},
+ },
+ {
+ "name": "bytes",
+ "configuration": {"endian": "little"},
+ },
+ {"name": "gzip"},
+ ],
+ },
+ },
+ ]
+ },
+ },
+ ],
+ ids=str,
+)
+def test_zarr(tempdir_server: tuple[pathlib.Path, str], webdriver, spec):
+ import tensorstore as ts
+
tmp_path, server_url = tempdir_server
shape = [10, 20, 30]
@@ -131,9 +97,9 @@ def test_zarr(tempdir_server: Tuple[pathlib.Path, str], webdriver, spec):
a = np.arange(np.prod(shape), dtype=np.int32).reshape(shape)
full_spec = {
- 'kvstore': {
- 'driver': 'file',
- 'path': str(tmp_path),
+ "kvstore": {
+ "driver": "file",
+ "path": str(tmp_path),
}
}
full_spec.update(spec)
@@ -142,68 +108,80 @@ def test_zarr(tempdir_server: Tuple[pathlib.Path, str], webdriver, spec):
store[...] = a
with webdriver.viewer.txn() as s:
- s.layers.append(name="a", layer=neuroglancer.ImageLayer(source=f'zarr://{server_url}'))
+ s.layers.append(
+ name="a", layer=neuroglancer.ImageLayer(source=f"zarr://{server_url}")
+ )
- vol = webdriver.viewer.volume('a').result()
+ vol = webdriver.viewer.volume("a").result()
b = vol.read().result()
np.testing.assert_equal(a, b)
EXCLUDED_ZARR_V2_CASES = {
- '.zgroup',
- '.zattrs',
- '.zmetadata',
+ ".zgroup",
+ ".zattrs",
+ ".zmetadata",
# bool not supported by neuroglancer
- '1d.contiguous.b1',
+ "1d.contiguous.b1",
# float64 not supported by neuroglancer
- '1d.contiguous.f8',
+ "1d.contiguous.f8",
# LZ4 not supported by neuroglancer or tensorstore
- '1d.contiguous.lz4.i2',
+ "1d.contiguous.lz4.i2",
# S not supported by neuroglancer
- '1d.contiguous.S7',
+ "1d.contiguous.S7",
# U not supported by neuroglancer
- '1d.contiguous.U13.be',
- '1d.contiguous.U13.le',
- '1d.contiguous.U7',
- '2d.chunked.U7',
+ "1d.contiguous.U13.be",
+ "1d.contiguous.U13.le",
+ "1d.contiguous.U7",
+ "2d.chunked.U7",
# VLenUTF8 not supported by neuroglancer
- '3d.chunked.O',
+ "3d.chunked.O",
}
EXCLUDED_ZARR_V3_CASES = {
- 'zarr.json',
+ "zarr.json",
# bool not supported by neuroglancer
- '1d.contiguous.b1',
- '1d.contiguous.compressed.sharded.b1',
+ "1d.contiguous.b1",
+ "1d.contiguous.compressed.sharded.b1",
# float64 not supported by neuroglancer
- '1d.contiguous.f8',
- '1d.contiguous.compressed.sharded.f8',
+ "1d.contiguous.f8",
+ "1d.contiguous.compressed.sharded.f8",
}
-@pytest.mark.parametrize('driver,data_dir',
- [('zarr', p)
- for p in TEST_DATA_DIR.glob('zarr_v2/from_zarr-python/data.zarr/*')
- if p.name != '.zgroup' and p.name not in EXCLUDED_ZARR_V2_CASES] +
- [('zarr3', p)
- for p in TEST_DATA_DIR.glob('zarr_v3/from_zarrita/data.zarr/*')
- if p.name not in EXCLUDED_ZARR_V3_CASES],
- ids=str)
+@pytest.mark.parametrize(
+ "driver,data_dir",
+ [
+ ("zarr", p)
+ for p in TEST_DATA_DIR.glob("zarr_v2/from_zarr-python/data.zarr/*")
+ if p.name != ".zgroup" and p.name not in EXCLUDED_ZARR_V2_CASES
+ ]
+ + [
+ ("zarr3", p)
+ for p in TEST_DATA_DIR.glob("zarr_v3/from_zarrita/data.zarr/*")
+ if p.name not in EXCLUDED_ZARR_V3_CASES
+ ],
+ ids=str,
+)
def test_data(driver: str, data_dir: pathlib.Path, static_file_server, webdriver):
+ import tensorstore as ts
+
server_url = static_file_server(data_dir)
full_spec = {
- 'driver': driver,
- 'kvstore': {
- 'driver': 'file',
- 'path': str(data_dir),
- }
+ "driver": driver,
+ "kvstore": {
+ "driver": "file",
+ "path": str(data_dir),
+ },
}
store = ts.open(full_spec, open=True, read=True).result()
a = store.read().result()
with webdriver.viewer.txn() as s:
- s.layers.append(name="a", layer=neuroglancer.ImageLayer(source=f'zarr://{server_url}'))
+ s.layers.append(
+ name="a", layer=neuroglancer.ImageLayer(source=f"zarr://{server_url}")
+ )
- vol = webdriver.viewer.volume('a').result()
+ vol = webdriver.viewer.volume("a").result()
b = vol.read().result()
np.testing.assert_equal(a, b)
diff --git a/setup.py b/setup.py
index ba695e5199..8100216052 100755
--- a/setup.py
+++ b/setup.py
@@ -1,21 +1,16 @@
#!/usr/bin/env python
-import sys
-if sys.version_info < (3, 5):
- print('Python >= 3.5 is required to build')
- sys.exit(1)
+
# Import setuptools before distutils because setuptools monkey patches
# distutils.
-import setuptools
-
import atexit
-import distutils.command.build
import os
import platform
import shutil
import subprocess
import tempfile
-import time
+
+import setuptools
import setuptools.command.build
import setuptools.command.build_ext
import setuptools.command.develop
@@ -23,11 +18,13 @@
import setuptools.command.sdist
import setuptools.command.test
-package_name = 'neuroglancer'
+package_name = "neuroglancer"
root_dir = os.path.dirname(__file__)
-python_dir = os.path.join(root_dir, 'python')
-src_dir = os.path.join(python_dir, 'ext', 'src')
-openmesh_dir = os.path.join(python_dir, 'ext', 'third_party', 'openmesh', 'OpenMesh', 'src')
+python_dir = os.path.join(root_dir, "python")
+src_dir = os.path.join(python_dir, "ext", "src")
+openmesh_dir = os.path.join(
+ python_dir, "ext", "third_party", "openmesh", "OpenMesh", "src"
+)
CLIENT_FILES = [
"index.html",
@@ -41,7 +38,20 @@
"async_computation.bundle.js.map",
]
-with open(os.path.join(python_dir, 'README.md'), mode='r', encoding='utf-8') as f:
+
+_SETUP_REQUIRES = [
+ "setuptools_scm>=4.1.2",
+ "numpy>=1.11.0",
+]
+
+
+_PACKAGE_JSON_EXISTS = os.path.exists(os.path.join(root_dir, "package.json"))
+
+if _PACKAGE_JSON_EXISTS:
+ _SETUP_REQUIRES.append("nodejs-bin[cmd]")
+
+
+with open(os.path.join(python_dir, "README.md"), encoding="utf-8") as f:
long_description = f.read()
@@ -52,12 +62,12 @@ def _maybe_bundle_client(cmd, inplace=False):
rebuilding it.
"""
- bundle_client_cmd = cmd.distribution.get_command_obj('bundle_client')
+ bundle_client_cmd = cmd.distribution.get_command_obj("bundle_client")
if inplace:
bundle_client_cmd.build_bundle_inplace = True
if bundle_client_cmd.skip_rebuild is None:
bundle_client_cmd.skip_rebuild = True
- cmd.run_command('bundle_client')
+ cmd.run_command("bundle_client")
def _setup_temp_egg_info(cmd):
@@ -68,7 +78,7 @@ def _setup_temp_egg_info(cmd):
doesn't litter the source directory and doesn't pick up a stale SOURCES.txt
from a previous build.
"""
- egg_info_cmd = cmd.distribution.get_command_obj('egg_info')
+ egg_info_cmd = cmd.distribution.get_command_obj("egg_info")
if egg_info_cmd.egg_base is None:
tempdir = tempfile.TemporaryDirectory(dir=os.curdir)
egg_info_cmd.egg_base = tempdir.name
@@ -76,7 +86,6 @@ def _setup_temp_egg_info(cmd):
class SdistCommand(setuptools.command.sdist.sdist):
-
def run(self):
# Build the client bundle if it does not already exist. If it has
# already been built but is stale, the user is responsible for
@@ -89,59 +98,65 @@ def make_release_tree(self, base_dir, files):
# Exclude .egg-info from source distribution. These aren't actually
# needed, and due to the use of the temporary directory in `run`, the
# path isn't correct if it gets included.
- files = [x for x in files if '.egg-info' not in x]
+ files = [x for x in files if ".egg-info" not in x]
super().make_release_tree(base_dir, files)
-setuptools.command.build.build.sub_commands.append(('bundle_client', None))
+setuptools.command.build.build.sub_commands.append(("bundle_client", None))
class BuildExtCommand(setuptools.command.build_ext.build_ext):
-
def finalize_options(self):
super().finalize_options()
# Prevent numpy from thinking it is still in its setup process
if isinstance(__builtins__, dict):
- __builtins__['__NUMPY_SETUP__'] = False
+ __builtins__["__NUMPY_SETUP__"] = False
else:
- setattr(__builtins__, '__NUMPY_SETUP__', False)
+ setattr(__builtins__, "__NUMPY_SETUP__", False)
import numpy
+
self.include_dirs.append(numpy.get_include())
class InstallCommand(setuptools.command.install.install):
-
def run(self):
_setup_temp_egg_info(self)
super().run()
class DevelopCommand(setuptools.command.develop.develop):
-
def run(self):
_maybe_bundle_client(self)
super().run()
-class BundleClientCommand(setuptools.command.build.build, setuptools.command.build.SubCommand):
-
+class BundleClientCommand(
+ setuptools.command.build.build, setuptools.command.build.SubCommand
+):
editable_mode: bool = False
user_options = [
- ('client-bundle-type=', None,
- 'The nodejs bundle type. "min" (default) creates condensed static files for production, "dev" creates human-readable files.'
- ),
- ('build-bundle-inplace', None, 'Build the client bundle inplace.'),
- ('skip-npm-reinstall', None,
- 'Skip running `npm install` if the `node_modules` directory already exists.'),
- ('skip-rebuild', None,
- 'Skip rebuilding if the `python/neuroglancer/static/index.html` file already exists.'),
+ (
+ "client-bundle-type=",
+ None,
+ 'The nodejs bundle type. "min" (default) creates condensed static files for production, "dev" creates human-readable files.',
+ ),
+ ("build-bundle-inplace", None, "Build the client bundle inplace."),
+ (
+ "skip-npm-reinstall",
+ None,
+ "Skip running `npm install` if the `node_modules` directory already exists.",
+ ),
+ (
+ "skip-rebuild",
+ None,
+ "Skip rebuilding if the `python/neuroglancer/static/index.html` file already exists.",
+ ),
]
def initialize_options(self):
-
self.build_lib = None
- self.client_bundle_type = 'min'
+ self.client_bundle_type = "min"
self.skip_npm_reinstall = None
self.skip_rebuild = None
self.build_bundle_inplace = None
@@ -149,14 +164,16 @@ def initialize_options(self):
def finalize_options(self):
self.set_undefined_options("build_py", ("build_lib", "build_lib"))
- if self.client_bundle_type not in ['min', 'dev']:
+ if self.client_bundle_type not in ["min", "dev"]:
raise RuntimeError('client-bundle-type has to be one of "min" or "dev"')
if self.skip_npm_reinstall is None:
self.skip_npm_reinstall = False
if self.build_bundle_inplace is None:
- self.build_bundle_inplace = (os.getenv('NEUROGLANCER_BUILD_BUNDLE_INPLACE') == '1')
+ self.build_bundle_inplace = (
+ os.getenv("NEUROGLANCER_BUILD_BUNDLE_INPLACE") == "1"
+ )
if self.skip_rebuild is None:
self.skip_rebuild = self.build_bundle_inplace
@@ -178,12 +195,14 @@ def get_output_mapping(self):
def run(self):
inplace = self.editable_mode or self.build_bundle_inplace
- print(f'Building client bundle: inplace={inplace}, skip_rebuild={self.skip_rebuild}')
+ print(
+ f"Building client bundle: inplace={inplace}, skip_rebuild={self.skip_rebuild}"
+ )
# If building from an sdist, `package.json` won't be present but the
# bundled files will.
- if not os.path.exists(os.path.join(root_dir, 'package.json')):
- print('Skipping build of client bundle because package.json does not exist')
+ if not _PACKAGE_JSON_EXISTS:
+ print("Skipping build of client bundle because package.json does not exist")
for dest, source in self.get_output_mapping().items():
if dest != source:
shutil.copyfile(source, dest)
@@ -194,58 +213,67 @@ def run(self):
else:
output_base_dir = self.build_lib
- output_dir = os.path.join(output_base_dir, 'neuroglancer', 'static')
+ output_dir = os.path.join(output_base_dir, "neuroglancer", "static")
if self.skip_rebuild and inplace:
- html_path = os.path.join(output_dir, 'index.html')
+ html_path = os.path.join(output_dir, "index.html")
if os.path.exists(html_path):
- print('Skipping rebuild of client bundle since %s already exists' % (html_path, ))
+ print(
+ "Skipping rebuild of client bundle since {} already exists".format(
+ html_path
+ )
+ )
return
target = {"min": "build-python-min", "dev": "build-python-dev"}
try:
t = target[self.client_bundle_type]
- node_modules_path = os.path.join(root_dir, 'node_modules')
- if (self.skip_npm_reinstall and os.path.exists(node_modules_path)):
- print('Skipping `npm install` since %s already exists' % (node_modules_path, ))
+ node_modules_path = os.path.join(root_dir, "node_modules")
+ if self.skip_npm_reinstall and os.path.exists(node_modules_path):
+ print(
+ f"Skipping `npm install` since {node_modules_path} already exists"
+ )
else:
- subprocess.call('npm i', shell=True, cwd=root_dir)
- res = subprocess.call(f'npm run {t} -- --output={output_dir}', shell=True, cwd=root_dir)
- except:
+ subprocess.call("npm i", shell=True, cwd=root_dir)
+ res = subprocess.call(
+ f"npm run {t} -- --output={output_dir}", shell=True, cwd=root_dir
+ )
+ except Exception:
raise RuntimeError(
- 'Could not run \'npm run %s\'. Make sure node.js >= v12 is installed and in your path.'
- % t)
+ "Could not run 'npm run %s'. Make sure node.js >= v12 is installed and in your path."
+ % t
+ )
if res != 0:
- raise RuntimeError('failed to bundle neuroglancer node.js project')
+ raise RuntimeError("failed to bundle neuroglancer node.js project")
local_sources = [
- '_neuroglancer.cc',
- 'openmesh_dependencies.cc',
- 'on_demand_object_mesh_generator.cc',
- 'voxel_mesh_generator.cc',
- 'mesh_objects.cc',
+ "_neuroglancer.cc",
+ "openmesh_dependencies.cc",
+ "on_demand_object_mesh_generator.cc",
+ "voxel_mesh_generator.cc",
+ "mesh_objects.cc",
]
USE_OMP = False
if USE_OMP:
- openmp_flags = ['-fopenmp']
+ openmp_flags = ["-fopenmp"]
else:
openmp_flags = []
-extra_compile_args = ['-std=c++11', '-fvisibility=hidden', '-O3'] + openmp_flags
-if platform.system() == 'Darwin':
- extra_compile_args.insert(0, '-stdlib=libc++')
+extra_compile_args = ["-std=c++11", "-fvisibility=hidden", "-O3"] + openmp_flags
+if platform.system() == "Darwin":
+ extra_compile_args.insert(0, "-stdlib=libc++")
# Disable newer exception handling from Visual Studio 2019, since it requires a
# newer C++ runtime than shipped with Python. The C++ extension doesn't use
# exceptions anyway.
#
# https://cibuildwheel.readthedocs.io/en/stable/faq/#importerror-dll-load-failed-the-specific-module-could-not-be-found-error-on-windows
-if platform.system() == 'Windows':
- extra_compile_args.append('/d2FH4-')
+if platform.system() == "Windows":
+ extra_compile_args.append("/d2FH4-")
# Copied from setuptools_scm, can be removed once a released version of
@@ -262,7 +290,6 @@ def _no_guess_dev_version(version):
setuptools.setup(
name=package_name,
-
# Use setuptools_scm to determine version from git tags
use_scm_version={
"relative_to": __file__,
@@ -270,62 +297,59 @@ def _no_guess_dev_version(version):
"local_scheme": "no-local-version",
"parentdir_prefix_version": package_name + "-",
},
- description='Python data backend for neuroglancer, a WebGL-based viewer for volumetric data',
+ description="Python data backend for neuroglancer, a WebGL-based viewer for volumetric data",
long_description=long_description,
- long_description_content_type='text/markdown',
- author='Jeremy Maitin-Shepard',
- author_email='jbms@google.com',
- url='https://github.com/google/neuroglancer',
- license='Apache License 2.0',
- packages=setuptools.find_packages('python'),
+ long_description_content_type="text/markdown",
+ author="Jeremy Maitin-Shepard",
+ author_email="jbms@google.com",
+ url="https://github.com/google/neuroglancer",
+ license="Apache License 2.0",
+ python_requires=">=3.9",
+ packages=setuptools.find_packages("python"),
package_dir={
- '': 'python',
+ "": "python",
},
package_data={
- 'neuroglancer.static': ['*.html', '*.css', '*.js', '*.js.map'],
+ "neuroglancer.static": ["*.html", "*.css", "*.js", "*.js.map"],
},
- setup_requires=[
- "setuptools_scm>=4.1.2",
- "numpy>=1.11.0",
- ],
+ setup_requires=_SETUP_REQUIRES,
install_requires=[
"Pillow>=3.2.0",
"numpy>=1.11.0",
- 'requests',
- 'tornado',
- 'six',
- 'google-apitools',
- 'google-auth',
- 'atomicwrites',
- 'typing_extensions',
+ "requests",
+ "tornado",
+ "google-apitools",
+ "google-auth",
+ "atomicwrites",
],
extras_require={
- 'test': [
- 'pytest>=6.1.2',
- 'pytest-rerunfailures>=9.1.1',
- 'pytest-timeout>=1.4.2',
+ "test": [
+ "pytest>=6.1.2",
+ "pytest-rerunfailures>=9.1.1",
+ "pytest-timeout>=1.4.2",
],
- 'test-browser': [
- 'selenium>=4',
+ "test-browser": [
+ "selenium>=4",
],
},
ext_modules=[
setuptools.Extension(
- 'neuroglancer._neuroglancer',
+ "neuroglancer._neuroglancer",
sources=[os.path.join(src_dir, name) for name in local_sources],
- language='c++',
+ language="c++",
include_dirs=[openmesh_dir],
define_macros=[
- ('_USE_MATH_DEFINES', None), # Needed by OpenMesh when used with MSVC
+ ("_USE_MATH_DEFINES", None), # Needed by OpenMesh when used with MSVC
],
extra_compile_args=extra_compile_args,
- extra_link_args=openmp_flags),
+ extra_link_args=openmp_flags,
+ ),
],
cmdclass={
- 'sdist': SdistCommand,
- 'bundle_client': BundleClientCommand,
- 'build_ext': BuildExtCommand,
- 'install': InstallCommand,
- 'develop': DevelopCommand,
+ "sdist": SdistCommand,
+ "bundle_client": BundleClientCommand,
+ "build_ext": BuildExtCommand,
+ "install": InstallCommand,
+ "develop": DevelopCommand,
},
)
diff --git a/src/neuroglancer/mesh/draco/build_wasm.py b/src/neuroglancer/mesh/draco/build_wasm.py
index 0db2b90d8d..0d9057f006 100755
--- a/src/neuroglancer/mesh/draco/build_wasm.py
+++ b/src/neuroglancer/mesh/draco/build_wasm.py
@@ -16,117 +16,124 @@
import sys
import tempfile
-DRACO_ROOT = pathlib.Path('/usr/src/draco')
-DRACO_SRC = DRACO_ROOT / 'src'
+DRACO_ROOT = pathlib.Path("/usr/src/draco")
+DRACO_SRC = DRACO_ROOT / "src"
SETTINGS = {
# Disable filesystem interface, as it is unused.
- 'FILESYSTEM': '0',
- 'ALLOW_MEMORY_GROWTH': '1',
- 'TOTAL_STACK': '32768',
- 'TOTAL_MEMORY': '65536',
- 'EXPORTED_FUNCTIONS': '["_neuroglancer_draco_decode","_malloc"]',
- 'MALLOC': 'emmalloc',
- 'ENVIRONMENT': 'worker',
+ "FILESYSTEM": "0",
+ "ALLOW_MEMORY_GROWTH": "1",
+ "TOTAL_STACK": "32768",
+ "TOTAL_MEMORY": "65536",
+ "EXPORTED_FUNCTIONS": '["_neuroglancer_draco_decode","_malloc"]',
+ "MALLOC": "emmalloc",
+ "ENVIRONMENT": "worker",
# Build in standalone mode (also implied by -o .wasm option below)
# https://github.com/emscripten-core/emscripten/wiki/WebAssembly-Standalone
#
# This causes the memory to be managed by WebAssembly rather than
# JavaScript, and reduces the necessary JavaScript size.
- 'STANDALONE_WASM': True,
+ "STANDALONE_WASM": True,
}
DRACO_SOURCE_GROUPS = {
- 'draco_attributes_sources',
- 'draco_compression_attributes_dec_sources',
- 'draco_compression_attributes_pred_schemes_dec_sources',
- 'draco_compression_bit_coders_sources',
- 'draco_compression_decode_sources',
- 'draco_compression_entropy_sources',
- 'draco_compression_mesh_traverser_sources',
- 'draco_compression_mesh_dec_sources',
- 'draco_compression_point_cloud_dec_sources',
- 'draco_core_sources',
- 'draco_dec_config_sources',
- 'draco_mesh_sources',
- 'draco_metadata_dec_sources',
- 'draco_metadata_sources',
- 'draco_point_cloud_sources',
- 'draco_points_dec_sources',
+ "draco_attributes_sources",
+ "draco_compression_attributes_dec_sources",
+ "draco_compression_attributes_pred_schemes_dec_sources",
+ "draco_compression_bit_coders_sources",
+ "draco_compression_decode_sources",
+ "draco_compression_entropy_sources",
+ "draco_compression_mesh_traverser_sources",
+ "draco_compression_mesh_dec_sources",
+ "draco_compression_point_cloud_dec_sources",
+ "draco_core_sources",
+ "draco_dec_config_sources",
+ "draco_mesh_sources",
+ "draco_metadata_dec_sources",
+ "draco_metadata_sources",
+ "draco_point_cloud_sources",
+ "draco_points_dec_sources",
}
def get_draco_sources():
"""Obtain the list of source files from CMakeLists.txt."""
- cmakelists_content = (DRACO_ROOT / 'CMakeLists.txt').read_text()
+ cmakelists_content = (DRACO_ROOT / "CMakeLists.txt").read_text()
sources = []
seen_keys = set()
- for m in re.finditer(r'list\s*\(\s*APPEND\s+(draco_[a-z_]*_sources)\s+([^)]*)\)',
- cmakelists_content):
+ for m in re.finditer(
+ r"list\s*\(\s*APPEND\s+(draco_[a-z_]*_sources)\s+([^)]*)\)", cmakelists_content
+ ):
key = m.group(1)
- if key not in DRACO_SOURCE_GROUPS: continue
+ if key not in DRACO_SOURCE_GROUPS:
+ continue
seen_keys.add(key)
key_sources = [
- x.strip('"').replace('${draco_src_root}', str(DRACO_SRC / 'draco'))
+ x.strip('"').replace("${draco_src_root}", str(DRACO_SRC / "draco"))
for x in m.group(2).split()
]
- sources.extend(x for x in key_sources if not x.endswith('.h'))
+ sources.extend(x for x in key_sources if not x.endswith(".h"))
remaining_keys = DRACO_SOURCE_GROUPS - seen_keys
if remaining_keys:
- raise Exception(f'missing source groups: {remaining_keys}')
+ raise Exception(f"missing source groups: {remaining_keys}")
return sources
def main():
- sources = ['neuroglancer_draco.cc'] + get_draco_sources()
+ sources = ["neuroglancer_draco.cc"] + get_draco_sources()
settings_args = []
for k, v in SETTINGS.items():
- settings_args.append('-s')
+ settings_args.append("-s")
if v is True:
settings_args.append(k)
else:
- settings_args.append(f'{k}={v}')
+ settings_args.append(f"{k}={v}")
# Use unity build for faster compilation (avoids redundant parsing of
# headers, and redundant template instantiations).
- with tempfile.NamedTemporaryFile(suffix='.cc', mode='wb') as f:
+ with tempfile.NamedTemporaryFile(suffix=".cc", mode="wb") as f:
for source in sources:
f.write(pathlib.Path(source).read_bytes())
f.flush()
sys.exit(
- subprocess.run([
- 'emcc',
- f.name,
- # Note: Using -Os instead of -O3 reduces the size significantly,
- # but may harm performance.
- '-O3',
- # Disable debug assertions.
- '-DNDEBUG',
- # Specifies the interface that will be provided by JavaScript,
- # to avoid link errors.
- '--js-library',
- 'stub.js',
- # Disable exception handling to reduce generated code size, as
- # it is unused and requires JavaScript support.
- '-fno-exceptions',
- # Disable RTTI to reduce generated code size, as it is unused.
- '-fno-rtti',
- # neuroglancer_draco.cc does not define a `main()` function.
- # Instead, this wasm module is intended to be used as a
- # "reactor", i.e. library.
- '--no-entry',
- ] + settings_args + [
- '-std=c++14',
- '-Idraco_overlay',
- f'-I{DRACO_SRC}',
- '-o',
- 'neuroglancer_draco.wasm',
- ]).returncode)
-
-
-if __name__ == '__main__':
+ subprocess.run(
+ [
+ "emcc",
+ f.name,
+ # Note: Using -Os instead of -O3 reduces the size significantly,
+ # but may harm performance.
+ "-O3",
+ # Disable debug assertions.
+ "-DNDEBUG",
+ # Specifies the interface that will be provided by JavaScript,
+ # to avoid link errors.
+ "--js-library",
+ "stub.js",
+ # Disable exception handling to reduce generated code size, as
+ # it is unused and requires JavaScript support.
+ "-fno-exceptions",
+ # Disable RTTI to reduce generated code size, as it is unused.
+ "-fno-rtti",
+ # neuroglancer_draco.cc does not define a `main()` function.
+ # Instead, this wasm module is intended to be used as a
+ # "reactor", i.e. library.
+ "--no-entry",
+ ]
+ + settings_args
+ + [
+ "-std=c++14",
+ "-Idraco_overlay",
+ f"-I{DRACO_SRC}",
+ "-o",
+ "neuroglancer_draco.wasm",
+ ]
+ ).returncode
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/src/neuroglancer/mesh/draco/generate_first_bit_table.py b/src/neuroglancer/mesh/draco/generate_first_bit_table.py
index 85d48b3eff..33ea624254 100644
--- a/src/neuroglancer/mesh/draco/generate_first_bit_table.py
+++ b/src/neuroglancer/mesh/draco/generate_first_bit_table.py
@@ -25,4 +25,4 @@
bit_list.append(bit)
-print('{' + ', '.join(str(b) for b in bit_list) + '}')
+print("{" + ", ".join(str(b) for b in bit_list) + "}")
diff --git a/testdata/generate_npy_examples.py b/testdata/generate_npy_examples.py
index 4f536da665..265f4afe4d 100755
--- a/testdata/generate_npy_examples.py
+++ b/testdata/generate_npy_examples.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# @license
# Copyright 2016 Google Inc.
@@ -19,42 +19,50 @@
#
# This should be run from within the testdata/ directory.
-import numpy as np
import json
+import numpy as np
+
+
def write_array(array):
- dtype = array.dtype
- if dtype == np.uint8:
- byte_orders = [('=', '')]
- else:
- byte_orders = [('<','-le'), ('>','-be')]
- for byte_order_i, (byte_order, byte_order_name) in enumerate(byte_orders):
- new_array = np.array(array, dtype=dtype.newbyteorder(byte_order))
- name = 'npy_test.%s%s' % (dtype.name, byte_order_name)
- np.save(name, array)
- array_for_json = array
- if dtype == np.uint64:
- array_for_json = (np.cast[np.dtype('", "-be")]
+ for byte_order_i, (byte_order, byte_order_name) in enumerate(byte_orders):
+ new_array = np.array(array, dtype=dtype.newbyteorder(byte_order))
+ name = f"npy_test.{dtype.name}{byte_order_name}"
+ np.save(name, new_array)
+ array_for_json = array
+ if dtype == np.uint64:
+ array_for_json = (np.cast[np.dtype("