diff --git a/.github/workflows/black.yaml b/.github/workflows/black.yaml new file mode 100644 index 0000000..fb9c6d7 --- /dev/null +++ b/.github/workflows/black.yaml @@ -0,0 +1,19 @@ +name: Python Black + +on: [push, pull_request] + +jobs: + lint: + name: Python Lint + runs-on: ubuntu-latest + steps: + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + - name: Setup checkout + uses: actions/checkout@master + - name: Lint with Black + run: | + pip install black + black funlib/show/neuroglancer --diff --check diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 0000000..05ba0a8 --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,34 @@ +name: Publish + +on: + push: + tags: ["*"] + +jobs: + build-n-publish: + name: Build and publish Python 🐍 distributions 📦 to PyPI + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install pypa/build + run: >- + python -m + pip install + build + --user + - name: Build a binary wheel and a source tarball + run: >- + python -m + build + --sdist + --wheel + --outdir dist/ + - name: Publish distribution 📦 to PyPI + if: startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@release/v1 + with: + password: ${{ secrets.PYPI_API_TOKEN }} \ No newline at end of file diff --git a/README.rst b/README.rst index 1bf7320..ad41e1d 100644 --- a/README.rst +++ b/README.rst @@ -11,3 +11,32 @@ Currently contains: - a modified ``video_tool.py`` to work with ``LocalVolume`` - a convenience method to add daisy-like arrays to a neuroglancer context + +Usage +----- + +You can use terminal glob syntax to select multiple volumes for visualization: + +``neuroglancer path/to/your/data.zarr/and/volumes/*`` + +Extra optional arguments are: + +- ``--no-browser`` +- ``--bind-address`` +- ``--port`` + + +We also support slicing (the following command selects only the first channel of ``raw`` from every crop): + +``neuroglancer data.zarr/crop_*/raw[0]`` + +Slicing can conflict with glob expansion when you select multiple arrays. +In the following command it is unclear whether you want to visualize the arrays ``raw_1``, ``raw_3``, ``raw_4`` and ``raw_5``, +or channel ``1345`` of the array ``raw_``: + +``neuroglancer data.zarr/raw_[1345]`` + +To make this less ambiguous, we allow the use of the ``:`` character to separate the array glob from the +slicing pattern. 
So the following command will select the arrays ``raw_1``, ``raw_3``, ``raw_4`` and ``raw_5``: + +``neuroglancer data.zarr/raw_[1345]:`` \ No newline at end of file diff --git a/funlib/show/neuroglancer/__init__.py b/funlib/show/neuroglancer/__init__.py index e07216d..64e6130 100644 --- a/funlib/show/neuroglancer/__init__.py +++ b/funlib/show/neuroglancer/__init__.py @@ -1,3 +1,7 @@ -from .video_tool import RenderArgs, run_edit, run_render -from .scale_pyramid import ScalePyramid -from .add_layer import add_layer +from .video_tool import ( + RenderArgs as RenderArgs, + run_edit as run_edit, + run_render as run_render, +) +from .scale_pyramid import ScalePyramid as ScalePyramid +from .add_layer import add_layer as add_layer diff --git a/funlib/show/neuroglancer/add_layer.py b/funlib/show/neuroglancer/add_layer.py index 2b41682..ccdac83 100644 --- a/funlib/show/neuroglancer/add_layer.py +++ b/funlib/show/neuroglancer/add_layer.py @@ -1,32 +1,33 @@ from .scale_pyramid import ScalePyramid import neuroglancer +from funlib.persistence import Array -rgb_shader_code = ''' +rgb_shader_code = """ void main() { emitRGB( %f*vec3( toNormalized(getDataValue(%i)), toNormalized(getDataValue(%i)), toNormalized(getDataValue(%i))) ); -}''' +}""" -color_shader_code = ''' +color_shader_code = """ void main() { emitRGBA( vec4( %f, %f, %f, toNormalized(getDataValue())) ); -}''' +}""" -binary_shader_code = ''' +binary_shader_code = """ void main() { emitGrayscale(255.0*toNormalized(getDataValue())); -}''' +}""" -heatmap_shader_code = ''' +heatmap_shader_code = """ void main() { float v = toNormalized(getDataValue(0)); vec4 rgba = vec4(0,0,0,0); @@ -34,102 +35,117 @@ rgba = vec4(colormapJet(v), 1.0); } emitRGBA(rgba); -}''' +}""" -def parse_dims(array): +def create_coordinate_space( + array: Array, +) -> tuple[neuroglancer.CoordinateSpace, list[int]]: + assert array.spatial_dims > 0 - if type(array) == list: - array = array[0] - - dims = len(array.data.shape) - spatial_dims = array.roi.dims - channel_dims = dims - spatial_dims - - print("dims :", dims) - print("spatial dims:", spatial_dims) - print("channel dims:", channel_dims) + def interleave(list, fill_value, axis_names): + return_list = [fill_value] * len(axis_names) + for i, name in enumerate(axis_names): + if "^" not in name: + return_list[i] = list.pop(0) + return return_list - return dims, spatial_dims, channel_dims + units = interleave(list(array.units), "", array.axis_names) + scales = interleave(list(array.voxel_size), 1, array.axis_names) + offset = interleave(list(array.offset / array.voxel_size), 0, array.axis_names) + return ( + neuroglancer.CoordinateSpace( + names=array.axis_names, units=units, scales=scales + ), + offset, + ) -def create_coordinate_space(array, spatial_dim_names, channel_dim_names, unit): - dims, spatial_dims, channel_dims = parse_dims(array) - assert spatial_dims > 0 - - if channel_dims > 0: - channel_names = channel_dim_names[-channel_dims:] - else: - channel_names = [] - spatial_names = spatial_dim_names[-spatial_dims:] - names = channel_names + spatial_names - units = [""] * channel_dims + [unit] * spatial_dims - scales = [1] * channel_dims + list(array.voxel_size) - - print("Names :", names) - print("Units :", units) - print("Scales :", scales) - - return neuroglancer.CoordinateSpace( - names=names, - units=units, - scales=scales) +def guess_shader_code(array: Array): + """ + TODO: This function is not used yet. + It should make some reasonable guesses for basic visualization parameters. 
+ Guess volume type (or read from optional metadata?): + - bool/uint32/uint64/int32/int64 -> Segmentation + - floats/int8/uint8 -> Image + Guess shader for Image volumes: + - 1 channel dimension: + - 1 channel -> grayscale (add shader options for color and threshold) + - 2 channels -> projected RGB (set B to 0 or 1 or R+G?) + - 3 channels -> RGB + - 4 channels -> projected RGB (PCA? Random linear combinations? Randomizable with "l" key?) + - multiple channel dimensions?: + """ + raise NotImplementedError() + channel_dim_shapes = [ + array.shape[i] + for i in range(len(array.axis_names)) + if "^" in array.axis_names[i] + ] + if len(channel_dim_shapes) == 0: + return None # default shader + + if len(channel_dim_shapes) == 1: + num_channels = channel_dim_shapes[0] + if num_channels == 1: + return None # default shader + if num_channels == 2: + return projected_rgb_shader_code % num_channels + if num_channels == 3: + return rgb_shader_code % (0, 1, 2) + if num_channels > 3: + return projected_rgb_shader_code % num_channels def create_shader_code( - shader, - channel_dims, - rgb_channels=None, - color=None, - scale_factor=1.0): - + shader, channel_dims, rgb_channels=None, color=None, scale_factor=1.0 +): if shader is None: if channel_dims > 1: - shader = 'rgb' + shader = "rgb" else: return None if rgb_channels is None: rgb_channels = [0, 1, 2] - if shader == 'rgb': + if shader == "rgb": return rgb_shader_code % ( scale_factor, rgb_channels[0], rgb_channels[1], - rgb_channels[2]) + rgb_channels[2], + ) - if shader == 'color': - assert color is not None, \ - "You have to pass argument 'color' to use the color shader" + if shader == "color": + assert ( + color is not None + ), "You have to pass argument 'color' to use the color shader" return color_shader_code % ( color[0], color[1], color[2], ) - if shader == 'binary': + if shader == "binary": return binary_shader_code - if shader == 'heatmap': + if shader == "heatmap": return heatmap_shader_code def add_layer( - context, - array, - name, - spatial_dim_names=None, - channel_dim_names=None, - opacity=None, - shader=None, - rgb_channels=None, - color=None, - visible=True, - value_scale_factor=1.0, - units='nm'): - + context, + array: Array | list[Array], + name: str, + opacity: float | None = None, + shader: str | None = None, + rgb_channels=None, + color=None, + visible=True, + value_scale_factor=1.0, +): """Add a layer to a neuroglancer context. Args: @@ -149,19 +165,6 @@ def add_layer( The name of the layer. - spatial_dim_names: - - The names of the spatial dimensions. Defaults to ``['t', 'z', 'y', - 'x']``. The last elements of this list will be used (e.g., if your - data is 2D, the channels will be ``['y', 'x']``). - - channel_dim_names: - - The names of the non-spatial (channel) dimensions. Defaults to - ``['b^', 'c^']``. The last elements of this list will be used - (e.g., if your data is 2D but the shape of the array is 3D, the - channels will be ``['c^']``). - opacity: A float to define the layer opacity between 0 and 1. @@ -200,54 +203,29 @@ def add_layer( The units used for resolution and offset. 
""" - if channel_dim_names is None: - channel_dim_names = ["b", "c^"] - if spatial_dim_names is None: - spatial_dim_names = ["t", "z", "y", "x"] - if rgb_channels is None: rgb_channels = [0, 1, 2] - is_multiscale = type(array) == list - - dims, spatial_dims, channel_dims = parse_dims(array) + is_multiscale = isinstance(array, list) if is_multiscale: - dimensions = [] for a in array: - dimensions.append( - create_coordinate_space( - a, - spatial_dim_names, - channel_dim_names, - units)) - - # why only one offset, shouldn't that be a list? - voxel_offset = [0] * channel_dims + \ - list(array[0].roi.offset / array[0].voxel_size) + dimensions.append(create_coordinate_space(a)) layer = ScalePyramid( [ neuroglancer.LocalVolume( - data=a.data, - voxel_offset=voxel_offset, - dimensions=array_dims + data=a.data, voxel_offset=voxel_offset, dimensions=array_dims ) - for a, array_dims in zip(array, dimensions) + for a, (array_dims, voxel_offset) in zip(array, dimensions) ] ) - else: - - voxel_offset = [0] * channel_dims + \ - list(array.roi.offset / array.voxel_size) + array = array[0] - dimensions = create_coordinate_space( - array, - spatial_dim_names, - channel_dim_names, - units) + else: + dimensions, voxel_offset = create_coordinate_space(array) layer = neuroglancer.LocalVolume( data=array.data, @@ -255,37 +233,30 @@ def add_layer( dimensions=dimensions, ) - shader_code = create_shader_code( - shader, - channel_dims, - rgb_channels, - color, - value_scale_factor) + if shader is not None: + shader_code = create_shader_code( + shader, array.channel_dims, rgb_channels, color, value_scale_factor + ) + else: + shader_code = None if opacity is not None: if shader_code is None: context.layers.append( - name=name, - layer=layer, - visible=visible, - opacity=opacity) + name=name, layer=layer, visible=visible, opacity=opacity + ) else: context.layers.append( name=name, layer=layer, visible=visible, shader=shader_code, - opacity=opacity) + opacity=opacity, + ) else: if shader_code is None: - context.layers.append( - name=name, - layer=layer, - visible=visible) + context.layers.append(name=name, layer=layer, visible=visible) else: context.layers.append( - name=name, - layer=layer, - visible=visible, - shader=shader_code) - + name=name, layer=layer, visible=visible, shader=shader_code + ) diff --git a/funlib/show/neuroglancer/cli.py b/funlib/show/neuroglancer/cli.py index 02c4857..8d82cb4 100644 --- a/funlib/show/neuroglancer/cli.py +++ b/funlib/show/neuroglancer/cli.py @@ -2,210 +2,135 @@ from funlib.show.neuroglancer import add_layer from funlib.persistence import open_ds -from funlib.geometry import Roi import argparse import glob import neuroglancer import os import webbrowser -import numpy as np -import zarr +from pathlib import Path +import numpy as np # noqa: F401 This import is used in the eval statement below - -def to_slice(slice_str): - - values = [int(x) for x in slice_str.split(':')] - if len(values) == 1: - return values[0] - - return slice(*values) - def parse_ds_name(ds): + if ":" in ds: + ds, *slices = ds.split(":") + else: + ds, *slices = ds.split("[") - tokens = ds.split('[') - - if len(tokens) == 1: + if len(slices) == 0: return ds, None + elif len(slices) == 1: + slices = slices[0].strip("[]") + if len(slices) == 0: + slices = ":" + slices = eval(f"np.s_[{slices}]") + return ds, slices + else: + raise ValueError("Used multiple sets of brackets") - ds, slices = tokens - slices = list(map(to_slice, slices.rstrip(']').split(','))) - - return ds, slices - -class Project: - - def 
__init__(self, array, dim, value): - self.array = array - self.dim = dim - self.value = value - self.shape = array.shape[:self.dim] + array.shape[self.dim + 1:] - self.dtype = array.dtype - - def __getitem__(self, key): - slices = key[:self.dim] + (self.value,) + key[self.dim:] - ret = self.array[slices] - return ret - -def slice_dataset(a, slices): - - dims = a.roi.dims - for d, s in list(enumerate(slices))[::-1]: +class SliceAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + dest = getattr(namespace, self.dest) + if dest is None: + dest = [] + setattr(namespace, self.dest, dest) + assert isinstance( + dest, list + ), "Only one --slice/-s argument allowed to follow --dataset/-d" + assert ( + len(dest) > 0 + ), "The --slice/-s argument has to follow a --dataset/-d argument" + assert ( + len(values) == 1 + ), "The --slice/-s option should have exactly one argument" - if isinstance(s, slice): - raise NotImplementedError("Slicing not yet implemented!") - else: - index = (s - a.roi.begin[d])//a.voxel_size[d] - a.data = Project(a.data, d, index) - a.roi = Roi( - a.roi.begin[:d] + a.roi.begin[d + 1:], - a.roi.shape[:d] + a.roi.shape[d + 1:]) - a.voxel_size = a.voxel_size[:d] + a.voxel_size[d + 1:] - - return a - -def open_dataset(f, ds): - original_ds = ds - ds, slices = parse_ds_name(ds) - slices_str = original_ds[len(ds):] - - try: - dataset_as = [] - if all(key.startswith("s") for key in zarr.open(f)[ds].keys()): - raise AttributeError("This group is a multiscale array!") - for key in zarr.open(f)[ds].keys(): - dataset_as.extend(open_dataset(f, f"{ds}/{key}{slices_str}")) - return dataset_as - except AttributeError as e: - # dataset is an array, not a group - pass - - print("ds :", ds) - print("slices:", slices) - try: - zarr.open(f)[ds].keys() - is_multiscale = True - except: - is_multiscale = False - - if not is_multiscale: - a = open_ds(f, ds) - - if slices is not None: - a = slice_dataset(a, slices) - - if a.data.dtype == np.int64 or a.data.dtype == np.int16: - print("Converting dtype in memory...") - a.data = a.data[:].astype(np.uint64) - - return [(a, ds)] - else: - return [([open_ds(f, f"{ds}/{key}") for key in zarr.open(f)[ds].keys()], ds)] + dest[-1] = (dest[-1], values[0]) parser = argparse.ArgumentParser() parser.add_argument( - '--file', - '-f', + "--dataset", + "-d", type=str, - action='append', - help="The path to the container to show") + nargs="+", + action="append", + help="The paths to the datasets to show", + dest="datasetslice", +) parser.add_argument( - '--datasets', - '-d', + "--slices", + "-s", type=str, - nargs='+', - action='append', - help="The datasets in the container to show") + nargs="+", + action=SliceAction, + help="A slice operation to apply to the given datasets", + dest="datasetslice", +) parser.add_argument( - '--graphs', - '-g', - type=str, - nargs='+', - action='append', - help="The graphs in the container to show") -parser.add_argument( - '--no-browser', - '-n', + "--no-browser", + "-n", type=bool, - nargs='?', default=False, - const=True, - help="If set, do not open a browser, just print a URL") - + help="If set, do not open a browser, just print a URL", +) +parser.add_argument( + "--bind-address", + "-b", + type=str, + default="0.0.0.0", + help="Bind address", +) +parser.add_argument("--port", type=int, default=0, help="The port to bind to.") def main(): - args = parser.parse_args() - neuroglancer.set_server_bind_address('0.0.0.0') + neuroglancer.set_server_bind_address(args.bind_address, 
bind_port=args.port) viewer = neuroglancer.Viewer() - for f, datasets in zip(args.file, args.datasets): - - arrays = [] - for ds in datasets: - try: - - print("Adding %s, %s" % (f, ds)) - dataset_as = open_dataset(f, ds) - - except Exception as e: - - print(type(e), e) - print("Didn't work, checking if this is multi-res...") - - scales = glob.glob(os.path.join(f, ds, 's*')) - if len(scales) == 0: - print(f"Couldn't read {ds}, skipping...") - raise e - print("Found scales %s" % ([ - os.path.relpath(s, f) - for s in scales - ],)) - a = [ - open_dataset(f, os.path.relpath(scale_ds, f)) - for scale_ds in scales - ] - for a in dataset_as: - arrays.append(a) - - with viewer.txn() as s: - for array, dataset in arrays: - add_layer(s, array, dataset) - - if args.graphs: - for f, graphs in zip(args.file, args.graphs): - - for graph in graphs: + for datasetslice in args.datasetslice: + if isinstance(datasetslice, tuple): + datasets, slices = datasetslice[0], eval(f"np.s_[{datasetslice[1]}]") + elif isinstance(datasetslice, list): + datasets, slices = datasetslice, None + else: + raise NotImplementedError("Unreachable!") - graph_annotations = [] + for glob_path in datasets: + print(f"Adding {glob_path} with slices {slices}") + for ds_path in glob.glob(glob_path): + ds_path = Path(ds_path) try: - ids = open_ds(f, graph + '-ids').data - loc = open_ds(f, graph + '-locations').data - except: - loc = open_ds(f, graph).data - ids = None - dims = loc.shape[-1] - loc = loc[:].reshape((-1, dims)) - if ids is None: - ids = range(len(loc)) - for i, l in zip(ids, loc): - if dims == 2: - l = np.concatenate([[0], l]) - graph_annotations.append( - neuroglancer.EllipsoidAnnotation( - center=l[::-1], - radii=(5, 5, 5), - id=i)) - graph_layer = neuroglancer.AnnotationLayer( - annotations=graph_annotations, - voxel_size=(1, 1, 1)) + print("Adding %s" % (ds_path)) + array = open_ds(ds_path) + arrays = [(array, ds_path)] + + except Exception as e: + print(type(e), e) + print("Didn't work, checking if this is multi-res...") + + scales = glob.glob(f"{ds_path}/s*") + if len(scales) == 0: + print(f"Couldn't read {ds_path}, skipping...") + raise e + print( + "Found scales %s" + % ([os.path.relpath(s, ds_path) for s in scales],) + ) + arrays = [([open_ds(scale_ds) for scale_ds in scales], ds_path)] + + for array, _ in arrays: + if not isinstance(array, list): + array = [array] + for arr in array: + if slices is not None: + arr.lazy_op(slices) with viewer.txn() as s: - s.layers.append(name='graph', layer=graph_layer) + for array, dataset in arrays: + add_layer(s, array, Path(dataset).name) url = str(viewer) print(url) diff --git a/funlib/show/neuroglancer/video_tool.py b/funlib/show/neuroglancer/video_tool.py index 18b7ecd..99c05f4 100755 --- a/funlib/show/neuroglancer/video_tool.py +++ b/funlib/show/neuroglancer/video_tool.py @@ -62,7 +62,6 @@ from __future__ import print_function, division -import argparse import bisect import math import os @@ -71,10 +70,9 @@ import webbrowser import neuroglancer -import json -class RenderArgs(): +class RenderArgs: def __init__(self): @@ -89,9 +87,9 @@ def __init__(self): self.gpu_memory_limit = 3000000000 self.system_memory_limit = 3000000000 self.concurrent_downloads = 32 - self.cross_section_background_color = 'black' + self.cross_section_background_color = "black" self.shards = 1 - self.output_directory = '.' + self.output_directory = "." 
self.resume = False @@ -104,11 +102,13 @@ def __init__(self, keypoints, frames_per_second): self.keypoint_start_frame = [] self.keypoint_end_frame = [] for k in keypoints[:-1]: - duration = k['transition_duration'] + duration = k["transition_duration"] if duration == 0: cur_frames = 0 else: - cur_frames = max(1, int(round(k['transition_duration'] * frames_per_second))) + cur_frames = max( + 1, int(round(k["transition_duration"] * frames_per_second)) + ) self.keypoint_start_frame.append(self.total_frames) self.total_frames += cur_frames self.keypoint_end_frame.append(self.total_frames) @@ -126,12 +126,12 @@ def get_frame_from_elapsed_time(self, elapsed_time): def get_frame(self, frame_i): start_keypoint = self.get_keypoint_from_frame(frame_i) - a = self.keypoints[start_keypoint]['state'] + a = self.keypoints[start_keypoint]["state"] if start_keypoint == len(self.keypoints) - 1: return a else: end_keypoint = start_keypoint + 1 - b = self.keypoints[end_keypoint]['state'] + b = self.keypoints[end_keypoint]["state"] start_frame = self.keypoint_start_frame[start_keypoint] end_frame = self.keypoint_end_frame[start_keypoint] t = (frame_i - start_frame) / (end_frame - start_frame) @@ -150,22 +150,29 @@ def set_state(self, viewer, frame_i, prefetch_frames): del s.prefetch[:] for i, state in enumerate(states[1:]): s.prefetch.append( - neuroglancer.PrefetchState(state=state, priority=prefetch_frames - i)) + neuroglancer.PrefetchState( + state=state, priority=prefetch_frames - i + ) + ) class EditorPlaybackManager(object): def __init__(self, script_editor, playing=True, frames_per_second=5): self.script_editor = script_editor self.frames_per_second = frames_per_second - self.playback_manager = PlaybackManager(script_editor.keypoints, frames_per_second=self.frames_per_second) + self.playback_manager = PlaybackManager( + script_editor.keypoints, frames_per_second=self.frames_per_second + ) self.current_keypoint_index = max(1, script_editor.keypoint_index) self.script_editor._set_keypoint_index(self.current_keypoint_index) self.playing = playing script_editor.playback_manager = self - self.current_frame = self.playback_manager.keypoint_start_frame[self.current_keypoint_index - 1] + self.current_frame = self.playback_manager.keypoint_start_frame[ + self.current_keypoint_index - 1 + ] self.start_time = ( - time.time() - self.current_frame / - self.playback_manager.frames_per_second) + time.time() - self.current_frame / self.playback_manager.frames_per_second + ) t = threading.Thread(target=self._thread_func) t.daemon = True t.start() @@ -174,17 +181,31 @@ def __init__(self, script_editor, playing=True, frames_per_second=5): def _update_current_frame(self): elapsed_time = time.time() - self.start_time - self.current_frame = self.playback_manager.get_frame_from_elapsed_time(elapsed_time) + self.current_frame = self.playback_manager.get_frame_from_elapsed_time( + elapsed_time + ) def _display_frame(self): frame_i = self.current_frame - keypoint_index = self.playback_manager.get_keypoint_from_frame( - min(frame_i, self.playback_manager.total_frames - 1)) + 1 - current_duration = self.script_editor.keypoints[keypoint_index - 1]['transition_duration'] - transition_time = (frame_i - self.playback_manager.keypoint_start_frame[keypoint_index - 1] - ) / self.playback_manager.frames_per_second - self.playback_status = '%s frame %d/%d transition %.1f/%g' % ( - 'PLAYING' if self.playing else 'PAUSED', frame_i, self.playback_manager.total_frames, transition_time, current_duration) + keypoint_index = ( + 
self.playback_manager.get_keypoint_from_frame( + min(frame_i, self.playback_manager.total_frames - 1) + ) + + 1 + ) + current_duration = self.script_editor.keypoints[keypoint_index - 1][ + "transition_duration" + ] + transition_time = ( + frame_i - self.playback_manager.keypoint_start_frame[keypoint_index - 1] + ) / self.playback_manager.frames_per_second + self.playback_status = "%s frame %d/%d transition %.1f/%g" % ( + "PLAYING" if self.playing else "PAUSED", + frame_i, + self.playback_manager.total_frames, + transition_time, + current_duration, + ) if keypoint_index != self.current_keypoint_index: self.script_editor._set_keypoint_index(keypoint_index) self.current_keypoint_index = keypoint_index @@ -193,11 +214,15 @@ def _display_frame(self): self.script_editor._update_status() self.should_stop.set() return - self.playback_manager.set_state(self.script_editor.viewer, frame_i, prefetch_frames=10) + self.playback_manager.set_state( + self.script_editor.viewer, frame_i, prefetch_frames=10 + ) self.script_editor._update_status() def reload(self): - self.playback_manager = PlaybackManager(self.script_editor.keypoints, frames_per_second=self.frames_per_second) + self.playback_manager = PlaybackManager( + self.script_editor.keypoints, frames_per_second=self.frames_per_second + ) self.current_keypoint_index = None self.seek_frame(0) @@ -205,14 +230,20 @@ def pause(self): if self.playing: self.seek_frame(0) else: - self.start_time = time.time() - self.current_frame / self.playback_manager.frames_per_second + self.start_time = ( + time.time() + - self.current_frame / self.playback_manager.frames_per_second + ) self.playing = True + def seek_frame(self, amount): if self.playing: self.playing = False self._update_current_frame() self.current_frame += amount - self.current_frame = max(0, min(self.current_frame, self.playback_manager.total_frames - 1)) + self.current_frame = max( + 0, min(self.current_frame, self.playback_manager.total_frames - 1) + ) self._display_frame() def _thread_func(self): @@ -234,7 +265,7 @@ def _update(self): def load_script(script_path, transition_duration=1): keypoints = [] - with open(script_path, 'r') as f: + with open(script_path, "r") as f: while True: url = f.readline() if not url: @@ -244,26 +275,32 @@ def load_script(script_path, transition_duration=1): duration = transition_duration else: duration = float(line) - keypoints.append({ - 'state': neuroglancer.parse_url(url), - 'transition_duration': duration - }) + keypoints.append( + {"state": neuroglancer.parse_url(url), "transition_duration": duration} + ) return keypoints def save_script(script_path, keypoints): - temp_path = script_path + '.tmp' - with open(temp_path, 'w') as f: + temp_path = script_path + ".tmp" + with open(temp_path, "w") as f: for x in keypoints: - f.write(neuroglancer.to_url(x['state']) + '\n') - f.write(str(x['transition_duration']) + '\n') + f.write(neuroglancer.to_url(x["state"]) + "\n") + f.write(str(x["transition_duration"]) + "\n") os.rename(temp_path, script_path) class ScriptEditor(object): - def __init__(self, create_viewer_func, script_path, transition_duration, - fullscreen_width, fullscreen_height, fullscreen_scale_bar_scale, - frames_per_second): + def __init__( + self, + create_viewer_func, + script_path, + transition_duration, + fullscreen_width, + fullscreen_height, + fullscreen_scale_bar_scale, + frames_per_second, + ): self.viewer = create_viewer_func() self.script_path = script_path self.frames_per_second = frames_per_second @@ -283,47 +320,49 @@ def __init__(self, 
create_viewer_func, script_path, transition_duration, self.is_dirty = True self.is_fullscreen = False keybindings = [ - ('keyk', 'add-keypoint'), - ('bracketleft', 'prev-keypoint'), - ('bracketright', 'next-keypoint'), - ('backspace', 'delete-keypoint'), - ('shift+bracketleft', 'decrease-duration'), - ('shift+bracketright', 'increase-duration'), - ('home', 'first-keypoint'), - ('end', 'last-keypoint'), - ('keyq', 'quit'), - ('enter', 'toggle-play'), - ('keyf', 'toggle-fullscreen'), - ('keyj', 'revert-script'), - ('comma', 'prev-frame'), - ('period', 'next-frame'), + ("keyk", "add-keypoint"), + ("bracketleft", "prev-keypoint"), + ("bracketright", "next-keypoint"), + ("backspace", "delete-keypoint"), + ("shift+bracketleft", "decrease-duration"), + ("shift+bracketright", "increase-duration"), + ("home", "first-keypoint"), + ("end", "last-keypoint"), + ("keyq", "quit"), + ("enter", "toggle-play"), + ("keyf", "toggle-fullscreen"), + ("keyj", "revert-script"), + ("comma", "prev-frame"), + ("period", "next-frame"), ] with self.viewer.config_state.txn() as s: for k, a in keybindings: s.input_event_bindings.viewer[k] = a s.input_event_bindings.slice_view[k] = a s.input_event_bindings.perspective_view[k] = a - self._keybinding_message = ' '.join('%s=%s' % x for x in keybindings) - self.viewer.actions.add('add-keypoint', self._add_keypoint) - self.viewer.actions.add('prev-keypoint', self._prev_keypoint) - self.viewer.actions.add('next-keypoint', self._next_keypoint) - self.viewer.actions.add('delete-keypoint', self._delete_keypoint) - self.viewer.actions.add('increase-duration', self._increase_duration) - self.viewer.actions.add('decrease-duration', self._decrease_duration) - self.viewer.actions.add('first-keypoint', self._first_keypoint) - self.viewer.actions.add('last-keypoint', self._last_keypoint) - self.viewer.actions.add('quit', self._quit) - self.viewer.actions.add('toggle-play', self._toggle_play) - self.viewer.actions.add('toggle-fullscreen', self._toggle_fullscreen) - self.viewer.actions.add('revert-script', self._revert_script) - self.viewer.actions.add('next-frame', self._next_frame) - self.viewer.actions.add('prev-frame', self._prev_frame) + self._keybinding_message = " ".join("%s=%s" % x for x in keybindings) + self.viewer.actions.add("add-keypoint", self._add_keypoint) + self.viewer.actions.add("prev-keypoint", self._prev_keypoint) + self.viewer.actions.add("next-keypoint", self._next_keypoint) + self.viewer.actions.add("delete-keypoint", self._delete_keypoint) + self.viewer.actions.add("increase-duration", self._increase_duration) + self.viewer.actions.add("decrease-duration", self._decrease_duration) + self.viewer.actions.add("first-keypoint", self._first_keypoint) + self.viewer.actions.add("last-keypoint", self._last_keypoint) + self.viewer.actions.add("quit", self._quit) + self.viewer.actions.add("toggle-play", self._toggle_play) + self.viewer.actions.add("toggle-fullscreen", self._toggle_fullscreen) + self.viewer.actions.add("revert-script", self._revert_script) + self.viewer.actions.add("next-frame", self._next_frame) + self.viewer.actions.add("prev-frame", self._prev_frame) self.playback_manager = None self._set_keypoint_index(1) def _revert_script(self, s): if os.path.exists(self.script_path): - self.keypoints = load_script(self.script_path, self.default_transition_duration) + self.keypoints = load_script( + self.script_path, self.default_transition_duration + ) if self.playback_manager is not None: self.playback_manager.reload() else: @@ -345,19 +384,23 @@ def 
_toggle_fullscreen(self, s): def _next_frame(self, s): if self.playback_manager is None: - EditorPlaybackManager(self, playing=False, frames_per_second=self.frames_per_second) + EditorPlaybackManager( + self, playing=False, frames_per_second=self.frames_per_second + ) self.playback_manager.seek_frame(1) def _prev_frame(self, s): if self.playback_manager is None: - EditorPlaybackManager(self, playing=False, frames_per_second=self.frames_per_second) + EditorPlaybackManager( + self, playing=False, frames_per_second=self.frames_per_second + ) self.playback_manager.seek_frame(-1) def _add_keypoint(self, s): self.keypoints.insert( self.keypoint_index, - {'state': s.viewer_state, - 'transition_duration': self.transition_duration}) + {"state": s.viewer_state, "transition_duration": self.transition_duration}, + ) self.keypoint_index += 1 self.is_dirty = False self.save() @@ -376,7 +419,7 @@ def _set_transition_duration(self, value): self._stop_playback() self.transition_duration = value if self.keypoint_index > 0: - self.keypoints[self.keypoint_index - 1]['transition_duration'] = value + self.keypoints[self.keypoint_index - 1]["transition_duration"] = value self.save() self._update_status() @@ -392,7 +435,7 @@ def _decrease_duration(self, s): def _get_is_dirty(self): if self.keypoint_index == 0: return True - state = self.keypoints[self.keypoint_index - 1]['state'] + state = self.keypoints[self.keypoint_index - 1]["state"] return state.to_json() != self.viewer.state.to_json() def _viewer_state_changed(self): @@ -417,8 +460,10 @@ def _set_keypoint_index(self, index): self.keypoint_index = index state_index = max(0, index - 1) if len(self.keypoints) > 0: - self.viewer.set_state(self.keypoints[state_index]['state']) - self.transition_duration = self.keypoints[state_index]['transition_duration'] + self.viewer.set_state(self.keypoints[state_index]["state"]) + self.transition_duration = self.keypoints[state_index][ + "transition_duration" + ] self.is_dirty = False else: self.is_dirty = True @@ -447,18 +492,19 @@ def _update_status(self): if self.playback_manager is not None: dirty_message = self.playback_manager.playback_status elif self.is_dirty: - dirty_message = ' [ CHANGED ]' + dirty_message = " [ CHANGED ]" else: - dirty_message = '' + dirty_message = "" - status = '[ Keypoint %d/%d ]%s [ transition duration %g s ] %s' % ( + status = "[ Keypoint %d/%d ]%s [ transition duration %g s ] %s" % ( self.keypoint_index, len(self.keypoints), dirty_message, self.transition_duration, - self._keybinding_message, ) + self._keybinding_message, + ) with self.viewer.config_state.txn() as s: - s.status_messages['status'] = status + s.status_messages["status"] = status def _quit(self, s): self.quit_event.set() @@ -472,7 +518,8 @@ def run_edit(create_viewer_func, args=RenderArgs()): fullscreen_width=args.width, fullscreen_height=args.height, fullscreen_scale_bar_scale=args.scale_bar_scale, - frames_per_second=args.fps) + frames_per_second=args.fps, + ) print(editor.viewer) if args.browser: webbrowser.open_new(editor.viewer.get_viewer_url()) @@ -483,10 +530,12 @@ def run_render(create_viewer_func, args=RenderArgs()): keypoints = load_script(args.script) num_prefetch_frames = args.prefetch_frames for keypoint in keypoints: - keypoint['state'].gpu_memory_limit = args.gpu_memory_limit - keypoint['state'].system_memory_limit = args.system_memory_limit - keypoint['state'].concurrent_downloads = args.concurrent_downloads - keypoint['state'].cross_section_background_color = args.cross_section_background_color + 
keypoint["state"].gpu_memory_limit = args.gpu_memory_limit + keypoint["state"].system_memory_limit = args.system_memory_limit + keypoint["state"].concurrent_downloads = args.concurrent_downloads + keypoint["state"].cross_section_background_color = ( + args.cross_section_background_color + ) viewers = [create_viewer_func() for _ in range(args.shards)] for viewer in viewers: with viewer.config_state.txn() as s: @@ -495,14 +544,14 @@ def run_render(create_viewer_func, args=RenderArgs()): s.viewer_size = [args.width, args.height] s.scale_bar_options.scale_factor = args.scale_bar_scale - print('Open the specified URL to begin rendering') + print("Open the specified URL to begin rendering") print(viewer) if args.browser: webbrowser.open_new(viewer.get_viewer_url()) lock = threading.Lock() num_frames_written = [0] fps = args.fps - total_frames = sum(max(1, k['transition_duration'] * fps) for k in keypoints[:-1]) + total_frames = sum(max(1, k["transition_duration"] * fps) for k in keypoints[:-1]) def render_func(viewer, start_frame, end_frame): with lock: @@ -510,9 +559,9 @@ def render_func(viewer, start_frame, end_frame): states_to_capture = [] frame_number = 0 for i in range(len(keypoints) - 1): - a = keypoints[i]['state'] - b = keypoints[i + 1]['state'] - duration = keypoints[i]['transition_duration'] + a = keypoints[i]["state"] + b = keypoints[i + 1]["state"] + duration = keypoints[i]["transition_duration"] num_frames = max(1, int(duration * fps)) for frame_i in range(num_frames): t = frame_i / num_frames @@ -536,11 +585,13 @@ def render_func(viewer, start_frame, end_frame): for frame_number, t, cur_state in states_to_capture: prefetch_states = [ x[2] - for x in states_to_capture[frame_number + 1:frame_number + 1 + num_prefetch_frames] + for x in states_to_capture[ + frame_number + 1 : frame_number + 1 + num_prefetch_frames + ] ] prev_state = viewer.state.to_json() cur_state = cur_state.to_json() - cur_state['layers'] = prev_state['layers'] + cur_state["layers"] = prev_state["layers"] cur_state = neuroglancer.ViewerState(cur_state) viewer.set_state(cur_state) if num_prefetch_frames > 0: @@ -548,13 +599,18 @@ def render_func(viewer, start_frame, end_frame): del s.prefetch[:] for i, state in enumerate(prefetch_states[1:]): s.prefetch.append( - neuroglancer.PrefetchState(state=state, priority=num_prefetch_frames - i)) + neuroglancer.PrefetchState( + state=state, priority=num_prefetch_frames - i + ) + ) frame_number, path = saver.capture(frame_number) with lock: num_frames_written[0] += 1 cur_num_frames_written = num_frames_written[0] - print('[%07d/%07d] keypoint %.3f/%5d: %s' % - (cur_num_frames_written, total_frames, t, len(keypoints), path)) + print( + "[%07d/%07d] keypoint %.3f/%5d: %s" + % (cur_num_frames_written, total_frames, t, len(keypoints), path) + ) shard_frames = [] frames_per_shard = int(math.ceil(total_frames / args.shards)) @@ -564,8 +620,7 @@ def render_func(viewer, start_frame, end_frame): shard_frames.append((start_frame, end_frame)) render_threads = [ threading.Thread(target=render_func, args=(viewer, start_frame, end_frame)) - for viewer, - (start_frame, end_frame) in zip(viewers, shard_frames) + for viewer, (start_frame, end_frame) in zip(viewers, shard_frames) ] for t in render_threads: t.start() diff --git a/pyproject.toml b/pyproject.toml index e185ed2..7ce1e11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = ["setuptools", "wheel"] [project] name = "funlib.show.neuroglancer" -version = "0.1" +version = "0.2" description = "" readme = 
"README.md" authors = [{ name = "Jan Funke", email = "funkej@janelia.hhmi.org" }] @@ -16,6 +16,6 @@ dependencies = [ "neuroglancer >= 2", "numpy", "zarr", - "funlib.geometry", - "funlib.persistence", + "funlib.geometry >= 0.3.0", + "funlib.persistence >= 0.5.0", ]