How to use neuroglancer - 10 common examples

To help you get started, we've selected a few neuroglancer examples based on popular ways the library is used in public projects.
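Every example below follows the same basic pattern: create a neuroglancer.Viewer() (which starts a local web server), mutate the viewer state inside a viewer.txn() transaction, and open the printed URL in a browser. A minimal sketch of that shared pattern, using the public FlyEM dataset that most of these snippets point at:

import webbrowser

import neuroglancer

# Creating a Viewer starts a local server that hosts this viewer's state.
viewer = neuroglancer.Viewer()

# State changes are batched in a transaction and pushed to any connected
# browsers when the `with` block exits.
with viewer.txn() as s:
    s.layers['image'] = neuroglancer.ImageLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image')

print(viewer)  # printing the viewer shows the URL to open
webbrowser.open_new(viewer.get_viewer_url())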

Example 1: google/neuroglancer - python/neuroglancer/tool/agglomeration_split_tool.py

def run_batch(args, graph):
    for path in args.split_seeds:
        split_seeds = load_split_seeds(path)
        split_result = do_split(graph=graph, split_seeds=split_seeds, agglo_id=args.agglo_id)
        state = display_split_result(
            graph=graph,
            split_seeds=split_seeds,
            image_url=args.image_url,
            segmentation_url=args.segmentation_url,
            **split_result)
        print('<p><a href="%s">%s</a></p>' % (neuroglancer.to_url(state), path))
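neuroglancer.to_url(state) serializes the complete viewer state into a standalone link, which is what makes the per-path report above clickable. A quick sketch of the round trip, assuming parse_url (to_url's inverse from the same url_state module) is exported at the top level:

import neuroglancer

state = neuroglancer.ViewerState()
state.layers['image'] = neuroglancer.ImageLayer(
    source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image')

url = neuroglancer.to_url(state)        # state -> shareable URL
restored = neuroglancer.parse_url(url)  # assumed inverse: URL -> state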

Example 2: google/neuroglancer - python/neuroglancer/tool/video_tool.py

def run_render(args):
    keypoints = load_script(args.script)
    num_prefetch_frames = args.prefetch_frames
    for keypoint in keypoints:
        keypoint['state'].gpu_memory_limit = args.gpu_memory_limit
        keypoint['state'].system_memory_limit = args.system_memory_limit
        keypoint['state'].concurrent_downloads = args.concurrent_downloads
        keypoint['state'].cross_section_background_color = args.cross_section_background_color
    viewers = [neuroglancer.Viewer() for _ in range(args.shards)]
    for viewer in viewers:
        with viewer.config_state.txn() as s:
            s.show_ui_controls = False
            s.show_panel_borders = False
            s.viewer_size = [args.width, args.height]
            s.scale_bar_options.scale_factor = args.scale_bar_scale

        print('Open the specified URL to begin rendering')
        print(viewer)
        if args.browser:
            webbrowser.open_new(viewer.get_viewer_url())
    lock = threading.Lock()
    num_frames_written = [0]  # boxed in a list so worker threads can update it under the lock
    fps = args.fps
    total_frames = sum(max(1, k['transition_duration'] * fps) for k in keypoints[:-1])
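The excerpt stops before the capture loop, but each configured viewer is what the renderer screenshots frame by frame. A hedged sketch of grabbing a single frame, assuming viewer.screenshot() blocks until the connected browser has rendered and returns a reply whose screenshot.image field holds encoded PNG bytes:

# Sketch (assumed API): capture one frame from a configured viewer.
reply = viewer.screenshot(size=[args.width, args.height])
with open('frame_%07d.png' % num_frames_written[0], 'wb') as f:
    f.write(reply.screenshot.image)  # encoded PNG bytes, per the assumption above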

Example 3: google/neuroglancer - python/neuroglancer/tool/agglomeration_split_tool.py

def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
        self.graph = graph
        self.agglo_id = agglo_id
        self.image_url = image_url
        self.segmentation_url = segmentation_url
        self.state = InteractiveState(state_path)
        self.cached_split_result = CachedSplitResult(
            state=self.state, graph=self.graph, agglo_id=self.agglo_id)
        self.agglo_members = set(self.graph.get_agglo_members(agglo_id))

        if state_path is not None and os.path.exists(state_path):
            self.state.load()
        else:
            self.state.initialize(self.agglo_members)

        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('inclusive-seed', self._add_inclusive_seed)
        viewer.actions.add('exclusive-seed', self._add_exclusive_seed)
        viewer.actions.add('next-component', self._next_component)
        viewer.actions.add('prev-component', self._prev_component)
        viewer.actions.add('new-component', self._make_new_component)
        viewer.actions.add('exclude-component', self._exclude_component)
        viewer.actions.add('exclude-all-but-component', self._exclude_all_but_component)

        key_bindings = [
            ['bracketleft', 'prev-component'],
            ['bracketright', 'next-component'],
            ['at:dblclick0', 'exclude-component'],
            ['at:shift+mousedown2', 'exclude-all-but-component'],
            ['at:control+mousedown0', 'inclusive-seed'],
            ['at:shift+mousedown0', 'exclusive-seed'],
            ['enter', 'new-component'],
        ]
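The snippet is cut off before the bindings take effect; elsewhere in these tools the same list is applied by writing each pair into the shared config state, roughly:

        with viewer.config_state.txn() as s:
            for key, command in key_bindings:
                s.input_event_bindings.viewer[key] = command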

Example 4: google/neuroglancer - python/examples/example.py

args = ap.parse_args()
if args.bind_address:
    neuroglancer.set_server_bind_address(args.bind_address)
if args.static_content_url:
    neuroglancer.set_static_content_source(url=args.static_content_url)

a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
b = np.pad(b, 1, 'constant')

viewer = neuroglancer.Viewer()
dimensions = neuroglancer.CoordinateSpace(
    names=['x', 'y', 'z'],
    units='nm',
    scales=[10, 10, 10])
with viewer.txn() as s:
    s.dimensions = dimensions
    s.layers.append(
        name='a',
        layer=neuroglancer.LocalVolume(
            data=a,
            dimensions=neuroglancer.CoordinateSpace(
                names=['c^', 'x', 'y', 'z'],
                units=['', 'nm', 'nm', 'nm'],
                scales=[1, 10, 10, 10]),
            voxel_offset=(0, 20, 30, 15),
        ),
    )
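The label volume b built above can be served the same way; the example goes on to add it as a second local layer, along the lines of:

with viewer.txn() as s:
    s.layers.append(
        name='b',
        layer=neuroglancer.LocalVolume(
            data=b,
            dimensions=dimensions),  # the 3-d x/y/z space defined above
    )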

Example 5: google/neuroglancer - python/examples/example_overlay.py

ap = argparse.ArgumentParser()
ap.add_argument(
    '-a',
    '--bind-address',
    help='Bind address for Python web server.  Use 127.0.0.1 (the default) to restrict access '
    'to browsers running on the local machine, or 0.0.0.0 to permit access from remote browsers.')
ap.add_argument(
    '--static-content-url', help='Obtain the Neuroglancer client code from the specified URL.')
args = ap.parse_args()
if args.bind_address:
    neuroglancer.set_server_bind_address(args.bind_address)
if args.static_content_url:
    neuroglancer.set_static_content_source(url=args.static_content_url)

viewer = neuroglancer.Viewer()

a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

with viewer.txn() as s:
    s.layers['image'] = neuroglancer.ImageLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
    )
    s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
    )
    s.layers['overlay'] = neuroglancer.ImageLayer(
        source=neuroglancer.LocalVolume(
            data=a,
            # Assumed coordinate space: 8 nm voxels to match the FlyEM layers
            # above, with the first axis as the color channel.
            dimensions=neuroglancer.CoordinateSpace(
                names=['c^', 'x', 'y', 'z'],
                units=['', 'nm', 'nm', 'nm'],
                scales=[1, 8, 8, 8]),
        ),
    )
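An overlay layer like this one usually maps the three channels of a to red, green, and blue with a custom shader; a sketch of the kind of GLSL fragment Neuroglancer image layers accept, assigned through the layer's shader property:

with viewer.txn() as s:
    # Sketch: render channels 0/1/2 of the local volume as R/G/B.
    s.layers['overlay'].shader = """
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
"""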

Example 6: google/neuroglancer - python/examples/extend_segments_tool.py

def __init__(self, filename):
        self.filename = filename
        self.point_annotation_layer_name = 'false-merges'
        self.states = []
        self.state_index = None
        viewer = self.viewer = neuroglancer.Viewer()
        self.other_state_segment_ids = dict()

        viewer.actions.add('anno-next-state', lambda s: self.next_state())
        viewer.actions.add('anno-prev-state', lambda s: self.prev_state())
        viewer.actions.add('anno-save', lambda s: self.save())
        viewer.actions.add('anno-show-all', lambda s: self.set_combined_state())
        viewer.actions.add('anno-add-segments-from-state',
                           lambda s: self.add_segments_from_state(s.viewer_state))

        with viewer.config_state.txn() as s:
            s.input_event_bindings.viewer['pageup'] = 'anno-prev-state'
            s.input_event_bindings.viewer['pagedown'] = 'anno-next-state'
            s.input_event_bindings.viewer['control+keys'] = 'anno-save'
            s.input_event_bindings.viewer['control+keya'] = 'anno-show-all'

        viewer.shared_state.add_changed_callback(self.on_state_changed)
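add_changed_callback registers a handler that fires whenever any part of the shared viewer state changes; the handler then reads a snapshot off the viewer itself. A minimal hedged sketch of such a handler, assuming the callback is invoked with no arguments:

    def on_state_changed(self):
        # viewer.state returns a snapshot of the current viewer state.
        state = self.viewer.state
        print('position is now %r' % (state.position,))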

Example 7: google/neuroglancer - python/examples/interactive_inference.py

def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('inference', self._do_inference)
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=self.gt_vol.resolution,
        )
        self.inf_results = zarr.zeros(
            self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
        self.inf_volume = neuroglancer.LocalVolume(
            data=self.inf_results, dimensions=self.dimensions)
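Because inf_results is an in-memory zarr array served to the browser through a LocalVolume, the viewer has to be told whenever inference writes new voxels; LocalVolume.invalidate() exists for exactly that purpose:

        # After writing freshly inferred voxels into the zarr array...
        self.inf_results[0:64, 0:64, 0:64] = 255
        # ...notify connected viewers that the locally served data changed.
        self.inf_volume.invalidate()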

Example 8: google/neuroglancer - python/neuroglancer/tool/filter_bodies.py

def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
        self.state = State(state_path)
        self.num_to_prefetch = num_to_prefetch
        self.viewer = neuroglancer.Viewer()
        self.bodies = bodies
        self.state.load()
        self.total_voxels = sum(x.num_voxels for x in bodies)
        self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])

        with self.viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
            s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
            s.show_slices = False
            s.concurrent_downloads = 256
            s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
            s.layout = '3d'

        key_bindings = [
            ['bracketleft', 'prev-index'],
            ['bracketright', 'next-index'],
        ]

Example 9: google/neuroglancer - python/examples/example_action.py

from __future__ import print_function

import webbrowser

import neuroglancer

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.layers['image'] = neuroglancer.ImageLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
    )

def my_action(s):
    print('Got my-action')
    print('  Mouse position: %s' % (s.mouse_voxel_coordinates,))
    print('  Layer selected values: %s' % (s.selected_values,))

viewer.actions.add('my-action', my_action)
with viewer.config_state.txn() as s:
    s.input_event_bindings.viewer['keyt'] = 'my-action'
    s.status_messages['hello'] = 'Welcome to this example'

print(viewer)
webbrowser.open_new(viewer.get_viewer_url())

Example 10: google/neuroglancer - python/examples/synaptic_partners.py

def __init__(self, synapse_path, top_method='min', num_top_partners=10):
        with open(synapse_path, 'r') as f:
            synapse_data = json.load(f)['data']
        self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(synapse_data)
        self.top_method = top_method
        self.num_top_partners = num_top_partners

        dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )

        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('select-custom', self._handle_select)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['dblclick0'] = 'select-custom'
        with viewer.txn() as s:
            s.projection_orientation = [0.63240087, 0.01582051, 0.05692779, 0.77238464]
            s.dimensions = dimensions
            s.position = [3000, 3000, 3000]
            s.layers['image'] = neuroglancer.ImageLayer(
                source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['partners'] = neuroglancer.SegmentationLayer(
                source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
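The partners layer starts out pointing at the same ground-truth source with nothing selected; the select-custom handler's job is to fill in its displayed segments, which from Python is just another transaction (partner_ids below is a hypothetical stand-in for the computed top partners):

partner_ids = [1234, 5678]  # hypothetical: result of the partner ranking
with viewer.txn() as s:
    s.layers['partners'].segments = set(partner_ids)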