# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def gen_watershed(self):
    """Generate a seeded watershed over-segmentation of gala's bundled test data.

    Loads the pixel-probability stack shipped inside the installed ``gala``
    package, takes channel 0 as the boundary probability map, seeds from
    connected components of exactly-zero boundary probability, and floods
    with a watershed.

    Side effect:
        Sets ``self.datadir`` to the gala ``testdata/`` directory.

    Returns:
        tuple: ``(supervoxels, boundary, prediction)`` ndarrays.
    """
    import os
    import sys
    from gala import imio
    from skimage import morphology as skmorph
    from scipy.ndimage import label

    # Locate the testdata directory relative to the installed gala package.
    self.datadir = os.path.abspath(
        os.path.dirname(sys.modules["gala"].__file__)) + "/testdata/"
    prediction = imio.read_image_stack(self.datadir + "pixelprobs.h5",
                                       group='/volume/prediction',
                                       single_channel=False)
    boundary = prediction[..., 0]
    # Each connected region of zero boundary probability becomes one seed.
    seeds = label(boundary == 0)[0]
    # NOTE(review): skimage.morphology.watershed was deprecated in 0.16 and
    # removed in 0.19; new code should call skimage.segmentation.watershed.
    supervoxels = skmorph.watershed(boundary, seeds)
    return supervoxels, boundary, prediction
session_location + "/" + options.supervoxels_name, compression='lzf')
if options.raveler_output:
image_stack = imio.read_image_stack(options.image_stack)
sps_out = output_raveler(supervoxels, supervoxels, image_stack,
"supervoxels", session_location, master_logger)
if options.synapse_file is not None:
shutil.copyfile(options.synapse_file,
session_location + "/raveler-export/supervoxels/annotations-synapse.json")
"""
# agglomerate and generate output
if options.gen_agglomeration:
if prediction is None and prediction_file is not None:
master_logger.info("Reading pixel prediction: " + prediction_file)
prediction = imio.read_image_stack(prediction_file,
group=PREDICTIONS_HDF5_GROUP)
prediction = prediction.transpose((2, 1, 0, 3))
master_logger.info("Finished reading pixel prediction")
elif prediction is None:
raise Exception("No pixel probs available for agglomeration")
flow_perform_agglomeration(options, supervoxels, prediction, image_stack,
session_location, sps_out, master_logger)
supervoxels = None
prediction = None
if options.gen_supervoxels:
supervoxels, prediction = gen_supervoxels(options, prediction_file, master_logger)
elif options.supervoxels_file:
master_logger.info("Reading supervoxels: " + options.supervoxels_file)
supervoxels = imio.read_image_stack(options.supervoxels_file)
#supervoxels = imio.read_mapped_segmentation(options.supervoxels_file)
master_logger.info("Finished reading supervoxels")
# write superpixels out to hdf5 and/or raveler files
sps_out = None
image_stack = None
if options.raveler_output:
image_stack = imio.read_image_stack(options.image_stack)
if options.h5_output:
imio.write_image_stack(supervoxels,
session_location + "/" + options.supervoxels_name)
"""
if supervoxels is not None:
if options.h5_output:
imio.write_image_stack(supervoxels,
session_location + "/" + options.supervoxels_name, compression='lzf')
if options.raveler_output:
image_stack = imio.read_image_stack(options.image_stack)
sps_out = output_raveler(supervoxels, supervoxels, image_stack,
"supervoxels", session_location, master_logger)
if options.synapse_file is not None:
shutil.copyfile(options.synapse_file,
def valprob(session_location, options, master_logger):
    """Validate RAG edge probabilities against a ground-truth segmentation.

    Reads the ground-truth and test segmentation stacks, loads the list of
    (node, node, probability) edges from ``options.ragprob_file``, matches
    test bodies to ground-truth bodies, and initializes 101 histogram bins
    (one per integer probability percentile, 0-100).

    Args:
        session_location: Session/output directory (not used in the portion
            of this function visible in this chunk).
        options: Parsed options; must provide ``gt_stack``, ``test_stack``
            and ``ragprob_file``.
        master_logger: Logger used for progress messages.
    """
    master_logger.info("Reading gt_stack")
    gt_stack = imio.read_image_stack(options.gt_stack)
    master_logger.info("Reading test_stack")
    test_stack = imio.read_image_stack(options.test_stack)
    master_logger.info("Finished reading stacks")

    master_logger.info("Loading graph json")
    # pairprob_list: presumably a list of (node1, node2, probability)
    # tuples, matching the unpacking used elsewhere in this file -- TODO confirm.
    pairprob_list = load_graph_json(options.ragprob_file)
    master_logger.info("Finished loading graph json")

    master_logger.info("Matching bodies to GT")
    body2gtbody = find_gt_bodies(gt_stack, test_stack)
    master_logger.info("Finished matching bodies to GT")

    # One zeroed bin per percentile 0..100.
    nomerge_hist = []
    tot_hist = []
    for iter1 in range(0, 101):
        nomerge_hist.append(0)
        tot_hist.append(0)
    # NOTE(review): this function appears truncated in this chunk -- the
    # histograms are initialized but never filled or reported here.
def gen_supervoxels(options, prediction_file, master_logger):
    """Returns ndarray labeled using (optionally seeded) watershed algorithm

    Args:
        options: OptionNamespace.
        prediction_file: String. File name of prediction hdf5 file where predictions
            are assumed to be in group PREDICTIONS_HDF5_GROUP.

    Returns:
        A 2-tuple of supervoxel and prediction ndarray.
    """
    master_logger.debug("Generating supervoxels")
    if not os.path.isfile(prediction_file):
        raise Exception("Training file not found: " + prediction_file)

    prediction = imio.read_image_stack(prediction_file, group=PREDICTIONS_HDF5_GROUP)

    # Prediction file is in format (t, x, y, z, c) but needs to be in format (z, x, y).
    # Also, raveler convention is (0,0) sits in bottom left while ilastik convention is
    # origin sits in top left.
    # imio.read_image_stack squeezes out the first dim.
    prediction = prediction.transpose((2, 1, 0, 3))
    # BUGFIX: this message was previously logged *before* the transpose ran.
    master_logger.info("Transposed boundary prediction")

    #if options.extract_ilp_prediction:
    #    prediction = prediction.transpose((2, 1, 0))

    # TODO -- Refactor. If 'single-channel' and hdf5 prediction file is given, it looks like
    # read_image_stack will return a modified volume and the bound-channels parameter must
    # be 0 or there'll be conflict.
    boundary = grab_boundary(prediction, options.bound_channels, master_logger)
    master_logger.info("Shape of boundary: %s" % str(boundary.shape))
    # NOTE(review): this chunk appears truncated -- the watershed step and the
    # documented ``return supervoxels, prediction`` are not visible here.
def auto(session_location, options, master_logger):
master_logger.info("Reading gt_stack")
gt_stack = imio.read_image_stack(options.gt_stack)
master_logger.info("Reading test_stack")
test_stack = imio.read_image_stack(options.test_stack)
master_logger.info("Finished reading stacks")
master_logger.info("Loading graph json")
pairprob_list = load_graph_json(options.ragprob_file)
master_logger.info("Finished loading graph json")
master_logger.info("Matching bodies to GT")
body2gtbody = find_gt_bodies(gt_stack, test_stack)
master_logger.info("Finished matching bodies to GT")
body2body = {}
for (node1, node2, dummy) in pairprob_list:
body2body[node1] = node1
body2body[node2] = node2