# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# imports
# NOTE(review): this file is a scrape of independent gala usage snippets
# (see the Snyk banner above); later fragments reuse names defined here,
# and the file is not runnable top-to-bottom as-is.
from gala import imio, classify, features, agglo, evaluate as ev
# read in training data: ground truth labels, boundary-probability map,
# and watershed oversegmentation, each stored as an HDF5 stack
gt_train, pr_train, ws_train = (map(imio.read_h5_stack,
                                    ['train-gt.lzf.h5', 'train-p1.lzf.h5',
                                     'train-ws.lzf.h5']))
# create a feature manager: moment + histogram features combined
fm = features.moments.Manager()
fh = features.histogram.Manager()
fc = features.base.Composite(children=[fm, fh])
# create graph and obtain a training dataset
g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc)
# learn_agglomerate returns per-epoch (features, labels, weights, merges);
# [0] selects the first epoch's dataset
(X, y, w, merges) = g_train.learn_agglomerate(gt_train, fc)[0]
y = y[:, 0] # gala has 3 truth labeling schemes, pick the first one
print((X.shape, y.shape)) # standard scikit-learn input format
# train a classifier, scikit-learn syntax
rf = classify.DefaultRandomForest().fit(X, y)
# Second training pass using the 4-channel probability map.
# NOTE(review): p4_train, np, ws_test, p4_test, gt_test come from the
# later fragment of this scraped file — this snippet assumes they are
# already in scope; seg_test1 (used below) is not defined anywhere
# visible — presumably produced by the 1-channel pipeline; confirm.
g_train4 = agglo.Rag(ws_train, p4_train, feature_manager=fc)
# BUG FIX: np.random.RandomState(0) built a fresh generator and discarded
# it, leaving the global RNG unseeded; seed the global state instead.
np.random.seed(0)
(X4, y4, w4, merges4) = map(np.copy, map(np.ascontiguousarray,
                            g_train4.learn_agglomerate(gt_train, fc)[0]))
print(X4.shape)  # BUG FIX: print() call (was a Python-2 print statement)
# save the raw training set (y4 still carries all 3 labeling schemes here)
np.savez('example-data/train-set4.npz', X=X4, y=y4)
y4 = y4[:, 0]  # keep only the first truth labeling scheme
rf4 = classify.DefaultRandomForest()
np.random.seed(0)  # BUG FIX (as above): actually reseed before fitting
rf4 = rf4.fit(X4, y4)
classify.save_classifier(rf4, 'example-data/rf-4.joblib')
# wrap the trained forest as an agglomeration policy and segment the
# test volume at threshold 0.5
learned_policy4 = agglo.classifier_probability(fc, rf4)
g_test4 = agglo.Rag(ws_test, p4_test, learned_policy4, feature_manager=fc)
g_test4.agglomerate(0.5)
seg_test4 = g_test4.get_segmentation()
imio.write_h5_stack(seg_test4, 'example-data/test-seg4.lzf.h5', compression='lzf')
# split-VI of the baseline watershed vs. the learned segmentations
results = np.vstack((
    ev.split_vi(ws_test, gt_test),
    ev.split_vi(seg_test1, gt_test),
    ev.split_vi(seg_test4, gt_test)
    ))
np.save('example-data/vi-results.npy', results)
import numpy as np
from gala import imio, classify, features, agglo, evaluate as ev
# load train and test volumes: ground truth, 1-channel (p1) and 4-channel
# (p4) probability maps, and watershed oversegmentations
gt_train, pr_train, p4_train, ws_train = map(imio.read_h5_stack, ['example-data/train-gt.lzf.h5', 'example-data/train-p1.lzf.h5', 'example-data/train-p4.lzf.h5', 'example-data/train-ws.lzf.h5'])
gt_test, pr_test, p4_test, ws_test = map(imio.read_h5_stack, ['example-data/test-gt.lzf.h5', 'example-data/test-p1.lzf.h5', 'example-data/test-p4.lzf.h5', 'example-data/test-ws.lzf.h5'])
# feature manager: moment + histogram features combined
fm = features.moments.Manager()
fh = features.histogram.Manager()
fc = features.base.Composite(children=[fm, fh])
g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc)
# BUG FIX: np.random.RandomState(0) created and discarded a generator
# without seeding the global RNG; use np.random.seed for reproducibility.
np.random.seed(0)
(X, y, w, merges) = map(np.copy, map(np.ascontiguousarray,
                        g_train.learn_agglomerate(gt_train, fc)[0]))
print(X.shape)  # BUG FIX: print() call (was a Python-2 print statement)
# save the raw training set before slicing the label column
np.savez('example-data/train-set.npz', X=X, y=y)
y = y[:, 0]  # first of gala's 3 truth labeling schemes
rf = classify.DefaultRandomForest()
X.shape  # no-op inspection left over from an interactive session
np.random.seed(0)  # BUG FIX (as above): actually reseed before fitting
rf = rf.fit(X, y)
classify.save_classifier(rf, 'example-data/rf-1.joblib')
def setUp(self):
    """Load watershed fixtures 1-4 and a toy 1-D landscape array.

    Reads the probability and watershed HDF5 stacks for each test case
    from ``rundir`` (module-level) into ``self.probs``/``self.results``.
    """
    indices = range(1, 5)
    self.num_tests = len(indices)
    self.probs = [imio.read_h5_stack(rundir + '/test-%02i-probabilities.h5' % i)
                  for i in indices]
    self.results = [imio.read_h5_stack(rundir + '/test-%02i-watershed.h5' % i)
                    for i in indices]
    self.landscape = numpy.array([1, 0, 1, 2, 1, 3, 2, 0, 2, 4, 1, 0])
def gen_watershed(self):
    """Generate a watershed oversegmentation from gala's bundled test data.

    Reads the pixel-probability stack shipped inside the installed gala
    package, seeds the watershed with connected components of zero-valued
    boundary pixels, and floods the boundary map.

    Returns:
        tuple: ``(supervoxels, boundary, prediction)`` — the watershed
        label volume, the boundary channel, and the full multi-channel
        prediction volume.
    """
    # local imports keep heavy dependencies out of module import time
    import os   # BUG FIX: os was used below but never imported here
    import sys  # BUG FIX: sys was used below but never imported here
    from gala import imio
    import numpy
    from skimage import morphology as skmorph
    from scipy.ndimage import label
    # locate the test data directory relative to the installed gala package
    self.datadir = os.path.abspath(os.path.dirname(sys.modules["gala"].__file__)) + "/testdata/"
    prediction = imio.read_image_stack(self.datadir + "pixelprobs.h5",
                                       group='/volume/prediction', single_channel=False)
    boundary = prediction[..., 0]  # channel 0 holds the boundary probability
    seeds = label(boundary == 0)[0]  # connected components of zero-boundary pixels
    # NOTE(review): skimage.morphology.watershed moved to
    # skimage.segmentation.watershed in skimage >= 0.19 — confirm the
    # pinned version before running.
    supervoxels = skmorph.watershed(boundary, seeds)
    return supervoxels, boundary, prediction
def auto(session_location, options, master_logger):
    # Match agglomeration test bodies against ground-truth bodies.
    # NOTE(review): truncated fragment — the trailing for-loop has no body
    # in this scrape (the lines that follow belong to a different snippet),
    # so this definition is not runnable as-is.
    # NOTE(review): indentation reconstructed from context — verify nesting.
    master_logger.info("Reading gt_stack")
    gt_stack = imio.read_image_stack(options.gt_stack)
    master_logger.info("Reading test_stack")
    test_stack = imio.read_image_stack(options.test_stack)
    master_logger.info("Finished reading stacks")
    master_logger.info("Loading graph json")
    # (node1, node2, probability) triples for each RAG edge
    pairprob_list = load_graph_json(options.ragprob_file)
    master_logger.info("Finished loading graph json")
    master_logger.info("Matching bodies to GT")
    # map each test-segmentation body id to its best-matching GT body
    body2gtbody = find_gt_bodies(gt_stack, test_stack)
    master_logger.info("Finished matching bodies to GT")
    body2body = {}
    for (node1, node2, dummy) in pairprob_list:
# --- fragment from a different snippet: segmentation pipeline stage ---
# NOTE(review): the enclosing function is not part of this scrape; names
# such as options, session_location, master_logger, pixel and
# gen_supervoxels are defined in the fragment's original module.
# NOTE(review): indentation reconstructed from context — verify nesting.
# run boundary prediction -- produces a prediction file
if options.gen_pixel:
    prediction_file = pixel.gen_pixel_probabilities(session_location, options, master_logger,
                                                    options.image_stack)
else:
    prediction_file = options.pixelprob_file
# generate supervoxels -- produces supervoxels and output as appropriate
supervoxels = None
prediction = None
if options.gen_supervoxels:
    supervoxels, prediction = gen_supervoxels(options, prediction_file, master_logger)
elif options.supervoxels_file:
    master_logger.info("Reading supervoxels: " + options.supervoxels_file)
    supervoxels = imio.read_image_stack(options.supervoxels_file)
    #supervoxels = imio.read_mapped_segmentation(options.supervoxels_file)
    master_logger.info("Finished reading supervoxels")
# write superpixels out to hdf5 and/or raveler files
sps_out = None
image_stack = None
if options.raveler_output:
    image_stack = imio.read_image_stack(options.image_stack)
if options.h5_output:
    imio.write_image_stack(supervoxels,
                           session_location + "/" + options.supervoxels_name)
# NOTE(review): the triple-quote below opens a string literal that is
# never closed in this fragment — a scraping artifact, not valid code.
"""
if supervoxels is not None:
if options.h5_output:
def tsdata():
    """Load the test watershed, probability, and ground-truth stacks.

    Reads the three lzf-compressed HDF5 volumes from the module-level
    directory ``dd`` and returns them as ``(ws, probs, gt)``.
    """
    stack_names = ('test-ws.lzf.h5', 'test-p1.lzf.h5', 'test-gt.lzf.h5')
    return tuple(imio.read_h5_stack(os.path.join(dd, name))
                 for name in stack_names)
def grab_pred_seg(pred_name, seg_name, border_size):
    """Load a prediction stack and mapped segmentation, cropping borders.

    The segmentation is transposed from (z, y, x) file order to match the
    prediction's axis order; when ``border_size`` is positive, that many
    voxels are trimmed from every face of both volumes.

    Returns:
        tuple: ``(prediction, segmentation)`` as arrays.
    """
    prediction = imio.read_image_stack(pred_name,
                                       group=PREDICTIONS_HDF5_GROUP)
    segmentation = imio.read_mapped_segmentation(seg_name).transpose((2, 1, 0))
    if border_size > 0:
        # same interior window on each of the three axes
        crop = slice(border_size, -border_size)
        prediction = prediction[crop, crop, crop]
        segmentation = segmentation[crop, crop, crop]
    return prediction, segmentation
# --- fragment: post-agglomeration output stage from the pipeline ---
# NOTE(review): threshold, agglom_stack, options, session_location,
# master_logger, supervoxels, hashlib, h5py, json, os and sys come from
# the fragment's original enclosing module/function, not visible here.
# NOTE(review): indentation reconstructed from context — verify nesting.
master_logger.info("Finished agglomeration to threshold " + str(threshold)
                   + " with " + str(agglom_stack.number_of_nodes()))
if options.inclusion_removal:
    inclusion_removal(agglom_stack, master_logger)
segmentation = agglom_stack.get_segmentation()
if options.h5_output:
    imio.write_image_stack(segmentation,
                           session_location+"/agglom-"+str(threshold)+".lzf.h5", compression='lzf')
# hash the command line so distinct runs get distinct output names
# NOTE(review): on Python 3, hashlib.md5 requires bytes — the joined argv
# string would need .encode(); confirm the target interpreter.
md5hex = hashlib.md5(' '.join(sys.argv)).hexdigest()
file_base = os.path.abspath(session_location)+"/seg_data/seg-"+str(threshold) + "-" + md5hex + "-"
# store the result as a supervoxel->body mapping rather than a full volume
transforms = imio.compute_sp_to_body_map(supervoxels, segmentation)
seg_loc = file_base +"v1.h5"
if not os.path.exists(session_location+"/seg_data"):
    os.makedirs(session_location+"/seg_data")
imio.write_mapped_segmentation(supervoxels, transforms, seg_loc)
if options.synapse_file is not None:
    # copy synapse annotations (JSON) into the segmentation HDF5 file
    h5temp = h5py.File(seg_loc, 'a')
    syn_data = json.load(open((options.synapse_file)))
    meta = syn_data['metadata']
    meta['username'] = "auto"
    syn_data_str = json.dumps(syn_data, indent=4)
    # NOTE(review): h5py.new_vlen is the legacy API (removed in h5py 3.x);
    # modern code uses h5py.string_dtype() — confirm the pinned version.
    str_type = h5py.new_vlen(str)
    ds = h5temp.create_dataset("synapse-annotations", data=syn_data_str, shape=(1,), dtype=str_type)
graph_loc = file_base+"graphv1.json"