# train a gala agglomeration classifier, then segment a test volume
import numpy as np
from gala import imio, classify, features, agglo, evaluate as ev

# read in training and test data: ground truth, 1- and 4-channel
# probability maps, and watershed oversegmentations
gt_train, pr_train, p4_train, ws_train = map(imio.read_h5_stack,
    ['example-data/train-gt.lzf.h5', 'example-data/train-p1.lzf.h5',
     'example-data/train-p4.lzf.h5', 'example-data/train-ws.lzf.h5'])
gt_test, pr_test, p4_test, ws_test = map(imio.read_h5_stack,
    ['example-data/test-gt.lzf.h5', 'example-data/test-p1.lzf.h5',
     'example-data/test-p4.lzf.h5', 'example-data/test-ws.lzf.h5'])

# create a feature manager
fm = features.moments.Manager()
fh = features.histogram.Manager()
fc = features.base.Composite(children=[fm, fh])

# build the training RAG and learn an agglomeration training set
g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc)
np.random.seed(0)  # seed the global RNG for reproducibility
X, y, w, merges = map(np.copy, map(np.ascontiguousarray,
                                   g_train.learn_agglomerate(gt_train, fc)[0]))
print(X.shape)
np.savez('example-data/train-set.npz', X=X, y=y)
y = y[:, 0]  # gala has 3 truth labeling schemes; pick the first one

# train a random forest classifier, scikit-learn syntax
rf = classify.DefaultRandomForest()
np.random.seed(0)
rf = rf.fit(X, y)
classify.save_classifier(rf, 'example-data/rf-1.joblib')

# a policy is the composition of a feature map and a classifier
learned_policy = agglo.classifier_probability(fc, rf)

# build the test RAG with the learned policy and agglomerate to 0.5
g_test = agglo.Rag(ws_test, pr_test, learned_policy, feature_manager=fc)
g_test.agglomerate(0.5)
seg_test1 = g_test.get_segmentation()
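A natural follow-up, sketched below, is to score the result with gala's evaluate module (imported above as ev): split_vi reports the variation of information split into undersegmentation and oversegmentation components.

# score the baseline watershed and the agglomerated segmentation
# against ground truth; lower is better on both components
results = np.vstack((ev.split_vi(ws_test, gt_test),      # baseline watershed
                     ev.split_vi(seg_test1, gt_test)))   # learned, threshold 0.5
print(results)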
# imports
from gala import imio, classify, features, agglo, evaluate as ev
# read in training data
gt_train, pr_train, ws_train = (map(imio.read_h5_stack,
                                    ['train-gt.lzf.h5', 'train-p1.lzf.h5',
                                     'train-ws.lzf.h5']))
# create a feature manager
fm = features.moments.Manager()
fh = features.histogram.Manager()
fc = features.base.Composite(children=[fm, fh])
# create graph and obtain a training dataset
g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc)
(X, y, w, merges) = g_train.learn_agglomerate(gt_train, fc)[0]
y = y[:, 0] # gala has 3 truth labeling schemes, pick the first one
print((X.shape, y.shape)) # standard scikit-learn input format
# train a classifier, scikit-learn syntax
rf = classify.DefaultRandomForest().fit(X, y)
# a policy is the composition of a feature map and a classifier
learned_policy = agglo.classifier_probability(fc, rf)
# get the test data and make a RAG with the trained policy
pr_test, ws_test = (map(imio.read_h5_stack,
                        ['test-p1.lzf.h5', 'test-ws.lzf.h5']))
g_test = agglo.Rag(ws_test, pr_test, learned_policy, feature_manager=fc)
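To finish, the agglomerate and get_segmentation calls from the first snippet apply unchanged; the sketch below also writes the result out, assuming gala's imio.write_h5_stack, the counterpart of the read_h5_stack reader used above.

# agglomerate until the predicted merge probability reaches 0.5,
# then extract and save the segmentation
g_test.agglomerate(0.5)
seg_test = g_test.get_segmentation()
imio.write_h5_stack(seg_test, 'test-seg.lzf.h5')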
# load example data (rundir is assumed to point at the test directory)
import os
train_list = ['example-data/train-gt.lzf.h5', 'example-data/train-p1.lzf.h5',
              'example-data/train-p4.lzf.h5', 'example-data/train-ws.lzf.h5']
train_list = [os.path.join(rundir, fn) for fn in train_list]
gt_train, pr_train, p4_train, ws_train = map(imio.read_h5_stack, train_list)
test_list = ['example-data/test-gt.lzf.h5', 'example-data/test-p1.lzf.h5',
             'example-data/test-p4.lzf.h5', 'example-data/test-ws.lzf.h5']
test_list = [os.path.join(rundir, fn) for fn in test_list]
gt_test, pr_test, p4_test, ws_test = map(imio.read_h5_stack, test_list)
# prepare feature manager
fm = features.moments.Manager()
fh = features.histogram.Manager()
fc = features.base.Composite(children=[fm, fh])
### helper functions
import pickle
import sys
from numpy.testing import assert_allclose, assert_approx_equal, assert_equal

PYTHON_VERSION = sys.version_info[0]

def load_pickle(fn):
    with open(fn, 'rb') as fin:
        if PYTHON_VERSION == 3:
            return pickle.load(fin, encoding='bytes', fix_imports=True)
        else:  # Python 2
            return pickle.load(fin)

def load_training_data(fn):
    io = np.load(fn)
    X, y = io['X'], io['y']
    if y.ndim > 1:
        y = y[:, 0]  # keep only the first truth labeling scheme
    return X, y

def assert_equal_lists_or_arrays(a1, a2, eps=1e-6):
    # recurse into lists/tuples; compare numeric leaves with tolerance
    if isinstance(a1, (list, tuple)) and isinstance(a2, (list, tuple)):
        for i1, i2 in zip(a1, a2):
            assert_equal_lists_or_arrays(i1, i2, eps)
    elif type(a1) == np.ndarray and type(a2) == np.ndarray:
        assert_allclose(a1, a2, atol=eps)
    elif type(a1) == float and type(a2) == float:
        assert_approx_equal(a1, a2, int(-np.log10(eps)))
    else:
        assert_equal(a1, a2)
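For illustration, a minimal sketch of how these helpers compose, reusing the train-set.npz written in the first snippet above:

# hypothetical usage of the helpers above
X, y = load_training_data('example-data/train-set.npz')
assert_equal_lists_or_arrays([X, y], [X.copy(), y.copy()])  # identical values pass
print(X.shape, y.shape)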
# load toy data and build the feature managers used by the tests below
probs2 = np.load(os.path.join(rundir, 'toy-data/test-04-probabilities.npy'))
probs1 = probs2[..., 0]
wss1 = np.loadtxt(os.path.join(rundir, 'toy-data/test-04-watershed.txt'),
                  dtype=np.uint32)
f1, f2, f3 = (features.moments.Manager(2, False),
              features.histogram.Manager(3, compute_percentiles=[0.5]),
              features.squiggliness.Manager(ndim=2))
f4 = features.base.Composite(children=[f1, f2, f3])

def run_matched(f, fn, c=1,
                edges=[(1, 2), (6, 3), (7, 4)],
                merges=[(1, 2), (6, 3)]):
    # compute edge and merge features on a small RAG and compare them
    # against a saved reference; list_of_feature_arrays is defined
    # elsewhere in the test module
    p = probs1 if c == 1 else probs2
    g = agglo.Rag(wss1, p, feature_manager=f, use_slow=True)
    o = list_of_feature_arrays(g, f, edges, merges)
    with open(fn, 'rb') as fin:
        r = pickle.load(fin, encoding='bytes')
    assert_equal_lists_or_arrays(o, r)
def test_1channel_moment_features():
    f = f1
    run_matched(f, os.path.join(rundir,
def setUp(self):
    self.probs2 = imio.read_h5_stack(rundir + '/test-05-probabilities.h5')
    self.probs1 = self.probs2[..., 0]
    self.wss1 = imio.read_h5_stack(rundir + '/test-05-watershed.h5')
    self.f1, self.f2, self.f3 = (
        features.moments.Manager(2, False),
        features.histogram.Manager(3, compute_percentiles=[0.5]),
        features.squiggliness.Manager(ndim=2))
    self.f4 = features.base.Composite(children=[self.f1, self.f2, self.f3])
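A hedged sketch of how these fixtures would be exercised; gala's real tests compare feature vectors against saved references, while this hypothetical method only smoke-tests RAG construction with the composite manager:

def test_composite_rag_builds(self):
    # hypothetical smoke test: the composite manager should accept the
    # 2-channel probability map; Rag extends a networkx graph, so
    # number_of_nodes() is available
    g = agglo.Rag(self.wss1, self.probs2, feature_manager=self.f4)
    assert g.number_of_nodes() > 0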
def create_fm(fm_info):
    """Rebuild a feature manager from its saved description dictionary."""
    children = []
    for feature in fm_info['feature_list']:
        if feature == "histogram":
            children.append(histogram.Manager.load_dict(fm_info[feature]))
        elif feature == "moments":
            children.append(moments.Manager.load_dict(fm_info[feature]))
        elif feature == "inclusiveness":
            children.append(inclusion.Manager.load_dict(fm_info[feature]))
        else:
            raise Exception("Feature " + feature + " not found")
    if len(children) == 0:
        raise RuntimeError("No features loaded")
    if len(children) == 1:
        return children[0]  # a single manager needs no Composite wrapper
    return base.Composite(children=children)
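The guard clauses are easy to exercise without any saved feature dictionaries; a small sketch:

# create_fm fails loudly on an empty feature list and on unknown names
try:
    create_fm({'feature_list': []})
except RuntimeError as e:
    print(e)  # "No features loaded"
try:
    create_fm({'feature_list': ['texture'], 'texture': {}})
except Exception as e:
    print(e)  # "Feature texture not found"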
def __init__(self, children=[], *args, **kwargs):
    super(Composite, self).__init__()
    self.children = children
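Since the constructor just stores its children, composing managers is cheap; a minimal sketch, matching the construction used in the earlier snippets (the composite concatenates its children's features when evaluated):

fm = features.moments.Manager()
fh = features.histogram.Manager()
fc = features.base.Composite(children=[fm, fh])
print(len(fc.children))  # 2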