        # Temporarily set one coordinate of the input to x, re-evaluate the
        # R-projected output, then restore the original value.
        m.__setitem__(coord, x)
        y = float(sum(mul_elemwise(R, op_cls(*args))).data)
        m.__setitem__(coord, old_x)
        return y
    return rval
        self.failUnless(hasattr(op_cls, 'update_gradient'), op_cls)
        op_out = op_cls(*args)
        if len(op_out.owner.outputs) > 1:
            raise NotImplementedError('cant autotest gradient of op with multiple outputs')
            # we could make loop over outputs making random projections R for each,
            # but this doesn't handle the case where not all the outputs are
            # differentiable... so I leave this as TODO for now -jsb.
        R = numpy.random.rand(*op_out.shape)
        y = sum(mul_elemwise(R, op_out))
        g = gradient.grad(y)
        def abs_rel_err(a, b):
            return abs((a - b) / (a + b + eps))
        for idx in range(len(args)):
            #print 'aaaaaaa', op_cls, [i.shape for i in args]
            g_i = g(args[idx])
            if g_i is gradient.Undefined:
                continue
            if args[idx].shape == ():
                fd_grad = _finite_diff1(_scalar_f(op_cls, args, R, idx),
                                        args[idx].data, eps, y.data)
                err = abs_rel_err(fd_grad, g_i.data)
                self.failUnless(err < tol, (err, op_cls, idx))
            elif len(args[idx].shape) == 1:
                for i in xrange(args[idx].shape[0]):
                    fd_grad = _finite_diff1(_scalar_f(op_cls, args, R, idx, (i,)),
                                            args[idx].data[i], eps, y.data)
                    err = abs_rel_err(fd_grad, g_i.data[i])
                    self.failUnless(abs(err) < tol, (err, op_cls, idx, i))
            elif len(args[idx].shape) == 2:
                for i in xrange(args[idx].shape[0]):
                    for j in xrange(args[idx].shape[1]):
                        fd_grad = _finite_diff1(_scalar_f(op_cls, args, R, idx, (i, j)),
                                                args[idx].data[i, j], eps, y.data)
                        err = abs_rel_err(fd_grad, g_i.data[i, j])
                        self.failUnless(abs(err) < tol, (err, op_cls, idx, i, j))
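
# A minimal standalone sketch of the finite-difference check above, assuming a
# one-sided difference. `finite_diff1` and `check_scalar_grad` are illustrative
# stand-ins; the real _finite_diff1 / _scalar_f helpers are not shown in this
# snippet.
def finite_diff1(f, x, eps, y0):
    # y0 is the unperturbed value f(x); perturb x by eps and take the slope.
    return (f(x + eps) - y0) / eps

def check_scalar_grad(f, df, x, eps=1e-6, tol=1e-4):
    # Compare an analytic derivative df(x) against the finite difference,
    # using the same absolute relative error as abs_rel_err above.
    y0 = f(x)
    fd = finite_diff1(f, x, eps, y0)
    g = df(x)
    err = abs((fd - g) / (fd + g + eps))
    return err < tol, err

# Example: d/dx x**2 = 2x at x = 3.0
# check_scalar_grad(lambda v: v ** 2, lambda v: 2.0 * v, 3.0) -> (True, tiny err)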
        class retNone(gof.op.Op):
            def make_node(self, *inputs):
                outputs = [gof.generic()]
                return gof.Apply(self, inputs, outputs)
            def grad(self, inputs, (gz, )):
                return [None]
        i = gof.generic()
        j = gof.generic()
        a1 = retNone().make_node(i)
        g = grad_sources_inputs([(a1.out, 1)], None)
        a2 = retNone().make_node(i, j)
        try:
            g = grad_sources_inputs([(a2.out, 1)], None)
        except ValueError, e:
            self.failUnless(e[0] is gradient._msg_badlen)
            return
        self.fail()
    def test_retNone1(self):
        """Test that it is not ok to return None from op.grad()"""
        class retNone(gof.op.Op):
            def make_node(self):
                inputs = [gof.generic()]
                outputs = [gof.generic()]
                return gof.Apply(self, inputs, outputs)
            def grad(self, (x, ), (gz, )):
                pass
        a = retNone().make_node()
        try:
            grad_sources_inputs([(a.out, 1)], None)
        except ValueError, e:
            self.failUnless(e[0] is gradient._msg_retType)
            return
        self.fail()

    def test_retNone1_b(self):
            if px >= hi:
                thres[i][j] = strong
                strongs.append((i, j))
            elif px >= lo:
                thres[i][j] = weak
    return thres, strongs
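
# A self-contained sketch of the double-thresholding function whose tail is
# shown above, assuming lo/hi are chosen as fractions of the maximum response;
# the name double_threshold and the ratio/level values are illustrative, not
# the actual thresholding() defined in this script.
import numpy as np

def double_threshold(det, low_ratio=0.1, high_ratio=0.3, weak=128, strong=255):
    thres = np.zeros(det.shape, dtype=np.uint8)
    strongs = []
    hi = det.max() * high_ratio
    lo = det.max() * low_ratio
    for i in range(det.shape[0]):
        for j in range(det.shape[1]):
            px = det[i][j]
            if px >= hi:
                thres[i][j] = strong
                strongs.append((i, j))
            elif px >= lo:
                thres[i][j] = weak
    return thres, strongs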
if __name__ == '__main__':
    from sys import argv
    if len(argv) < 2:
        print "Usage: python %s <img>" % argv[0]
        exit()
    im = array(Image.open(argv[1]))
    im = im[:, :, 0]
    gim = gaussian(im)
    grim, gphase = gradient(gim)
    gmax = maximum(grim, gphase)
    thres = thresholding(gmax)
    gray()
    subplot(1, 2, 1)
    imshow(im)
    axis('off')
    title('Original')
    subplot(1, 2, 2)
    imshow(thres[0])
    axis('off')
    title('Double thresholding')
    show()
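
# A rough sketch of the gaussian() smoothing step called above, assuming a
# plain Gaussian blur; gaussian_smooth and sigma=1.4 are illustrative names and
# values, not the function defined in this script.
from scipy.ndimage import gaussian_filter

def gaussian_smooth(im, sigma=1.4):
    # Smooth the image before taking derivatives to suppress noise.
    return gaussian_filter(im.astype(float), sigma)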
                    gmax[i][j] = det[i][j]
            # 135 degrees
            if (phase[i][j] >= 112.5 and phase[i][j] < 157.5) or (phase[i][j] >= 292.5 and phase[i][j] < 337.5):
                if det[i][j] >= det[i - 1][j - 1] and det[i][j] >= det[i + 1][j + 1]:
                    gmax[i][j] = det[i][j]
    return gmax
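
# A rough sketch of what the gradient() call in the __main__ block below might
# compute, assuming Sobel derivatives; sobel_gradient is an illustrative name,
# not the function defined in this script.
import numpy as np
from scipy.ndimage import sobel

def sobel_gradient(im):
    dx = sobel(im.astype(float), axis=1)
    dy = sobel(im.astype(float), axis=0)
    magnitude = np.hypot(dx, dy)
    # Phase in degrees, wrapped to [0, 360) to match the angle tests above.
    phase = np.degrees(np.arctan2(dy, dx)) % 360
    return magnitude, phase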
if __name__ == '__main__':
    from sys import argv
    if len(argv) < 2:
        print "Usage: python %s <img>" % argv[0]
        exit()
    im = array(Image.open(argv[1]))
    im = im[:, :, 0]
    gim = gaussian(im)
    grim, gphase = gradient(gim)
    gmax = maximum(grim, gphase)
    gray()
    subplot(2, 2, 1)
    imshow(im)
    axis('off')
    title('Original')
    subplot(2, 2, 2)
    imshow(gim)
    axis('off')
    title('Gaussian')
    subplot(2, 2, 3)
    imshow(grim)
    def split(self, file):
        # Translate each gradient found in `file` and write it out as a
        # separate .ggr file under gradients/.
        text = open(file).read()
        t = self.translateGradient(text)
        for grad in t.children:
            t = translate.GradientFunc(grad)
            g = gradient.Gradient()
            g.load_ugr(t)
            out_name = g.name + ".ggr"
            f = open("gradients/" + out_name, "w")
            print >>f, g.serialize()
            f.close()
def output(cursor, by_binary_list, whitelist_avg, blacklist_avg):
    """Print out a report for the output of malfunction.

    cursor - database cursor
    by_binary_list - list of strong matching binaries
    whitelist_avg - the whitelist score
    blacklist_avg - the blacklist score"""
    score = whitelist_avg - blacklist_avg
    print("Whitelist Average: " + str(whitelist_avg))
    print("Blacklist Average: " + str(blacklist_avg))
    print(" Score: " + str(score))
    gradient.gradient(score)
    possible_filenames = []
    possible_authors = []
    comments = []
    for binary_id in by_binary_list:
        cursor.execute("SELECT author,filenames,comment FROM "
                       "binaries WHERE binaryID=?", (binary_id, ))
        binary_entry = cursor.fetchone()
        if binary_entry[0] not in possible_authors and binary_entry[0]:
            possible_authors.append(binary_entry[0])
        if binary_entry[1] not in possible_filenames and binary_entry[1]:
            possible_filenames.append(binary_entry[1])
        if binary_entry[2] not in comments and binary_entry[2]:
            comments.append(binary_entry[2])
    if possible_authors:
        print("***Possible Authors of this binary***")
## STEP 2: Implement softmaxCost
#
# Implement softmax_cost (softmaxCost.m in the original MATLAB exercise).
(cost, grad) = softmax.softmax_cost(theta, num_classes, input_size, lambda_, input_data, labels)
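
# A compact sketch of what softmax_cost computes, assuming theta is a flat
# (num_classes * input_size) vector, data is input_size x m, and labels are
# 0-based class indices; illustrative only, not the softmax module used above.
import numpy as np

def softmax_cost_sketch(theta, num_classes, input_size, lambda_, data, labels):
    W = theta.reshape(num_classes, input_size)
    scores = W.dot(data)                      # num_classes x m
    scores = scores - scores.max(axis=0)      # subtract column max for stability
    probs = np.exp(scores) / np.exp(scores).sum(axis=0)
    m = data.shape[1]
    ground_truth = np.zeros((num_classes, m))
    ground_truth[labels, np.arange(m)] = 1
    cost = -np.sum(ground_truth * np.log(probs)) / m + 0.5 * lambda_ * np.sum(W ** 2)
    grad = -(ground_truth - probs).dot(data.T) / m + lambda_ * W
    return cost, grad.flatten()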
##======================================================================
## STEP 3: Gradient checking
#
# As with any learning algorithm, you should always check that your
# gradients are correct before learning the parameters.
#
if debug:
    J = lambda x: softmax.softmax_cost(x, num_classes, input_size, lambda_, input_data, labels)
    num_grad = gradient.compute_gradient(J, theta)
    # Use this to visually compare the gradients side by side
    print num_grad, grad
    # Compare numerically computed gradients with the ones obtained from backpropagation
    diff = np.linalg.norm(num_grad - grad) / np.linalg.norm(num_grad + grad)
    print diff
    print "Norm of the difference between numerical and analytical num_grad (should be < 1e-7)\n\n"
##======================================================================
## STEP 4: Learning parameters
#
# Once you have verified that your gradients are correct,
# you can start training your softmax regression code using softmaxTrain
# (which uses minFunc).
# To speed up gradient checking, we will use a reduced network and some
# dummy patches
debug_hidden_size = 5
debug_visible_size = 8
patches = np.random.rand(8, 10)
theta = sparse_autoencoder.initialize(debug_hidden_size, debug_visible_size)
cost, grad = sparse_autoencoder.sparse_autoencoder_linear_cost(theta, debug_visible_size, debug_hidden_size,
                                                               lambda_, sparsity_param, beta, patches)
# Check gradients
J = lambda x: sparse_autoencoder.sparse_autoencoder_linear_cost(x, debug_visible_size, debug_hidden_size,
                                                                lambda_, sparsity_param, beta, patches)
num_grad = gradient.compute_gradient(J, theta)
print grad, num_grad
# Compare numerically computed gradients with the ones obtained from backpropagation
diff = np.linalg.norm(num_grad - grad) / np.linalg.norm(num_grad + grad)
print diff
print "Norm of the difference between numerical and analytical num_grad (should be < 1e-9)\n\n"
##======================================================================
## STEP 2: Learn features on small patches
# In this step, you will use your sparse autoencoder (which now uses a
# linear decoder) to learn features on small patches sampled from related
# images.
## STEP 2a: Load patches
# In this step, we load 100k patches sampled from the STL10 dataset and