Returns
-------
illum : np.ndarray, float, shape (M, N)
The estimated illumination over the image field.
See Also
--------
correct_image_illumination, correct_multiimage_illumination
"""
# this function follows the "PyToolz" streaming data model to
# obtain the illumination estimate.
# first, define the functions for each individual step:
in_range = ('image' if input_bitdepth is None
else (0, 2**input_bitdepth - 1))
rescale = tz.curry(exposure.rescale_intensity)
normalize = (tz.partial(stretchlim, bottom=stretch_quantile)
if stretch_quantile > 0
else skimage.img_as_float)
# produce a stream of properly-scaled images
ims = (tz.pipe(fn, io.imread, rescale(in_range=in_range), normalize)
for fn in fns)
# take the mean of that stream
mean_image = mean(ims)
# return the median filter of that mean
radius = radius or min(mean_image.shape) // 4
illum = ndi.percentile_filter(mean_image, percentile=(quantile * 100),
footprint=morphology.disk(radius))
return illum
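# The pipeline above is lazy: no image is read until `mean` consumes the
# generator, so only one image is held in memory at a time. Below is a small,
# self-contained sketch of the same PyToolz streaming pattern; the names
# `rescale_simple`, `running_mean`, and the fake image stream are illustrative
# stand-ins, not part of the module above.
import numpy as np
import toolz as tz

@tz.curry
def rescale_simple(arr, in_range=(0, 255)):
    # analogous to exposure.rescale_intensity(arr, in_range=...)
    lo, hi = in_range
    return (arr.astype(float) - lo) / (hi - lo)

def running_mean(arrays):
    # streaming mean: accumulate a running sum, never hold the whole stack
    total, count = 0.0, 0
    for a in arrays:
        total = total + a
        count += 1
    return total / count

fake_images = (np.full((4, 4), v, dtype=np.uint8) for v in (10, 20, 30))
ims = (tz.pipe(im, rescale_simple(in_range=(0, 255))) for im in fake_images)
print(running_mean(ims))  # a 4x4 array filled with 20/255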
for lay in rbm.layers:
lay.params.loc[:] = be.rand_like(lay.params.loc)
state, cop1_GFE = rbm._compute_StateTAP_self_consistent()
grad = rbm._grad_gibbs_free_energy(state)
gu.grad_normalize_(grad)
for i in range(100):
lr = 0.1
gogogo = True
random_grad = gu.random_grad(rbm)
gu.grad_normalize_(random_grad)
while gogogo:
cop1 = deepcopy(rbm)
lr_mul = partial(be.tmul, lr)
cop1.parameter_update(gu.grad_apply(lr_mul, grad))
cop1_state, cop1_GFE = cop1._compute_StateTAP_self_consistent()
cop2 = deepcopy(rbm)
cop2.parameter_update(gu.grad_apply(lr_mul, random_grad))
cop2_state, cop2_GFE = cop2._compute_StateTAP_self_consistent()
regress = cop2_GFE - cop1_GFE < 0.0
if regress:
if lr < 1e-6:
assert False, \
"TAP FE gradient is not working properly for Gaussian models"
break
else:
Create an optimizer object:
Args:
model: a BoltzmannMachine object to optimize
stepsize (generator; optional): the stepsize schedule
tolerance (float; optional):
the gradient magnitude below which to declare convergence
Returns:
Optimizer
"""
self.stepsize = stepsize
self.tolerance = tolerance
self.delta = {}
self.lr_ = partial(be.tmul_, be.float_scalar(next(self.stepsize)))  # draw the first value from the stepsize schedule
def update_lr(self):
"""
Update the current value of the stepsize:
Notes:
Modifies stepsize attribute in place.
Args:
None
Returns:
None
"""
lr = be.float_scalar(next(self.stepsize))
self.lr_ = partial(be.tmul_, lr)
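# `stepsize` is a generator, so any learning-rate schedule can be plugged in
# and `update_lr` simply pulls the next value with next(). A sketch of such a
# schedule (the name `geometric_decay` is illustrative, not part of the
# library):
def geometric_decay(initial=0.1, factor=0.5):
    # yields initial, initial*factor, initial*factor**2, ...
    lr = initial
    while True:
        yield lr
        lr *= factor

schedule = geometric_decay(initial=0.1, factor=0.5)
print(next(schedule), next(schedule), next(schedule))  # 0.1 0.05 0.025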
def minimize_gibbs_free_energy_GD(self, m=None, init_lr=0.01, tol=1e-6, max_iters=1, terms=2):
"""
Simple gradient descent routine to minimize the Gibbs free energy over the magnetizations
"""
mag = deepcopy(m)
eps = 1e-6
its = 0
gam = self.gibbs_free_energy(mag)
lr = init_lr
clip_ = partial(be.clip_inplace, a_min=eps, a_max=1.0-eps)
lr_ = partial(be.tmul_, be.float_scalar(lr))
#print(gam)
while (its < max_iters):
its += 1
grad = self.grad_magnetization_GFE(mag)
for g in grad:
be.apply_(lr_, g)
m_provisional = [be.mapzip(be.subtract, grad[l], mag[l]) for l in range(self.num_layers)]
# Warning: in general a lot of clipping gets done here
for m_l in m_provisional:
be.apply_(clip_, m_l)
#HACK:
for l in range(self.num_layers):
m_provisional[l].c[:] = m_provisional[l].a - be.square(m_provisional[l].a)
def grad_normalize_(grad):
"""
Normalize the gradient vector with respect to the L2 norm
Args:
grad (Gradient)
Returns:
None
"""
nrm = grad_norm(grad)
grad_apply_(partial(be.tmul_, be.float_scalar(1.0/nrm)), grad)
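# For reference, the same unit-L2 normalization written against plain NumPy
# arrays (a sketch only; the real Gradient object holds per-layer tensors and
# uses the backend's in-place multiply, not a Python list of ndarrays):
import numpy as np

def l2_normalize_(vectors):
    # scale every array so the joint L2 norm of the collection is 1 (in place)
    norm = np.sqrt(sum(np.sum(v ** 2) for v in vectors))
    for v in vectors:
        v *= 1.0 / norm

g = [np.array([3.0, 0.0]), np.array([0.0, 4.0])]
l2_normalize_(g)
print(g)  # [array([0.6, 0. ]), array([0. , 0.8])]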
for g in grad:
be.apply_(lr_, g)
m_provisional = [be.mapzip(be.subtract, grad[l], mag[l]) for l in range(self.num_layers)]
# Warning: in general a lot of clipping gets done here
for m_l in m_provisional:
be.apply_(clip_, m_l)
#HACK:
for l in range(self.num_layers):
m_provisional[l].c[:] = m_provisional[l].a - be.square(m_provisional[l].a)
gam_provisional = self.gibbs_free_energy(m_provisional)
if (gam - gam_provisional < 0):
lr *= 0.5
lr_ = partial(be.tmul_, be.float_scalar(lr))
#print("decreased lr" + str(its))
if (lr < 1e-10):
#print("tol reached on iter" + str(its))
break
elif (gam - gam_provisional < tol):
break
else:
#print(gam - gam_provisional)
mag = m_provisional
gam = gam_provisional
return (mag, gam)
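# The loop above is a simple backtracking scheme: take a gradient step, and if
# the free energy went up, halve the learning rate and retry; stop when the
# improvement drops below `tol` or the rate underflows. The same control flow
# on a toy quadratic, as a self-contained sketch:
def backtracking_descent(f, grad_f, x, lr=0.5, tol=1e-6, max_iters=100):
    fx = f(x)
    for _ in range(max_iters):
        x_new = x - lr * grad_f(x)
        fx_new = f(x_new)
        if fx - fx_new < 0:        # overshot: shrink the step and retry
            lr *= 0.5
            if lr < 1e-10:
                break
        elif fx - fx_new < tol:    # converged
            break
        else:                      # accept the step
            x, fx = x_new, fx_new
    return x, fx

x_min, f_min = backtracking_descent(lambda x: (x - 3.0) ** 2,
                                    lambda x: 2.0 * (x - 3.0), x=0.0)
print(x_min, f_min)  # 3.0 0.0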
grad = self._TAP_magnetization_grad(state)
for g in grad:
be.apply_(lr_, g)
# take a gradient step to compute a new state
new_state = mu.StateTAP([
self.layers[l].clip_magnetization(
be.mapzip(be.subtract, grad[l], state.cumulants[l])
)
for l in range(self.num_layers)])
# compute the new free energy and perform an update
new_free_energy = self.gibbs_free_energy(new_state)
if free_energy - new_free_energy < 0:
# the step was too large, halve the learning rate
lr *= decrease
lr_ = partial(be.tmul_, be.float_scalar(lr))
if lr < 1e-10:
break
elif free_energy - new_free_energy < tol:
break
else:
state = new_state
free_energy = new_free_energy
return state
# compute minimizing magnetizations from seeded initializations
for s in range(num_p): # persistent seeds
(self.persistent_samples[s], EMF) = \
self.TAP_free_energy(self.persistent_samples[s],
self.init_lr_EMF,
self.tolerance_EMF,
self.max_iters_EMF,
self.terms)
# Compute the gradients at this minimizing magnetization
grad_gfe = self.grad_gibbs_free_energy(self.persistent_samples[s])
def accum_(x, y): x[:] = be.add(x, y)
gu.grad_mapzip_(accum_, grad_EMF, grad_gfe)
# average
scale = partial(be.tmul_, be.float_scalar(1/(num_p + num_r)))
gu.grad_apply_(scale, grad_EMF)
return grad_EMF
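# The accumulate-then-scale pattern above (sum the per-seed gradients in
# place, then multiply once by 1/(num_p + num_r)) is just a streaming
# average. The same idea in plain NumPy, as an illustration:
import numpy as np

per_seed_grads = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
acc = np.zeros_like(per_seed_grads[0])
for g in per_seed_grads:
    acc += g                        # in-place accumulation, like accum_ above
acc *= 1.0 / len(per_seed_grads)    # scale once at the end
print(acc)                          # [3. 4.]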
def serializable_unsigned_transaction_from_dict(transaction_dict):
assert_valid_fields(transaction_dict)
filled_transaction = pipe(
transaction_dict,
dict,
partial(merge, TRANSACTION_DEFAULTS),
chain_id_to_v,
apply_formatters_to_dict(TRANSACTION_FORMATTERS),
)
if 'v' in filled_transaction:
serializer = Transaction
else:
serializer = UnsignedTransaction
return serializer.from_dict(filled_transaction)
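# A usage sketch: given a legacy transaction dict, the function fills in the
# defaults, folds `chainId` into `v` (EIP-155), and picks the RLP serializer.
# The import path below varies by eth-account version (newer releases expose
# it from eth_account._utils.legacy_transactions), so treat it as an
# assumption rather than a fixed API location.
from eth_account._utils.legacy_transactions import (
    serializable_unsigned_transaction_from_dict,
)

tx = {
    'nonce': 0,
    'gasPrice': 10**9,
    'gas': 21000,
    'to': '0x0000000000000000000000000000000000000000',
    'value': 1,
    'data': b'',
    'chainId': 1,
}

unsigned = serializable_unsigned_transaction_from_dict(tx)
# With 'chainId' present, chain_id_to_v sets v (and zeroes r, s), so the
# EIP-155-aware Transaction serializer is chosen; hash() yields the bytes
# that would actually be signed.
print(unsigned.hash().hex())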