# (extraction artifact, not part of the program) Scanner-banner text:
# "Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately."
return tau_corrected_s
def apply_penalty_randomized(self, tau_s):
    """Apply the tau penalty with a randomly attenuated strength.

    ``self.scale_f`` is temporarily multiplied by a uniform random number
    in [0, 1) before delegating to :meth:`apply_penalty`, so that very
    large taus are not all mapped onto a single penalised value.

    :param tau_s: proposed evolution time(s) to penalise.
    :return: the penalised tau value(s) returned by ``self.apply_penalty``.
    """
    saved_scale_f = self.scale_f
    # attenuate the penalty strength by a uniform draw from [0, 1)
    self.scale_f *= np.random.rand()
    try:
        return self.apply_penalty(tau_s)
    finally:
        # Restore the original scale even if apply_penalty raises;
        # without this, a single failure would permanently corrupt the
        # shared self.scale_f state for all subsequent calls.
        self.scale_f = saved_scale_f
class MultiHahnPGH(MultiPGH):
# Experiment-design heuristic specialised for (multi-)Hahn-echo sequences,
# extending MultiPGH.  NOTE(review): this chunk appears to be missing lines
# after __init__ (the enclosing method of the eps[...] code further below is
# not visible), so the class body shown here is incomplete.
def __init__(self, updater, B_gauss, oplist=None, norm='Frobenius', inv_field='x_', t_field='t',
inv_func=identity,
t_func=identity,
maxiters=10,
other_fields=None
):
# Store the Bayesian updater plus the configuration controlling how the
# inversion-field and evolution-time experiment parameters are generated.
super().__init__(updater)
self._updater = updater  # Bayesian/SMC updater supplying posterior samples
self._oplist = oplist  # operators used to build deltaH (None -> default behaviour)
self._norm = norm  # matrix-norm selector used later ('Frobenius', min/max singular value)
self._x_ = inv_field  # name of the inversion (parameter-guess) experiment field
self._t = t_field  # name of the evolution-time experiment field
self._inv_func = inv_func  # transform applied to the inversion value
self._t_func = t_func  # transform applied to the time value
self._maxiters = maxiters  # max attempts when sampling distinct particles
# NOTE(review): B_gauss and other_fields are accepted but not stored in the
# lines visible here, even though self._other_fields is read later in this
# file — presumably assigned in lines missing from this chunk; confirm.
# Fragment of the experiment-design method (its `def` line and the leading
# `if` branch are not visible in this chunk): the evolution time is chosen
# as the inverse of a norm of deltaH, selected by self._norm.
eps[self._t] = 1 / minsingvalnorm(deltaH) # Min SingVal norm
elif self._norm == 'SingVal':
eps[self._t] = 1 / singvalnorm(deltaH) # Max SingVal
else:
# Fallback: plain Frobenius norm for an unrecognised self._norm value.
eps[self._t] = 1 / np.linalg.norm(deltaH)
# NOTE(review): raising here discards the assignment just made, yet the
# message reads like a warning ("using Frobenius norm instead").  This
# should probably be warnings.warn(...) before the assignment rather than
# a RuntimeError — confirm the intended behaviour.
raise RuntimeError("Unknown Norm: using Frobenius norm instead")
# Copy any additional fixed experiment fields into the design record.
for field, value in self._other_fields.items():
eps[field] = value
return eps
def norm_mean_projection(self, x, xp):
    """Projection-distance hook between particles *x* and *xp*.

    This subclass deliberately reports no projected distance, so the
    result is always zero regardless of the inputs.
    """
    return 0
class T2RandPenalty_MultiPGH(MultiPGH):
# MultiPGH variant that penalises and randomly rescales proposed taus above
# a threshold, preventing the heuristic from requesting arbitrarily long
# evolution times (relevant when T2 decoherence limits useful tau).
def __init__(self, updater, tau_thresh_rescale, inv_field='x_', t_field='t',
inv_func=qi.expdesign.identity,
t_func=qi.expdesign.identity,
oplist=None,
maxiters=10,
other_fields=None, scale_f=2.0):
"""
Apply a penalty on taus calculated from stdPGH and rescale them to lower values.
:param tau_thresh_rescale: values above will be rescaled
:param scale_f: controls the cut off tau. tau_cut = tau_thresh_rescale + scale_f * tau_thresh_rescale
= 2 -> tau_max = 3*tau_thresh_rescale
"""
# Delegate all of the standard PGH configuration to the parent class.
super().__init__(updater, oplist=oplist, inv_field=inv_field, t_field=t_field, inv_func=inv_func, t_func=t_func,
maxiters=maxiters, other_fields=other_fields)
self.tau_thresh_rescale = tau_thresh_rescale  # taus above this value get penalised
# NOTE(review): scale_f is documented above but not stored in the lines
# visible here — the assignment (self.scale_f = scale_f) presumably follows
# in lines beyond this chunk; confirm the __init__ is truncated here.