"""update the isotropic evolution path.
Using ``es`` attributes ``mean``, ``mean_old``, ``sigma``,
``sigma_vec``, ``sp.weights.mueff``, ``cp.cmean`` and
``sm.transform_inverse``.
:type es: CMAEvolutionStrategy
"""
if not self.is_initialized_base:
self.initialize_base(es)
if self._ps_updated_iteration == es.countiter:
return
try:
if es.countiter <= es.sm.itereigenupdated:
# es.B and es.D must/should be those from the last iteration
utils.print_warning('distribution transformation (B and D) has been updated before ps could be computed',
                    '_update_ps', 'CMAAdaptSigmaBase', verbose=es.opts['verbose'])
except AttributeError:
pass
z = es.sm.transform_inverse((es.mean - es.mean_old) / es.sigma_vec.scaling)
# assert Mh.vequals_approximately(z, np.dot(es.B, (1. / es.D) *
# np.dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec.scaling)))
z *= es.sp.weights.mueff**0.5 / es.sigma / es.sp.cmean
self.ps = (1 - self.cs) * self.ps + (self.cs * (2 - self.cs))**0.5 * z
self._ps_updated_iteration = es.countiter
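# Minimal NumPy sketch (not part of the cma source, names are mine): the same
# path update for the special case C = I, where sm.transform_inverse is the
# identity, making the formula behind the code above explicit.
import numpy as np

def update_ps_sketch(ps, mean, mean_old, sigma, cs, mueff, cmean=1.0):
    """return (1 - cs) * ps + sqrt(cs * (2 - cs)) * sqrt(mueff) * (mean - mean_old) / (cmean * sigma)"""
    z = (np.asarray(mean) - np.asarray(mean_old)) / sigma
    z *= mueff**0.5 / cmean
    return (1 - cs) * np.asarray(ps) + (cs * (2 - cs))**0.5 * z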
def hsig(self, es):
# list of rows to append another row later
with warnings.catch_warnings():
if self.file_names[i] == 'axlencorr':
warnings.simplefilter("ignore")
try:
self.__dict__[self.key_names[i]] = list(
np.loadtxt(fn, comments=['%', '#']))
except:
self.__dict__[self.key_names[i]] = list(
np.loadtxt(fn, comments='%'))
# read dict from tag in first line
with open(fn) as file:
self.persistent_communication_dict.update(
string_=file.readline())
except IOError:
utils.print_warning('reading from file "' + fn + '" failed',
'load', 'CMADataLogger')
try:
# duplicate last row to later fill in annotation
# positions for display
if self.key_names[i] in self._key_names_with_annotation:
self.__dict__[self.key_names[i]].append(
self.__dict__[self.key_names[i]][-1])
self.__dict__[self.key_names[i]] = \
np.asarray(self.__dict__[self.key_names[i]])
except:
utils.print_warning('no data for %s' % fn, 'load',
'CMADataLogger')
# convert single line to matrix of shape (1, len)
for key in self.key_names:
    try:
        d = getattr(self, key)
    except AttributeError:
        utils.print_warning("attribute %s missing" % key, 'load',
                            'CMADataLogger')
        continue
    if len(d.shape) == 1:  # one line has shape (8, )
        setattr(self, key, d.reshape((1, len(d))))
return self
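# Usage sketch, not from this file: reload previously written .dat files and
# inspect the resulting arrays; 'outcmaes/' is assumed to be the output prefix
# of an earlier CMAEvolutionStrategy run.
import cma

logger = cma.CMADataLogger('outcmaes/').load()  # warns, but does not raise, if files are missing
for key in logger.key_names:  # each successfully read file becomes a 2-D array
    data = getattr(logger, key, None)
    print(key, getattr(data, 'shape', None))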
self._sortBD()
self.limit_condition()
try:
if not self.constant_trace:
s = 1
elif self.constant_trace in (1, True) or self.constant_trace.startswith(('ar', 'mean')):
s = 1 / np.mean(self.variances)
elif self.constant_trace.startswith('geo'):
s = np.exp(-np.mean(np.log(self.variances)))
elif self.constant_trace.startswith('aeig'):
s = 1 / np.mean(self.D) # same as arith
elif self.constant_trace.startswith('geig'):
s = np.exp(-np.mean(np.log(self.D)))
else:
print_warning("trace normalization option setting '%s' not recognized (further warnings will be surpressed)" %
repr(self.constant_trace),
class_name='GaussFullSampler', maxwarns=1, iteration=self.count_eigen + 1)
s = 1
except AttributeError:
raise ValueError("Value '%s' not allowed for constant trace setting" % repr(self.constant_trace))
if s != 1:
self.C *= s
self.D *= s
self.D **= 0.5
assert all(np.isfinite(self.D))
self._inverse_root_C = None
# self.dC = np.diag(self.C)
if 11 < 3: # not needed for now
self.inverse_root_C = np.dot(self.B / self.D, self.B.T)
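# Standalone sketch (my helper, not the sampler's API): the effect of the trace
# normalization above on a covariance matrix, for the arithmetic and the
# geometric variant.
import numpy as np

def normalize_trace(C, mode='arithm'):
    """rescale C so its eigenvalues average to one, arithmetically or geometrically"""
    eigvals = np.linalg.eigvalsh(C)
    if mode.startswith('ar'):  # arithmetic mean of eigenvalues == trace(C) / N
        s = 1 / np.mean(eigvals)
    elif mode.startswith('geo'):  # geometric mean of eigenvalues
        s = np.exp(-np.mean(np.log(eigvals)))
    else:
        s = 1
    return s * C

C = np.diag([0.5, 2.0, 8.0])
print(np.trace(normalize_trace(C)) / 3)  # -> 1.0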
def inverse(self, x, *args, **kwargs):
"""evaluate the composition of inverses on ``x``.
Return `None` if no list was provided.
"""
if self.list_of_inverses is None:
utils.print_warning("inverses were not given")
return
for i in range(len(self.list_of_inverses)):
x = self.list_of_inverses[i](x, *args, **kwargs)
return x
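# Standalone sketch (my own helper, not the class above): chaining callables in
# the same left-to-right order as the loop above; for a forward composition
# g(f(x)) the inverses must be listed as [g_inv, f_inv].
def apply_all(functions, x):
    """apply each callable in `functions` to `x` in turn and return the result"""
    for f in functions:
        x = f(x)
    return x

forward = [lambda x: 2 * x, lambda x: x + 1]   # x -> 2x -> 2x + 1
inverses = [lambda x: x - 1, lambda x: x / 2]  # undo the last step first
assert apply_all(inverses, apply_all(forward, 3)) == 3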
p = self.ps
if 'pc for ps' in es.opts['vv']:
# was: es.D**-1 * np.dot(es.B.T, es.pc)
p = es.sm.transform_inverse(es.pc)
if es.opts['CSA_squared']:
s = (sum(_square(p)) / es.N - 1) / 2
# sum(self.ps**2) / es.N has mean 1 and std sqrt(2/N) and is skewed
# divided by 2 to have the derivative d/dx (x**2 / N - 1) for x**2=N equal to 1
else:
s = _norm(p) / es.const.chiN - 1
s *= self.cs / self.damps
s_clipped = Mh.minmax(s, -self.max_delta_log_sigma, self.max_delta_log_sigma)
self.delta *= np.exp(s_clipped)
# "error" handling
if s_clipped != s:
utils.print_warning('sigma change np.exp(' + str(s) + ') = ' + str(np.exp(s)) +
' clipped to np.exp(+-' + str(self.max_delta_log_sigma) + ')',
'update',
'CMAAdaptSigmaCSA',
es.countiter, es.opts['verbose'])
return self.delta / delta_old
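# Illustrative sketch (plain NumPy, names are mine) of the non-squared branch
# above: the step-size multiplier exp(cs / damps * (||ps|| / chiN - 1)) with the
# same clipping of the log-change to +-max_delta_log_sigma.
import numpy as np

def csa_sigma_factor(ps, cs, damps, max_delta_log_sigma=1.0):
    """return the multiplicative step-size change for evolution path ps"""
    n = len(ps)
    chi_n = n**0.5 * (1 - 1. / (4 * n) + 1. / (21 * n**2))  # approximates E||N(0, I)||
    s = (np.linalg.norm(ps) / chi_n - 1) * cs / damps
    s = min(max(s, -max_delta_log_sigma), max_delta_log_sigma)  # clip like Mh.minmax
    return np.exp(s)

print(csa_sigma_factor(np.zeros(10), cs=0.3, damps=1.0))  # short path -> factor < 1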
def update(self, es, **kwargs):
dmi_div_dx0i = (es.mean[i] - es.mean_old[i]) \
    / (es.pop[0][i] - es.mean_old[i])
dmi_div_dx1i = (es.mean[i] - es.mean_old[i]) \
/ (es.pop[1][i] - es.mean_old[i])
if not Mh.equals_approximately(
dmi_div_dx0i, dm / dx0, 1e-4) or \
not Mh.equals_approximately(
dmi_div_dx1i, dm / dx1, 1e-4):
utils.print_warning(
'TPA: apparent inconsistency with mirrored'
' samples, where dmi_div_dx0i, dm/dx0=%f, %f'
' and dmi_div_dx1i, dm/dx1=%f, %f' % (
dmi_div_dx0i, dm/dx0, dmi_div_dx1i, dm/dx1),
'check_consistency',
'CMAAdaptSigmaTPA', es.countiter)
else:
utils.print_warning('zero delta encountered in TPA which' +
' \nshould be very rare and might be a bug' +
' (sigma=%f)' % es.sigma,
'check_consistency', 'CMAAdaptSigmaTPA',
es.countiter)
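# Illustrative check (standalone, names are mine): with TPA the first two
# offspring lie on the line through the old mean along the previous mean shift,
# so (mean[i] - mean_old[i]) / (pop[k][i] - mean_old[i]) is the same constant
# for every coordinate i; the warning above fires when that consistency fails.
import numpy as np

mean_old = np.array([0.0, 0.0, 0.0])
shift = np.array([0.4, -0.2, 0.1])
mean = mean_old + shift
pop0 = mean_old + 2.0 * shift  # mirrored pair along the shift direction
pop1 = mean_old - 2.0 * shift

ratios0 = (mean - mean_old) / (pop0 - mean_old)
ratios1 = (mean - mean_old) / (pop1 - mean_old)
assert np.allclose(ratios0, ratios0[0]) and np.allclose(ratios1, ratios1[0])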