# NOTE(review): removed third-party advertisement text ("Secure your code ... Snyk Code ...") injected by a scraping tool; it is not part of this source file.
Given that the first values are the earliest, this is a useful policy
also with a time-changing objective.
"""
## meta_parameters.noise_reeval_multiplier == 1.0
lam_reev = 1.0 * (self.lam_reeval if self.lam_reeval
else 2 + len(fit) / 20)
lam_reev = int(lam_reev) + ((lam_reev % 1) > np.random.rand())
## meta_parameters.noise_choose_reeval == 1
choice = 1
if choice == 1:
# take n_first first and reev - n_first best of the remaining
n_first = lam_reev - lam_reev // 2
sort_idx = np.argsort(np.array(fit, copy=False)[n_first:]) + n_first
return np.array(list(range(0, n_first)) +
list(sort_idx[0:lam_reev - n_first]), copy=False)
elif choice == 2:
idx_sorted = np.argsort(np.array(fit, copy=False))
# take lam_reev equally spaced, starting with best
linsp = np.linspace(0, len(fit) - len(fit) / lam_reev, lam_reev)
return idx_sorted[[int(i) for i in linsp]]
# take the ``lam_reeval`` best from the first ``2 * lam_reeval + 2`` values.
elif choice == 3:
return np.argsort(np.array(fit, copy=False)[:2 * (lam_reev + 1)])[:lam_reev]
else:
raise ValueError('unrecognized choice value %d for noise reev'
% choice)
`CMAEvolutionStrategy` object instance, in particular
mean and variances and the methods from the attribute
`gp` of type `GenoPheno` are used.
"""
if self.bounds is None or (self.bounds[0] is None and
self.bounds[1] is None):
return self
N = es.N
# ## prepare
# compute varis = sigma**2 * C_ii
if 11 < 3: # old
varis = es.sigma**2 * np.array(N * [es.C] if np.isscalar(es.C) else (# scalar case
es.C if np.isscalar(es.C[0]) else # diagonal matrix case
[es.C[i][i] for i in range(N)])) # full matrix case
else:
varis = es.sigma**2 * es.sm.variances
# relative violation in geno-space
dmean = (es.mean - es.gp.geno(self.repair(es.gp.pheno(es.mean)))) / varis**0.5
# ## Store/update a history of delta fitness value
fvals = sorted(function_values)
l = 1 + len(fvals)
val = fvals[3 * l // 4] - fvals[l // 4] # exact interquartile range apart interpolation
val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration
# insert val in history
if np.isfinite(val) and val > 0:
self.hist.insert(0, val)
elif val == np.inf and len(self.hist) > 1:
self.hist.insert(0, max(self.hist))
self.fitre = list(fit)
self.idx = self.indices(fit)
if not len(self.idx):
return self.idx
evals = int(self.evaluations) if self.f_aggregate else 1
fagg = np.median if self.f_aggregate is None else self.f_aggregate
for i in self.idx:
X_i = X[i]
if self.epsilon:
if self.parallel:
self.fitre[i] = fagg(func(ask(evals, X_i, self.epsilon), *args))
else:
self.fitre[i] = fagg([func(ask(1, X_i, self.epsilon)[0], *args)
for _k in range(evals)])
else:
self.fitre[i] = fagg([func(X_i, *args) for _k in range(evals)])
self.evaluations_just_done = evals * len(self.idx)
return self.fit, self.fitre, self.idx
iiter += 1 # (Could check iteration count here.)
# Compute implicit shift
g = d[l]
p = (d[l + 1] - g) / (2.0 * e[l])
r = (p**2 + 1)**0.5 # hypot(p,1.0)
if p < 0:
r = -r
d[l] = e[l] / (p + r)
d[l + 1] = e[l] * (p + r)
dl1 = d[l + 1]
h = g - d[l]
if not num_opt:
for i in range(l + 2, n):
d[i] -= h
else:
d[l + 2:n] -= h
f = f + h
# Implicit QL transformation.
p = d[m]
c = 1.0
c2 = c
c3 = c
el1 = e[l + 1]
s = 0.0
s2 = 0.0
# Sort eigenvalues and corresponding vectors.
if 11 < 3:
for i in range(n - 1): # (int i = 0; i < n-1; i++) {
k = i
p = d[i]
for j in range(i + 1, n): # (int j = i+1; j < n; j++) {
if d[j] < p: # NH find smallest k>i
k = j
p = d[j]
if k != i:
d[k] = d[i] # swap k and i
d[i] = p
for j in range(n): # (int j = 0; j < n; j++) {
p = V[j][i]
V[j][i] = V[j][k]
V[j][k] = p
# tql2
"""
if limit is None:
limit = self.condition_limit
elif limit <= 1:
raise ValueError("condition limit was %f<=1 but should be >1"
% limit)
if not np.isfinite(limit) or self.condition_number <= limit:
return
eps = (self.D[-1]**2 - limit * self.D[0]**2) / (limit - 1)
if eps <= 0: # should never happen, because cond > limit
raise RuntimeWarning("cond=%e, limit=%e, eps=%e" %
(self.condition_number, limit, eps))
return
for i in range(self.dimension):
self.C[i][i] += eps
self.D **= 2
self.D += eps
self.D **= 0.5
g = e[j]
if not num_opt:
for k in range(j, i):
V[k][j] -= (f * e[k] + g * d[k])
else:
V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])
d[j] = V[i - 1][j]
V[i][j] = 0.0
d[i] = h
# end for i--
# Accumulate transformations.
for i in range(n - 1):
V[n - 1][i] = V[i][i]
V[i][i] = 1.0
h = d[i + 1]
if h != 0.0:
if not num_opt:
for k in range(i + 1):
d[k] = V[k][i + 1] / h
else:
d[:i + 1] = V.T[i + 1][:i + 1] / h
for j in range(i + 1):
if not num_opt:
g = 0.0
for k in range(i + 1):
g += V[k][i + 1] * V[k][j]
for k in range(i + 1):
# Check for convergence.
if abs(e[l]) <= eps * tst1:
break
# } while (Math.abs(e[l]) > eps*tst1);
d[l] = d[l] + f
e[l] = 0.0
# Sort eigenvalues and corresponding vectors.
if 11 < 3:
for i in range(n - 1): # (int i = 0; i < n-1; i++) {
k = i
p = d[i]
for j in range(i + 1, n): # (int j = i+1; j < n; j++) {
if d[j] < p: # NH find smallest k>i
k = j
p = d[j]
if k != i:
d[k] = d[i] # swap k and i
d[i] = p
for j in range(n): # (int j = 0; j < n; j++) {
p = V[j][i]
V[j][i] = V[j][k]
V[j][k] = p
# tql2
def correlation_matrix(self):
"""return correlation matrix of the distribution.
"""
c = self.C.copy()
for i in range(c.shape[0]):
fac = c[i, i]**0.5
c[:, i] /= fac
c[i, :] /= fac
c = (c + c.T) / 2.0
return c
else:
e[:i] /= h
f += np.dot(e[:i], d[:i])
hh = f / (h + h)
if not num_opt:
for j in range(i):
e[j] -= hh * d[j]
else:
e[:i] -= hh * d[:i]
for j in range(i):
f = d[j]
g = e[j]
if not num_opt:
for k in range(j, i):
V[k][j] -= (f * e[k] + g * d[k])
else:
V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])
d[j] = V[i - 1][j]
V[i][j] = 0.0
d[i] = h
# end for i--
# Accumulate transformations.
for i in range(n - 1):
V[n - 1][i] = V[i][i]
V[i][i] = 1.0
h = d[i + 1]