if np.all(np.isnan(val)):
continue
if var in [FVAL, CHI2]:
assert np.isclose(
val, fun(x_full),
), var
elif var in [RES]:
# note that we can expect slight deviations here, since
# this res is computed without sensitivities while the
# result here may be computed with sensitivities
# activated. If this fails too often, increase atol/rtol.
assert np.allclose(
val, fun(x_full),
rtol=1e-3, atol=1e-4
), var
elif var in [SRES]:
assert np.allclose(
val, fun(x_full)[:, self.problem.x_free_indices],
), var
elif var in [GRAD, SCHI2]:
assert np.allclose(
val, self.problem.get_reduced_vector(fun(x_full)),
), var
elif var in [HESS]:
assert np.allclose(
val, self.problem.get_reduced_matrix(fun(x_full)),
), var
else:
raise RuntimeError('missing test implementation')
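# The assertions above reduce full-dimension gradients and Hessians to
# the free parameters before comparing. A minimal sketch of such a
# reduction (an illustration with assumed indices, not necessarily the
# library's implementation):
import numpy as np

x_free_indices = [0, 2, 3]  # hypothetical indices of non-fixed parameters

def get_reduced_vector(v_full):
    """Keep only the entries belonging to free parameters."""
    return np.asarray(v_full)[x_free_indices]

def get_reduced_matrix(m_full):
    """Keep only the free rows and columns, e.g. of a Hessian."""
    return np.asarray(m_full)[np.ix_(x_free_indices, x_free_indices)]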
res = self.res(x)
result = {RES: res}
elif sensi_orders == (1,):
if self.sres is True:
sres = self.res(x)[1]
else:
sres = self.sres(x)
result = {SRES: sres}
elif sensi_orders == (0, 1):
if self.sres is True:
res, sres = self.res(x)
else:
res = self.res(x)
sres = self.sres(x)
result = {RES: res,
SRES: sres}
else:
raise ValueError("These sensitivity orders are not supported.")
return result
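# A small sketch of the calling convention dispatched on above
# (inferred from the code, not a documented API): when `self.sres is
# True`, the residual function itself returns the pair (res, sres).
import numpy as np

def res_with_sensitivities(x):
    """Hypothetical residual function returning residuals together with
    their sensitivities, matching the `sres is True` convention."""
    x = np.asarray(x)
    res = np.zeros(3)             # residual vector r(x)
    sres = np.zeros((3, x.size))  # sensitivities dr/dx, shape (n_res, n_par)
    return res, sres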
x_ids,
par_sim_ids,
condition_map_sim_var,
rdata['sres'],
coefficient=1.0
)
sres = np.vstack([sres, opt_sres]) if sres.size else opt_sres
ret = {
FVAL: nllh,
CHI2: chi2,
GRAD: snllh,
HESS: s2nllh,
RES: res,
SRES: sres,
RDATAS: rdatas
}
return {
key: val
for key, val in ret.items()
if val is not None
}
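# Tiny illustration of the filtering above: keys whose value is None
# are dropped from the returned dictionary.
example = {'fval': 1.2, 'grad': None}
assert {k: v for k, v in example.items() if v is not None} == {'fval': 1.2}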
# create table row
row = pd.Series(name=len(self._trace),
index=self._trace.columns,
dtype='object')
values = {
TIME: used_time,
N_FVAL: self._n_fval,
N_GRAD: self._n_grad,
N_HESS: self._n_hess,
N_RES: self._n_res,
N_SRES: self._n_sres,
FVAL: ret[FVAL],
RES: ret[RES],
SRES: ret[SRES],
CHI2: ret[CHI2],
HESS: ret[HESS],
}
for var, val in values.items():
row[(var, float('nan'))] = val
for var, val in {X: x, GRAD: ret[GRAD], SCHI2: ret[SCHI2]}.items():
if var == X or self.options[f'trace_record_{var}']:
row[var] = val
else:
row[(var, float('nan'))] = np.nan
self._trace = self._trace.append(row)
# save trace to file
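# Note on the append above: pandas removed DataFrame.append in pandas
# 2.0. On recent pandas an equivalent would be (a sketch, assuming
# `row` is the named Series constructed above):
#
#     self._trace = pd.concat([self._trace, row.to_frame().T])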
Returns values indicative of an error, i.e. with nan entries in all
vectors and a function value (the nllh) of `np.inf`.
"""
if not amici_model.nt():
nt = sum([data.nt() for data in edatas])
else:
nt = sum([data.nt() if data.nt() else amici_model.nt()
for data in edatas])
n_res = nt * amici_model.nytrue
return {
FVAL: np.inf,
GRAD: np.nan * np.ones(dim),
HESS: np.nan * np.ones([dim, dim]),
RES: np.nan * np.ones(n_res),
SRES: np.nan * np.ones([n_res, dim]),
RDATAS: rdatas
}
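# Worked example of the shapes produced above (illustrative numbers,
# not from a real model): with nt = 8 total timepoints, nytrue = 4
# observables and dim = 3 parameters:
import numpy as np

nt, nytrue, dim = 8, 4, 3
n_res = nt * nytrue                                       # 8 * 4 = 32
assert (np.nan * np.ones(n_res)).shape == (32,)           # RES shape
assert (np.nan * np.ones([n_res, dim])).shape == (32, 3)  # SRES shape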
def postprocess(self, result: Dict) -> Dict:
"""Constrain results to optimization parameter dimensions."""
result = super().postprocess(result)
if result.get(GRAD, None) is not None:
grad = result[GRAD]
if grad.size == self.dim_full:
grad = grad[self.x_free_indices]
result[GRAD] = grad
if result.get(HESS, None) is not None:
hess = result[HESS]
if hess.shape[0] == self.dim_full:
hess = hess[np.ix_(self.x_free_indices, self.x_free_indices)]
result[HESS] = hess
if result.get(SRES, None) is not None:
sres = result[SRES]
if sres.shape[-1] == self.dim_full:
sres = sres[..., self.x_free_indices]
result[SRES] = sres
return result
def __init__(self, options: Union[HistoryOptions, Dict] = None):
super().__init__(options=options)
self._trace_keys = {X, FVAL, GRAD, HESS, RES, SRES, CHI2, SCHI2, TIME}
self._trace: Dict[str, Any] = {key: [] for key in self._trace_keys}
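# Illustration of the per-key trace layout above: one list per tracked
# quantity, appended to at each recorded evaluation (the update shown
# here is a sketch, not the class's actual update method).
trace = {key: [] for key in ('x', 'fval', 'time')}
trace['fval'].append(1.23)
trace['time'].append(0.05)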
# sum the scalar quantities over the aggregated objectives
result = {
    key: sum(rval[key] for rval in rvals)
    for key in [FVAL, CHI2, SCHI2, GRAD, HESS]
}
# extract rdatas and flatten
result[RDATAS] = []
for rval in rvals:
if RDATAS in rval:
result[RDATAS].extend(rval[RDATAS])
# initialize res and sres
if RES in rvals[0]:
res = np.asarray(rvals[0][RES])
else:
res = None
if SRES in rvals[0]:
sres = np.asarray(rvals[0][SRES])
else:
sres = None
# skip iobj=0 after initialization, stack matrices
for rval in rvals[1:]:
if res is not None:
res = np.hstack([res, np.asarray(rval[RES])])
if sres is not None:
sres = np.vstack([sres, np.asarray(rval[SRES])])
# fill res, sres into result
if res is not None:
result[RES] = res
if sres is not None:
result[SRES] = sres
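# Why res uses hstack while sres uses vstack: residual vectors
# concatenate along their single axis, whereas sensitivity matrices of
# shape (n_res, n_par) stack along the residual axis. For example:
import numpy as np

res_a, res_b = np.ones(3), np.ones(2)
sres_a, sres_b = np.ones((3, 4)), np.ones((2, 4))
assert np.hstack([res_a, res_b]).shape == (5,)
assert np.vstack([sres_a, sres_b]).shape == (5, 4)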
# WLS = np.sqrt(WLS)
WLS = compute_WLS(optimal_surrogate, self.problem, edatas, rdatas)
print('cost function: ' + str(WLS))
# TODO: gradient computation
# if sensi_order > 0:
# snllh = compute_snllh(edatas, rdatas, optimal_scalings, obj.x_ids, obj.mapping_par_opt_to_par_sim, obj.dim)
# TODO compute FIM or HESS
# TODO RES, SRES should also be possible, right?
return {
FVAL: WLS,
GRAD: snllh,
HESS: s2nllh,
RES: res,
SRES: sres,
RDATAS: rdatas
}
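# For orientation, a generic weighted least-squares cost of the kind
# compute_WLS evaluates above (a hypothetical sketch; the actual
# implementation, e.g. how the optimal surrogate data enter, may
# differ):
import numpy as np

def wls_cost(measurements, simulations, sigmas):
    """Sum of squared residuals weighted by standard deviations."""
    residuals = np.asarray(measurements) - np.asarray(simulations)
    return float(np.sum((residuals / np.asarray(sigmas)) ** 2))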