Reg = MatrixMult(np.eye(par['nx']), dtype=par['dtype'])
NReg = MatrixMult(np.eye(par['nx']), dtype=par['dtype'])
Weigth = Diagonal(np.ones(par['ny']), dtype=par['dtype'])
x = np.ones(par['nx']) + par['imag']*np.ones(par['nx'])
x0 = np.random.normal(0, 10, par['nx']) + \
par['imag']*np.random.normal(0, 10, par['nx']) if par['x0'] else None
y = Gop * x
# normal equations with regularization
xinv = NormalEquationsInversion(Gop, [Reg], y, epsI=0,
epsRs=[1e-8], x0=x0,
returninfo=False,
**dict(maxiter=200, tol=1e-10))
assert_array_almost_equal(x, xinv, decimal=3)
# normal equations with weight
xinv = NormalEquationsInversion(Gop, None, y, Weight=Weigth, epsI=0,
x0=x0, returninfo=False,
**dict(maxiter=200, tol=1e-10))
assert_array_almost_equal(x, xinv, decimal=3)
# normal equations with weight and small regularization
xinv = NormalEquationsInversion(Gop, [Reg], y, Weight=Weigth, epsI=0,
epsRs=[1e-8], x0=x0, returninfo=False,
**dict(maxiter=200, tol=1e-10))
assert_array_almost_equal(x, xinv, decimal=3)
# normal equations with weight and small normal regularization
xinv = NormalEquationsInversion(Gop, [], y, NRegs=[NReg],
Weight=Weigth, epsI=0,
epsNRs=[1e-8], x0=x0, returninfo=False,
**dict(maxiter=200, tol=1e-10))
assert_array_almost_equal(x, xinv, decimal=3)
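# The test snippet above assumes a modelling operator ``Gop`` and a parameter
# dictionary ``par`` created earlier in the test module. A minimal setup along
# these lines, placed before the snippet (illustrative values, not the
# library's own fixtures), would make it self-contained:
import numpy as np
from numpy.testing import assert_array_almost_equal
from pylops.basicoperators import Diagonal, MatrixMult
from pylops.optimization.leastsquares import NormalEquationsInversion

par = dict(ny=11, nx=11, imag=0, x0=False, dtype='float64')
np.random.seed(10)
G = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype'])
Gop = MatrixMult(G, dtype=par['dtype'])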
xinv_fromx0 = \
    pylops.optimization.leastsquares.RegularizedInversion(Rop, [], y,
                                                          x0=np.ones(N),
                                                          **dict(damp=0, iter_lim=10, show=0))
###############################################################################
# The cost function above can also be expanded in terms of its *normal equations*
#
# .. math::
#    \mathbf{x}_{ne} = (\mathbf{R}^H \mathbf{R})^{-1} \mathbf{R}^H \mathbf{y}
#
# The method :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion` implements
# such a system of equations explicitly and solves it using an iterative scheme suitable
# for square matrices (i.e., :math:`M=N`). While this approach may not seem very useful,
# we will soon see how regularization terms can easily be added to the
# normal equations using this method.
xne = pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [], y)
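###############################################################################
# As a purely illustrative aside (not part of the original example), the
# normal-equations formula can be verified with a tiny, made-up dense
# restriction matrix; a pseudo-inverse is used since
# :math:`\mathbf{R}^H \mathbf{R}` is singular without regularization.
Rsmall = np.eye(8)[::2]                  # toy 4x8 restriction matrix
ysmall = Rsmall @ np.arange(1., 9.)      # toy data
xne_small = np.linalg.pinv(Rsmall.T @ Rsmall) @ (Rsmall.T @ ysmall)
print(xne_small)                         # sampled values recovered, zeros elsewhere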
###############################################################################
# Let's visualize the different inversion results
fig = plt.figure(figsize=(15, 5))
plt.plot(t, x, 'k', lw=2, label='original')
plt.plot(t, xinv, 'b', ms=10, label='inversion')
plt.plot(t, xinv_fromx0, '--r', ms=10, label='inversion from x0')
plt.plot(t, xne, '--g', ms=10, label='normal equations')
plt.legend()
plt.title('Data reconstruction without regularization')
###############################################################################
# Regularization
# ~~~~~~~~~~~~~~
# You may have noticed that none of the inversions has been successful in recovering the original signal.
import numpy as np

from pylops.basicoperators import Diagonal
from pylops.optimization.leastsquares import NormalEquationsInversion


def _IRLS_data(Op, data, nouter, threshR=False, epsR=1e-10,
epsI=1e-10, x0=None, tolIRLS=1e-10,
returnhistory=False, **kwargs_solver):
r"""Iteratively reweighted least squares with L1 data term
"""
if x0 is not None:
data = data - Op * x0
if returnhistory:
xinv_hist = np.zeros((nouter + 1, Op.shape[1]))
rw_hist = np.zeros((nouter + 1, Op.shape[0]))
# first iteration (unweighted least-squares)
xinv = NormalEquationsInversion(Op, None, data, epsI=epsI,
returninfo=False,
**kwargs_solver)
r = data - Op * xinv
if returnhistory:
xinv_hist[0] = xinv
for iiter in range(nouter):
# other iterations (weighted least-squares)
xinvold = xinv.copy()
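        # build weights ~ 1/|r| so that the weighted L2 data misfit behaves
        # like an L1 misfit on the residual (the core idea of IRLS)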
if threshR:
rw = 1. / np.maximum(np.abs(r), epsR)
else:
rw = 1. / (np.abs(r) + epsR)
rw = rw / rw.max()
R = Diagonal(rw)
        xinv = NormalEquationsInversion(Op, [], data, Weight=R,
                                        epsI=epsI,
                                        returninfo=False,
                                        **kwargs_solver)
pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [D2op], y,
epsI=epsI,
epsRs=[epsR],
returninfo=False,
**dict(maxiter=50))
###############################################################################
# Note that if we have access to a fast implementation of the chained
# forward and adjoint of the regularization operator
# (i.e., :math:`\nabla^T\nabla`), we can modify our call to
# :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion` as
# follows:
ND2op = pylops.MatrixMult((D2op.H * D2op).tosparse()) # mimic fast D^T D
xne1 = \
pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [], y,
NRegs=[ND2op],
epsI=epsI,
epsNRs=[epsR],
returninfo=False,
**dict(maxiter=50))
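###############################################################################
# A quick, illustrative sanity check (not in the original example) that the
# pre-assembled normal-regularization operator behaves like the chained one
# on an arbitrary vector:
v = np.random.normal(0, 1, D2op.shape[1])
np.testing.assert_array_almost_equal(ND2op * v, D2op.H * D2op * v, decimal=8)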
###############################################################################
# Regularization can be added to the inversion in two main ways:
#
# * regularized inversion via
#   :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion` or
#   :py:func:`pylops.optimization.leastsquares.RegularizedInversion`
# * preconditioning via :py:func:`pylops.optimization.leastsquares.PreconditionedInversion`
#
# Let's start by regularizing the normal equations using a second derivative operator
#
# .. math::
# \mathbf{x} = (\mathbf{R^TR}+\epsilon_\nabla^2\nabla^T\nabla)^{-1} \mathbf{R^Ty}
# Create regularization operator
D2op = pylops.SecondDerivative(N, dims=None, dtype='float64')
# Regularized inversion
epsR = np.sqrt(0.1)
epsI = np.sqrt(1e-4)
xne = pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [D2op], y, epsI=epsI,
epsRs=[epsR], returninfo=False,
**dict(maxiter=50))
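###############################################################################
# Purely as an illustration of the formula above (feasible here because the
# problem is small), the regularized normal equations can also be assembled
# densely with ``todense()`` and solved directly; the result should approach
# the CG solution as the iterative solver converges.
Rdense = np.asarray(Rop.todense())
D2dense = np.asarray(D2op.todense())
Adense = Rdense.T @ Rdense + (epsI ** 2) * np.eye(Rdense.shape[1]) + \
    (epsR ** 2) * (D2dense.T @ D2dense)
xne_dense = np.linalg.solve(Adense, Rdense.T @ y)
print('Max deviation from the CG solution:', np.max(np.abs(xne - xne_dense)))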
###############################################################################
# We can do the same while using :py:func:`pylops.optimization.leastsquares.RegularizedInversion`
# which solves the following augmented problem
#
# .. math::
# \begin{bmatrix}
# \mathbf{R} \\
# \epsilon_\nabla \nabla
# \end{bmatrix} \mathbf{x} =
# \begin{bmatrix}
# \mathbf{y} \\
# 0
# \end{bmatrix}
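#
# A minimal sketch of how this augmented system could be solved (keyword
# values are illustrative; ``Rop``, ``D2op``, ``y`` and ``epsR`` are assumed
# from the previous cells):
xreg = pylops.optimization.leastsquares.RegularizedInversion(Rop, [D2op], y,
                                                             epsRs=[epsR],
                                                             returninfo=False,
                                                             **dict(damp=0,
                                                                    iter_lim=200,
                                                                    show=0))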
iava = np.sort(np.random.permutation(np.arange(N))[:Nsub2d])
# Create operators and data
Rop = pylops.Restriction(N, iava, dtype='float64')
D2op = pylops.Laplacian((Nz, Nx), weights=(1, 1), dtype='float64')
x = im.flatten()
y = Rop * x
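# Rop.mask returns the model as a masked array in which the non-sampled
# entries are masked out (handy for visualization)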
y1 = Rop.mask(x)
###############################################################################
# We will now use two different routines from our optimization toolbox
# to estimate our original image in the regular grid.
xcg_reg_lop = \
pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [D2op], y,
epsRs=[np.sqrt(0.1)],
returninfo=False,
**dict(maxiter=200))
# Invert for interpolated signal, lsqr
xlsqr_reg_lop, istop, itn, r1norm, r2norm = \
pylops.optimization.leastsquares.RegularizedInversion(Rop, [D2op], y,
epsRs=[np.sqrt(0.1)],
returninfo=True,
**dict(damp=0,
iter_lim=200,
show=0))
# Reshape estimated images
im_sampled = y1.reshape((Nz, Nx))
im_rec_lap_cg = xcg_reg_lop.reshape((Nz, Nx))
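###############################################################################
# For completeness, the LSQR-based estimate could be reshaped and displayed
# alongside the other images in a similar way (a sketch; the figure layout
# and variable names below are illustrative, assuming matplotlib.pyplot is
# imported as ``plt``):
im_rec_lap_lsqr = xlsqr_reg_lop.reshape((Nz, Nx))
fig, axs = plt.subplots(1, 3, figsize=(12, 4))
axs[0].imshow(im_sampled, cmap='gray')
axs[0].set_title('Sampled image')
axs[1].imshow(im_rec_lap_cg, cmap='gray')
axs[1].set_title('Normal equations')
axs[2].imshow(im_rec_lap_lsqr, cmap='gray')
axs[2].set_title('Regularized inversion (LSQR)')
plt.tight_layout()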
###############################################################################
# The cost function above can also be expanded in terms of
# its *normal equations*
#
# .. math::
# \mathbf{x}_{ne}= (\mathbf{R}^T \mathbf{R})^{-1}
# \mathbf{R}^T \mathbf{y}
#
# The method :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion`
# implements such a system of equations explicitly and solves it using an
# iterative scheme suitable for square matrices (i.e., :math:`M=N`).
#
# While this approach may not seem very useful, we will soon see how
# regularization terms can easily be added to the normal equations using
# this method.
xne = pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [], y)
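###############################################################################
# An illustrative observation (not part of the original example): with no
# regularization and the default zero starting guess, the conjugate-gradient
# solution of the normal equations for a restriction operator simply places
# the observed samples back into the model, i.e. it coincides with the
# adjoint applied to the data:
np.testing.assert_array_almost_equal(xne, Rop.H * y, decimal=8)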
###############################################################################
# Let's now visualize the different inversion results
fig = plt.figure(figsize=(12, 4))
plt.plot(t, x, 'k', lw=2, label='original')
plt.plot(t, xinv, 'b', ms=10, label='inversion')
plt.plot(t, xinv_fromx0, '--r', ms=10, label='inversion from x0')
plt.plot(t, xne, '--g', ms=10, label='normal equations')
plt.legend()
plt.title('Data reconstruction without regularization')
###############################################################################
# Regularization
# ~~~~~~~~~~~~~~
# You may have noticed that none of the inversions has been successful in
# recovering the original signal. This is a clear indication that the
# problem is ill-posed and that some form of regularization is needed.