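# The following assumes that a sharp image ``im`` of size ``Nz x Nx`` and a
# 2D blurring kernel ``h`` of size ``nh`` have been created beforehand. A
# minimal sketch of such a setup (the Gaussian kernel below is an assumption,
# not necessarily the original example's choice):
import numpy as np
import matplotlib.pyplot as plt
import pylops

Nz, Nx = 128, 128
im = np.zeros((Nz, Nx))
im[Nz // 4:3 * Nz // 4, Nx // 4:3 * Nx // 4] = 1.  # placeholder sharp image
nh = [15, 15]
hz = np.exp(-0.1 * np.linspace(-(nh[0] // 2), nh[0] // 2, nh[0]) ** 2)
hx = np.exp(-0.1 * np.linspace(-(nh[1] // 2), nh[1] // 2, nh[1]) ** 2)
h = hz[:, np.newaxis] * hx[np.newaxis, :]  # separable Gaussian blurring kernel
h /= np.sqrt(np.sum(h ** 2))  # normalize kernel energy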
Cop = pylops.signalprocessing.Convolve2D(Nz * Nx, h=h,
                                         offset=(nh[0] // 2, nh[1] // 2),
                                         dims=(Nz, Nx), dtype='float32')
###############################################################################
# We first apply the blurring operator to the sharp image. We then try to
# recover the sharp input image from the blurred data by inverting the
# convolution operator. Note that when we perform the inversion without any
# regularization, the deblurred image shows ringing due to the
# instabilities of the inverse process. Using an L1 solver with a DWT
# preconditioner or TV regularization allows us to recover sharper contrasts.
imblur = Cop * im.flatten()
imdeblur = \
pylops.optimization.leastsquares.NormalEquationsInversion(Cop, None,
imblur,
maxiter=50)
Wop = pylops.signalprocessing.DWT2D((Nz, Nx), wavelet='haar', level=3)
Dop = [pylops.FirstDerivative(Nz * Nx, dims=(Nz, Nx), dir=0, edge=False),
pylops.FirstDerivative(Nz * Nx, dims=(Nz, Nx), dir=1, edge=False)]
DWop = Dop + [Wop, ]
imdeblurfista = \
pylops.optimization.sparsity.FISTA(Cop * Wop.H, imblur, eps=1e-1,
niter=100)[0]
imdeblurfista = Wop.H * imdeblurfista
imdeblurtv = \
    pylops.optimization.sparsity.SplitBregman(Cop, Dop, imblur.flatten(),
                                              niter_outer=10, niter_inner=5,
                                              mu=1.5, epsRL1s=[2e0, 2e0],
                                              tol=1e-4, tau=1.,
                                              **dict(iter_lim=5, damp=1e-4))[0]
# (the call above was truncated in the source; mu, epsRL1s and the lsqr
# keywords are assumptions)
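###############################################################################
# ``DWop`` stacks the TV terms with the wavelet transform, so both types of
# sparsity can be promoted in a single run. A sketch of such a call (the
# variable name ``imdeblurtv1`` and the ``epsRL1s`` weights are assumptions):
imdeblurtv1 = \
    pylops.optimization.sparsity.SplitBregman(Cop, DWop, imblur.flatten(),
                                              niter_outer=10, niter_inner=5,
                                              mu=1.5,
                                              epsRL1s=[1e0, 1e0, 5e-1],
                                              tol=1e-4, tau=1.,
                                              **dict(iter_lim=5, damp=1e-4))[0]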
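# The comparison figure did not survive extraction; a minimal sketch of what
# it may have looked like (panel layout is an assumption):
fig, axs = plt.subplots(1, 4, figsize=(14, 4))
axs[0].imshow(im, cmap='viridis')
axs[0].set_title('Original')
axs[1].imshow(imblur.reshape(Nz, Nx), cmap='viridis')
axs[1].set_title('Blurred')
axs[2].imshow(imdeblur.reshape(Nz, Nx), cmap='viridis')
axs[2].set_title('Deblurred L2')
axs[3].imshow(imdeblurtv.reshape(Nz, Nx), cmap='viridis')
axs[3].set_title('Deblurred TV')
for ax in axs:
    ax.axis('tight')
fig.tight_layout()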
###############################################################################
# Finally we take advantage of our different solvers and try to invert the
# modelling operator both in a least-squares sense and with TV regularization.
Dop = [
    pylops.FirstDerivative(ny * nx, dims=(nx, ny), dir=0, edge=True,
                           kind='backward', dtype=np.float64),
    pylops.FirstDerivative(ny * nx, dims=(nx, ny), dir=1, edge=True,
                           kind='backward', dtype=np.float64)]
D2op = pylops.Laplacian(dims=(nx, ny), edge=True, dtype=np.float64)
# L2
xinv_sm = \
pylops.optimization.leastsquares.RegularizedInversion(RLop.H,
[D2op],
y.T.flatten(),
epsRs=[1e1],
**dict(iter_lim=20))
xinv_sm = np.real(xinv_sm.reshape(nx, ny)).T
# TV
mu = 1.5
lamda = [1., 1.]
niter = 3
niterinner = 4
xinv, niter = \
    pylops.optimization.sparsity.SplitBregman(RLop.H, Dop, y.T.flatten(),
                                              niter, niterinner, mu=mu,
                                              epsRL1s=lamda, tol=1e-4, tau=1.,
                                              show=False,
                                              **dict(iter_lim=20, damp=1e-2))
xinv = np.real(xinv.reshape(nx, ny)).T
plt.plot(t, x, '.k', ms=20, label='all samples')
plt.plot(t, ymask, '.g', ms=15, label='available samples')
plt.legend()
plt.title('Data restriction')
###############################################################################
# Back to our first cost function, this can be very easily implemented using
# the ``/`` operator for PyLops operators, which will automatically call
# :py:func:`scipy.sparse.linalg.lsqr` with some default parameters.
xinv = Rop / y
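###############################################################################
# For reference, the division above is equivalent to calling
# :py:func:`scipy.sparse.linalg.lsqr` directly on the operator (the explicit
# ``iter_lim`` below is an assumption; defaults may differ):
from scipy.sparse.linalg import lsqr
xinv_lsqr = lsqr(Rop, y, iter_lim=10)[0]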
###############################################################################
# We could also use :py:func:`pylops.optimization.leastsquares.RegularizedInversion`
# (without regularization terms for now) and customize the solver using ``kwargs``.
xreg = \
pylops.optimization.leastsquares.RegularizedInversion(Rop, [], y,
**dict(damp=0, iter_lim=10, show=1))
###############################################################################
# And finally select a starting guess different from the null vector
xinv_fromx0 = \
pylops.optimization.leastsquares.RegularizedInversion(Rop, [], y,
x0=np.ones(N),
**dict(damp=0, iter_lim=10, show=0))
###############################################################################
# The cost function above can also be expanded in terms of its *normal equations*
#
# .. math::
#    \mathbf{x}_{ne}= (\mathbf{R}^H \mathbf{R})^{-1} \mathbf{R}^H \mathbf{y}
#
# The method :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion`
# implements such a system of equations explicitly.
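# A minimal sketch of that call, producing the ``xne`` estimate plotted in
# the final figure (any extra solver keywords would be assumptions):
xne = pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [], y)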
y = Rop * x
y1 = Rop.mask(x)
###############################################################################
# We will now use two different routines from our optimization toolbox
# to estimate our original image on the regular grid.
xcg_reg_lop = \
pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [D2op], y,
epsRs=[np.sqrt(0.1)],
returninfo=False,
**dict(maxiter=200))
# Invert for interpolated signal, lsqr
xlsqr_reg_lop, istop, itn, r1norm, r2norm = \
pylops.optimization.leastsquares.RegularizedInversion(Rop, [D2op], y,
epsRs=[np.sqrt(0.1)],
returninfo=True,
**dict(damp=0,
iter_lim=200,
show=0))
# Reshape estimated images
im_sampled = y1.reshape((Nz, Nx))
im_rec_lap_cg = xcg_reg_lop.reshape((Nz, Nx))
im_rec_lap_lsqr = xlsqr_reg_lop.reshape((Nz, Nx))
###############################################################################
# Finally we visualize the original image, the reconstructed images and
# their error
fig, axs = plt.subplots(1, 4, figsize=(12, 4))
# (the panel-filling code for this figure did not survive extraction)
###############################################################################
# A different philosophy is to promote sparsity: the regularization term is
# replaced by a norm-p (:math:`p \le 1`) and the problem is generally recast
# by considering the model to be sparse in some domain. We can follow this
# philosophy as our signal to invert was actually created as a superposition
# of 3 sinusoids (i.e., three spikes in the Fourier domain). Our new cost
# function is:
#
# .. math::
# J_1 = ||\mathbf{y} - \mathbf{R} \mathbf{F} \mathbf{p}||_2^2 +
# \epsilon ||\mathbf{p}||_1
#
# where :math:`\mathbf{F}` is the FFT operator. We will thus use the
# :py:func:`pylops.optimization.sparsity.ISTA` and
# :py:func:`pylops.optimization.sparsity.FISTA` solvers to estimate our input
# signal.
pista, niteri, costi = \
pylops.optimization.sparsity.ISTA(Rop*FFTop.H, y, niter=1000,
eps=0.1, tol=1e-7, returninfo=True)
xista = FFTop.H*pista
pfista, niterf, costf = \
pylops.optimization.sparsity.FISTA(Rop*FFTop.H, y, niter=1000,
eps=0.1, tol=1e-7, returninfo=True)
xfista = FFTop.H*pfista
fig, axs = plt.subplots(2, 1, figsize=(12, 8))
fig.suptitle('Data reconstruction with sparsity', fontsize=14,
fontweight='bold', y=0.9)
axs[0].plot(f, np.abs(X), 'k', lw=3)
axs[0].plot(f, np.abs(pista), '--r', lw=3)
axs[0].plot(f, np.abs(pfista), '--g', lw=3)
axs[0].set_xlim(0, 30)
axs[0].set_title('Frequency domain')
# (the time-domain panel of this figure did not survive extraction)
fig, axs = plt.subplots(1, 3, figsize=(10, 4))
axs[0].imshow(x, vmin=0, vmax=1, cmap='gray')
axs[0].set_title('Model')
axs[0].axis('tight')
axs[1].imshow(xinv_sm, vmin=0, vmax=1, cmap='gray')
axs[1].set_title('L2 Inversion')
axs[1].axis('tight')
axs[2].imshow(xinv, vmin=0, vmax=1, cmap='gray')
axs[2].set_title('TV-Reg Inversion')
axs[2].axis('tight')
fig.tight_layout()
###############################################################################
# We can also write a preconditioned problem, whose cost function is
#
# .. math::
# J= ||\mathbf{y} - \mathbf{R} \mathbf{P} \mathbf{p}||_2
#
# where :math:`\mathbf{P}` is the preconditioning operator, :math:`\mathbf{p}` is the projected model
# in the preconditioned space, and :math:`\mathbf{x}=\mathbf{P}\mathbf{p}` is the model in
# the original space that we want to solve for. Note that a preconditioned problem generally
# converges much faster to its solution than its corresponding regularized problem. This can
# be done using the routine :py:func:`pylops.optimization.leastsquares.PreconditionedInversion`.
# Create smoothing operator to be used as preconditioner
Sop = pylops.Smoothing1D(nsmooth=11, dims=[N], dtype='float64')
# Invert for interpolated signal
xprec = pylops.optimization.leastsquares.PreconditionedInversion(Rop, Sop, y,
returninfo=False,
**dict(damp=np.sqrt(1e-9),
iter_lim=20, show=0))
###############################################################################
# Let's finally visualize these solutions
# sphinx_gallery_thumbnail_number=3
fig = plt.figure(figsize=(15, 5))
plt.plot(t[iava], y, '.k', ms=20, label='available samples')
plt.plot(t, x, 'k', lw=3, label='original')
plt.plot(t, xne, 'b', lw=3, label='normal equations')
plt.plot(t, xreg, '--r', lw=3, label='regularized')
plt.plot(t, xprec, '--g', lw=3, label='preconditioned equations')
plt.legend()
plt.title('Data reconstruction with regularization')
h, th, hcenter = pylops.utils.wavelets.ricker(t[:101], f0=20)
Cop = pylops.signalprocessing.Convolve1D(nt, h=h, offset=hcenter,
dtype='float32')
y = Cop*x
yn = y + np.random.normal(0, 0.1, y.shape)
# noise free
xls = Cop / y
xomp, nitero, costo = \
pylops.optimization.sparsity.OMP(Cop, y, niter_outer=200, sigma=1e-8)
xista, niteri, costi = \
pylops.optimization.sparsity.ISTA(Cop, y, niter=400, eps=5e-1,
tol=1e-8, returninfo=True)
fig, ax = plt.subplots(1, 1, figsize=(8, 3))
ax.plot(t, x, 'k', lw=8, label=r'$x$')
ax.plot(t, y, 'r', lw=4, label=r'$y=Ax$')
ax.plot(t, xls, '--g', lw=4, label=r'$x_{LS}$')
ax.plot(t, xomp, '--b', lw=4, label=r'$x_{OMP} (niter=%d)$' % nitero)
ax.plot(t, xista, '--m', lw=4, label=r'$x_{ISTA} (niter=%d)$' % niteri)
ax.set_title('Noise-free deconvolution', fontsize=14, fontweight='bold')
ax.legend()
plt.tight_layout()
# noisy
xls = \
    pylops.optimization.leastsquares.RegularizedInversion(Cop, [], yn,
                                                          **dict(damp=1e-1,
                                                                 iter_lim=100,
                                                                 show=0))
# (the call above was truncated in the source; damp and iter_lim are
# assumptions)
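###############################################################################
# A sparsity-promoting solver is more robust for the noisy case; a sketch
# mirroring the noise-free calls above (``eps`` and ``niter`` are assumptions):
xista_n, niteri_n, costi_n = \
    pylops.optimization.sparsity.ISTA(Cop, yn, niter=400, eps=5e-1,
                                      tol=1e-8, returninfo=True)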