xinv = \
    pylops.optimization.leastsquares.RegularizedInversion(Iop, [D2op], y,
                                                          epsRs=[np.sqrt(lamda/2)],
                                                          **dict(iter_lim=30))
plt.figure(figsize=(10, 5))
plt.plot(x, 'k', lw=3, label='x')
plt.plot(y, '.k', label='y=x+n')
plt.plot(xinv, 'r', lw=5, label='xinv')
plt.legend()
plt.title('L2 inversion')
###############################################################################
# Now we impose blockiness in the solution using the Split Bregman solver
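#
# As a rough sketch (assuming the standard TV formulation used here), the
# solver minimizes
# :math:`J = \frac{\mu}{2} \|\mathbf{y}-\mathbf{x}\|_2^2 + \lambda \|\mathbf{D}\mathbf{x}\|_1`,
# where the :math:`L_1` norm of the first derivative :math:`\mathbf{D}`
# favours piecewise-constant (blocky) solutions.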
Dop = pylops.FirstDerivative(nx, edge=True, kind='backward')
mu = 0.01
lamda = 0.3
niter_out = 50
niter_in = 3
xinv, niter = \
    pylops.optimization.sparsity.SplitBregman(Iop, [Dop], y, niter_out,
                                              niter_in, mu=mu, epsRL1s=[lamda],
                                              tol=1e-4, tau=1.,
                                              **dict(iter_lim=30, damp=1e-10))
plt.figure(figsize=(10, 5))
plt.plot(x, 'k', lw=3, label='x')
plt.plot(y, '.k', label='y=x+n')
plt.plot(xinv, 'r', lw=5, label='xinv')
plt.legend()
###############################################################################
# We now create a signal filled with zeros and a single one at its center and
# apply the derivative matrix by means of a dot product
x = np.zeros(nx)
x[int(nx/2)] = 1
y_dir = np.dot(D, x)
xadj_dir = np.dot(D.T, y_dir)
###############################################################################
# Let's now do the same using the :py:class:`pylops.FirstDerivative` operator
# and compare its outputs after applying the forward and adjoint operators
# to those from the dense matrix.
D1op = pylops.FirstDerivative(nx, dtype='float32')
y_lop = D1op*x
xadj_lop = D1op.H*y_lop
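# As a quick sanity check we can verify that forward and adjoint are mutually
# consistent with a dot-test. This is a minimal sketch reusing ``D1op`` and
# ``nx`` from above; ``dottest`` raises an error if the test fails within the
# given tolerance.
from pylops.utils import dottest
dottest(D1op, nx, nx, tol=1e-6, verb=True)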
fig, axs = plt.subplots(3, 1, figsize=(13, 8))
axs[0].stem(np.arange(nx), x, linefmt='k', markerfmt='ko')
axs[0].set_title('Input', size=20, fontweight='bold')
axs[1].stem(np.arange(nx), y_dir, linefmt='k', markerfmt='ko', label='direct')
axs[1].stem(np.arange(nx), y_lop, linefmt='--r', markerfmt='ro', label='lop')
axs[1].set_title('Forward', size=20, fontweight='bold')
axs[1].legend()
axs[2].stem(np.arange(nx), xadj_dir, linefmt='k',
            markerfmt='ko', label='direct')
axs[2].stem(np.arange(nx), xadj_lop, linefmt='--r',
            markerfmt='ro', label='lop')
axs[2].set_title('Adjoint', size=20, fontweight='bold')
###############################################################################
# We first apply the blurring operator to the sharp image. We then
# try to recover the sharp input image from the blurred one by inverting the
# convolution operator. Note that when we perform the inversion without any
# regularization, the deblurred image shows some ringing due to the
# instabilities of the inverse process. Using an L1 solver with a DWT
# preconditioner or TV regularization allows us to recover sharper contrasts.
imblur = Cop * im.flatten()
imdeblur = \
    pylops.optimization.leastsquares.NormalEquationsInversion(Cop, None,
                                                              imblur,
                                                              maxiter=50)
Wop = pylops.signalprocessing.DWT2D((Nz, Nx), wavelet='haar', level=3)
Dop = [pylops.FirstDerivative(Nz * Nx, dims=(Nz, Nx), dir=0, edge=False),
       pylops.FirstDerivative(Nz * Nx, dims=(Nz, Nx), dir=1, edge=False)]
DWop = Dop + [Wop, ]
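# FISTA below solves for the wavelet coefficients of the image (note the
# combined modelling operator Cop * Wop.H); the deblurred image is then
# recovered by transforming the estimated coefficients back with Wop.H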
imdeblurfista = \
    pylops.optimization.sparsity.FISTA(Cop * Wop.H, imblur, eps=1e-1,
                                       niter=100)[0]
imdeblurfista = Wop.H * imdeblurfista
imdeblurtv = \
    pylops.optimization.sparsity.SplitBregman(Cop, Dop, imblur.flatten(),
                                              niter_outer=10, niter_inner=5,
                                              mu=1.5, epsRL1s=[2e0, 2e0],
                                              tol=1e-4, tau=1., show=False,
                                              **dict(iter_lim=5, damp=1e-4))[0]
imdeblurtv1 = \
axs[0].axis('tight')
axs[1].imshow(y, cmap='gray')
axs[1].set_title('Data')
axs[1].axis('tight')
axs[2].imshow(xrec, cmap='gray')
axs[2].set_title('Adjoint model')
axs[2].axis('tight')
fig.tight_layout()
###############################################################################
# Finally we take advantage of our different solvers and try to invert the
# modelling operator both in a least-squares sense and using TV-reg.
Dop = [
    pylops.FirstDerivative(ny * nx, dims=(nx, ny), dir=0, edge=True,
                           kind='backward', dtype=np.float64),
    pylops.FirstDerivative(ny * nx, dims=(nx, ny), dir=1, edge=True,
                           kind='backward', dtype=np.float64)]
D2op = pylops.Laplacian(dims=(nx, ny), edge=True, dtype=np.float64)
# L2
xinv_sm = \
    pylops.optimization.leastsquares.RegularizedInversion(RLop.H,
                                                          [D2op],
                                                          y.T.flatten(),
                                                          epsRs=[1e1],
                                                          **dict(iter_lim=20))
xinv_sm = np.real(xinv_sm.reshape(nx, ny)).T
# TV
mu = 1.5
lamda = [1., 1.]
niter = 3
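# A sketch of the TV-regularized inversion these parameters feed, following
# the same SplitBregman pattern used earlier (the exact call and parameter
# values in the original example may differ):
xinv_tv = \
    pylops.optimization.sparsity.SplitBregman(RLop.H, Dop, y.T.flatten(),
                                              niter_outer=niter, niter_inner=1,
                                              mu=mu, epsRL1s=lamda,
                                              tol=1e-4, tau=1.,
                                              **dict(iter_lim=20, damp=1e-4))[0]
xinv_tv = np.real(xinv_tv.reshape(nx, ny)).T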
###############################################################################
# We can also use :py:class:`pylops.Kronecker` to do something more
# interesting. Any operator can in fact be applied along a single direction of
# a multi-dimensional input array if combined with an
# :py:class:`pylops.Identity` operator via Kronecker product. Here we apply
# the :py:class:`pylops.FirstDerivative` to the second dimension of the model.
#
# Note that for those operators whose implementation allows their application
# to a single axis via the ``dir`` parameter, using the Kronecker product
# would lead to slower performance. Nevertheless, the Kronecker product allows
# any other operator to be applied to a single dimension.
Nv, Nh = 11, 21
Iop = pylops.Identity(Nv, dtype='float32')
D2hop = pylops.FirstDerivative(Nh, dtype='float32')
X = np.zeros((Nv, Nh))
X[Nv//2, Nh//2] = 1
D2hop = pylops.Kronecker(Iop, D2hop)
Y = D2hop*X.ravel()
Y = Y.reshape(Nv, Nh)
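# As noted above, the same result can be obtained directly with the ``dir``
# parameter of :py:class:`pylops.FirstDerivative`; a quick check of the
# equivalence (reusing X from above):
Dhop = pylops.FirstDerivative(Nv * Nh, dims=(Nv, Nh), dir=1, dtype='float32')
print(np.allclose(Y.ravel(), Dhop * X.ravel()))  # expected to print True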
fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle('Kronecker', fontsize=14,
             fontweight='bold', y=0.95)
im = axs[0].imshow(X, interpolation='nearest')
axs[0].axis('tight')
axs[0].set_title(r'$x$')
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation='nearest')
axs[1].plot(t, x, 'k', lw=8, label='original')
axs[1].plot(t[1:-1], xder[1:-1], 'r', lw=5, label='numerical')
axs[1].plot(t, xinv, '--g', lw=3, label='inverted')
axs[1].legend()
axs[1].set_title('Inverse causal integration = Derivative')
###############################################################################
# As expected we obtain the same result. Let's see what happens if we now
# add some random noise to our data.
# Add noise
yn = y + np.random.normal(0, 4e-1, y.shape)
# Numerical derivative
Dop = pylops.FirstDerivative(nt, sampling=dt)
xder = Dop*yn
# Regularized derivative
Rop = pylops.SecondDerivative(nt)
xreg = pylops.optimization.leastsquares.RegularizedInversion(
    Cop, [Rop], yn, epsRs=[1e0], **dict(iter_lim=100, atol=1e-5))
# Preconditioned derivative
Sop = pylops.Smoothing1D(41, nt)
xp = pylops.optimization.leastsquares.PreconditionedInversion(
    Cop, Sop, yn, **dict(iter_lim=10, atol=1e-3))
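# As a rough sketch, the regularized estimate above minimizes
# :math:`J = \|\mathbf{y}_n - \mathbf{C}\mathbf{x}\|_2^2 + \epsilon_R^2 \|\mathbf{D}^2\mathbf{x}\|_2^2`
# with :math:`\mathbf{D}^2` the second derivative, while the preconditioned one
# solves for :math:`\mathbf{x} = \mathbf{S}\mathbf{p}` so that the estimate
# inherits the smoothness of the preconditioner :math:`\mathbf{S}`.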
# Visualize data and inversion
fig, axs = plt.subplots(1, 2, figsize=(18, 5))
axs[0].plot(t, y, 'k', lw=3, label='data')
axs[0].plot(t, yn, '--g', lw=3, label='noisy data')
def _PoststackLinearModelling(wav, nt0, spatdims=None, explicit=False,
                              sparse=False, _MatrixMult=MatrixMult,
                              _Convolve1D=Convolve1D,
                              _FirstDerivative=FirstDerivative,
                              args_MatrixMult={}, args_Convolve1D={},
                              args_FirstDerivative={}):
    """Post-stack linearized seismic modelling operator.

    Used to allow operators from different libraries to be provided to
    PoststackLinearModelling. It behaves in the same way as the public method
    (PoststackLinearModelling) but exposes additional input parameters for
    passing a different operator and extra arguments to that operator.
    """
    if len(wav.shape) == 2 and wav.shape[0] != nt0:
        raise ValueError('Provide 1d wavelet or 2d wavelet composed of nt0 '
                         'wavelets')
    # organize dimensions
        minv = RegularizedInversion(PPop, [Regop], data.flatten(),
                                    x0=None if m0 is None else m0.flatten(),
                                    epsRs=[epsR], returninfo=False,
                                    **kwargs_solver)
    else:
        # Blockiness-promoting inversion with spatial regularization
        if dims == 1:
            RegL1op = FirstDerivative(nt0, dtype=PPop.dtype)
            RegL2op = None
        elif dims == 2:
            RegL1op = FirstDerivative(nt0*nx, dims=(nt0, nx),
                                      dir=0, dtype=PPop.dtype)
            RegL2op = SecondDerivative(nt0*nx, dims=(nt0, nx),
                                       dir=1, dtype=PPop.dtype)
        else:
            RegL1op = FirstDerivative(nt0*nx*ny, dims=(nt0, nx, ny),
                                      dir=0, dtype=PPop.dtype)
            RegL2op = Laplacian((nt0, nx, ny), dirs=(1, 2),
                                dtype=PPop.dtype)
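
        # Pull the Split Bregman parameters out of kwargs_solver, falling back
        # to defaults when the caller does not provide them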
        if 'mu' in kwargs_solver.keys():
            mu = kwargs_solver['mu']
            kwargs_solver.pop('mu')
        else:
            mu = 1.
        if 'niter_outer' in kwargs_solver.keys():
            niter_outer = kwargs_solver['niter_outer']
            kwargs_solver.pop('niter_outer')
        else:
            niter_outer = 3
        if 'niter_inner' in kwargs_solver.keys():
            niter_inner = kwargs_solver['niter_inner']