How to use the pylops.optimization.leastsquares.RegularizedInversion function in pylops

To help you get started, we've selected a few pylops examples that show popular ways this function is used in public projects.
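
Before digging into the snippets, here is a minimal, self-contained sketch of the call pattern they all share: a forward operator, an optional list of regularization operators, the data, and extra keyword arguments forwarded to scipy's lsqr solver. The toy operator, data, and parameter values below are illustrative only and do not come from any of the projects; they simply follow the call pattern used in the snippets below.

import numpy as np
import pylops
from pylops.optimization.leastsquares import RegularizedInversion

np.random.seed(0)
n = 20
A = np.random.normal(0., 1., (n, n))
Gop = pylops.MatrixMult(A)    # forward modelling operator
Reg = pylops.Identity(n)      # illustrative zero-order (Tikhonov) regularization
x = np.arange(n, dtype=float)
y = Gop * x

# epsRs weighs each regularization term; the dict entries are passed to lsqr
xinv = RegularizedInversion(Gop, [Reg], y, epsRs=[1e-4],
                            returninfo=False,
                            **dict(damp=0, iter_lim=100, show=0))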


github equinor / pylops / pytests / test_leastsquares.py
    np.random.seed(10)
    G = np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32') + \
        par['imag'] * np.random.normal(0, 10, (par['ny'], par['nx'])).astype(
            'float32')
    Gop = MatrixMult(G, dtype=par['dtype'])
    Reg = MatrixMult(np.eye(par['nx']), dtype=par['dtype'])
    Weigth = Diagonal(np.ones(par['ny']), dtype=par['dtype'])
    x = np.ones(par['nx']) + par['imag']*np.ones(par['nx'])
    x0 = np.random.normal(0, 10, par['nx']) + \
         par['imag']*np.random.normal(0, 10, par['nx']) if par['x0'] else None
    y = Gop*x

    # regularized inversion with regularization
    xinv = RegularizedInversion(Gop, [Reg], y, epsRs=[1e-8], x0=x0,
                                returninfo=False,
                                **dict(damp=0, iter_lim=200, show=0))
    assert_array_almost_equal(x, xinv, decimal=3)
    # regularized inversion with weight
    xinv = RegularizedInversion(Gop, None, y, Weight=Weigth,
                                x0=x0,
                                returninfo=False,
                                **dict(damp=0, iter_lim=200, show=0))
    assert_array_almost_equal(x, xinv, decimal=3)
    # regularized inversion with regularization and weight
    xinv = RegularizedInversion(Gop, [Reg], y, Weight=Weigth,
                                epsRs=[1e-8], x0=x0,
                                returninfo=False,
                                **dict(damp=0, iter_lim=200, show=0))
    assert_array_almost_equal(x, xinv, decimal=3)
github equinor / pylops / pytests / test_leastsquares.py
    np.random.seed(10)
    G = np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32') + \
        par['imag'] * np.random.normal(0, 10, (par['ny'], par['nx'])).astype(
            'float32')
    Gop = MatrixMult(G, dtype=par['dtype'])
    w = np.arange(par['ny'])
    w1 = np.sqrt(w)
    Weigth = Diagonal(w, dtype=par['dtype'])
    Weigth1 = Diagonal(w1, dtype=par['dtype'])
    x = np.ones(par['nx']) + par['imag'] * np.ones(par['nx'])
    y = Gop * x
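
    # weighted normal-equations inversion with weight W should match a
    # weighted regularized inversion with weight W1 = sqrt(W), since
    # W1^H W1 = W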

    xne = NormalEquationsInversion(Gop, None, y, Weight=Weigth,
                                   returninfo=False,
                                   **dict(maxiter=5, tol=1e-10))
    xreg = RegularizedInversion(Gop, None, y, Weight=Weigth1,
                                returninfo=False,
                                **dict(damp=0, iter_lim=5, show=0))
    print(xne)
    print(xreg)
    assert_array_almost_equal(xne, xreg, decimal=3)
github equinor / pylops / tutorials / optimization.py
###############################################################################
# We can do the same while using :py:func:`pylops.optimization.leastsquares.RegularizedInversion`
# which solves the following augmented problem
#
#   .. math::
#       \begin{bmatrix}
#           \mathbf{R}    \\
#           \epsilon_\nabla \nabla
#       \end{bmatrix} \mathbf{x} =
#           \begin{bmatrix}
#           \mathbf{y}    \\
#           0
#       \end{bmatrix}

xreg = pylops.optimization.leastsquares.RegularizedInversion(Rop, [D2op], y,
                                                             epsRs=[np.sqrt(0.1)], returninfo=False,
                                                             **dict(damp=np.sqrt(1e-4),
                                                                  iter_lim=50, show=0))

###############################################################################
# We can also write a preconditioned problem, whose cost function is
#
#   .. math::
#       J= ||\mathbf{y} - \mathbf{R} \mathbf{P} \mathbf{p}||_2
#
# where :math:`\mathbf{P}` is the preconditioning operator, :math:`\mathbf{p}` is the projected model
# in the preconditioned space, and :math:`\mathbf{x}=\mathbf{P}\mathbf{p}` is the model in
# the original model space we want to solve for. A well-chosen preconditioner typically makes
# the inversion converge in fewer iterations than the corresponding regularized problem.
# This can be done using the routine
# :py:func:`pylops.optimization.leastsquares.PreconditionedInversion`, as sketched below.
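
###############################################################################
# A minimal sketch of such a preconditioned inversion, reusing ``Rop`` and
# ``y`` from above; the :py:class:`pylops.Smoothing1D` preconditioner and the
# solver parameters below are illustrative assumptions, not prescribed choices.
Sop = pylops.Smoothing1D(nsmooth=11, dims=Rop.shape[1], dtype='float64')

xprec = pylops.optimization.leastsquares.PreconditionedInversion(Rop, Sop, y,
                                                                 returninfo=False,
                                                                 **dict(damp=np.sqrt(1e-4),
                                                                        iter_lim=20, show=0))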
github equinor / pylops / pylops / avo / poststack.py
                PPop_reg = MatrixMult(PP, dims=nspatprod)
                minv = lstsq(PPop_reg.A, datarn.flatten(), **kwargs_solver)[0]
        else:
            # solve unregularized normal equations simultaneously with lop
            minv = lsqr(PPop, datar, **kwargs_solver)[0]
    else:
        if epsRL1 is None:
            # L2 inversion with spatial regularization
            if dims == 1:
                Regop = SecondDerivative(nt0, dtype=PPop.dtype)
            elif dims == 2:
                Regop = Laplacian((nt0, nx), dtype=PPop.dtype)
            else:
                Regop = Laplacian((nt0, nx, ny), dirs=(1, 2), dtype=PPop.dtype)

            minv = RegularizedInversion(PPop, [Regop], data.flatten(),
                                        x0=None if m0 is None else m0.flatten(),
                                        epsRs=[epsR], returninfo=False,
                                        **kwargs_solver)
        else:
            # Blockiness-promoting inversion with spatial regularization
            if dims == 1:
                RegL1op = FirstDerivative(nt0, dtype=PPop.dtype)
                RegL2op = None
            elif dims == 2:
                RegL1op = FirstDerivative(nt0*nx, dims=(nt0, nx),
                                          dir=0, dtype=PPop.dtype)
                RegL2op = SecondDerivative(nt0*nx, dims=(nt0, nx),
                                           dir=1, dtype=PPop.dtype)
            else:
                RegL1op = FirstDerivative(nt0*nx*ny, dims=(nt0, nx, ny),
                                          dir=0, dtype=PPop.dtype)
github equinor / pylops / examples / plot_wavest.py
axs[2].set_xlabel(r'$\Theta$')

###############################################################################
# We can invert the data. First we will consider noise-free data; subsequently
# we will add some noise and include a regularization term in the inversion
# process to obtain a well-behaved wavelet also under noisy conditions.
wav_est = Wavesop / d.T.flatten()
wav_phase_est = Wavesop_phase / d_phase.T.flatten()
wavn_est = Wavesop / dn.T.flatten()

# Create regularization operator
D2op = pylops.SecondDerivative(ntwav, dtype='float64')

# Invert for wavelet
wavn_reg_est, istop, itn, r1norm, r2norm = \
    pylops.optimization.leastsquares.RegularizedInversion(Wavesop, [D2op], dn.T.flatten(),
                                                          epsRs=[np.sqrt(0.1)], returninfo=True,
                                                          **dict(damp=np.sqrt(1e-4),
                                                                 iter_lim=200, show=0))

###############################################################################
# As expected, the regularization helps to retrieve a smooth wavelet
# even under noisy conditions.

# sphinx_gallery_thumbnail_number = 3
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
axs[0].plot(wav, 'k', lw=6, label='True')
axs[0].plot(wav_est, '--r', lw=4, label='Estimated (noise-free)')
axs[0].plot(wavn_est, '--g', lw=4, label='Estimated (noisy)')
axs[0].plot(wavn_reg_est, '--m', lw=4, label='Estimated (noisy regularized)')
axs[0].set_title('Zero-phase wavelet')
axs[0].grid()
github equinor / pylops / examples / plot_ista.py
tol=1e-8, returninfo=True)

fig, ax = plt.subplots(1, 1, figsize=(8, 3))
ax.plot(t, x, 'k', lw=8, label=r'$x$')
ax.plot(t, y, 'r', lw=4, label=r'$y=Ax$')
ax.plot(t, xls, '--g', lw=4, label=r'$x_{LS}$')
ax.plot(t, xomp, '--b', lw=4, label=r'$x_{OMP} (niter=%d)$' % nitero)
ax.plot(t, xista, '--m', lw=4, label=r'$x_{ISTA} (niter=%d)$' % niteri)

ax.set_title('Noise-free deconvolution', fontsize=14, fontweight='bold')
ax.legend()
plt.tight_layout()

# noisy
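# with no regularization operators, the damp keyword forwarded to lsqr acts
# as a zero-order Tikhonov (damped least-squares) term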
xls = \
    pylops.optimization.leastsquares.RegularizedInversion(Cop, [], yn,
                                                          returninfo=False,
                                                          **dict(damp=1e-1,
                                                                 atol=1e-3,
                                                                 iter_lim=100,
                                                                 show=0))

xista, niteri, costi = \
    pylops.optimization.sparsity.ISTA(Cop, yn, niter=100, eps=5e-1,
                                      tol=1e-5, returninfo=True)

xfista, niterf, costf = \
    pylops.optimization.sparsity.FISTA(Cop, yn, niter=100, eps=5e-1,
                                       tol=1e-5, returninfo=True)

fig, ax = plt.subplots(1, 1, figsize=(8, 3))
ax.plot(t, x, 'k', lw=8, label=r'$x$')