How to use the sigpy.backend module in sigpy

To help you get started, we've selected a few examples showing how the sigpy.backend module is used in public projects.


mikgroup/sigpy: tests/test_linop.py
import unittest
import pickle
import numpy as np
import numpy.testing as npt
from sigpy import backend, linop, util, config

if __name__ == '__main__':
    unittest.main()

dtypes = [np.float32, np.float64, np.complex64, np.complex128]
devices = [backend.cpu_device]
if config.cupy_enabled:
    devices.append(backend.Device(0))


class TestLinop(unittest.TestCase):

    def check_linop_unitary(self, A,
                            device=backend.cpu_device, dtype=float):
        device = backend.Device(device)
        x = util.randn(A.ishape, dtype=dtype, device=device)
        xp = device.xp
        with device:
            xp.testing.assert_allclose(A.H * A * x, x,
                                       atol=1e-5, rtol=1e-5)

    def check_linop_linear(self, A,
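This test builds its device list from backend.cpu_device, adding backend.Device(0) (GPU 0) only when CuPy is available, and allocates test arrays on each device with util.randn. A minimal standalone sketch of the same pattern; the array shape and the use of linop.Identity (the simplest unitary operator) are illustrative, not taken from the original test:

import numpy as np
from sigpy import backend, linop, util, config

# CPU is always available; GPU 0 is added only if CuPy is installed.
devices = [backend.cpu_device]
if config.cupy_enabled:
    devices.append(backend.Device(0))

for device in devices:
    xp = device.xp  # numpy for the CPU device, cupy for a GPU device
    with device:
        x = util.randn([8], dtype=np.complex64, device=device)
        # A unitary operator satisfies A.H * A * x == x; Identity is the simplest case.
        A = linop.Identity(x.shape)
        xp.testing.assert_allclose(A.H * A * x, x, atol=1e-5, rtol=1e-5)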
mikgroup/sigpy: tests/test_conv.py
    def test_convolve_valid(self):
        mode = 'valid'
        devices = [backend.cpu_device]
        if config.cupy_enabled:
            devices.append(backend.Device(0))

        for device in devices:
            xp = device.xp
            with device:
                for dtype in [np.float32, np.float64,
                              np.complex64, np.complex128]:
                    x = util.dirac([1, 3], device=device, dtype=dtype)
                    W = xp.ones([1, 3], dtype=dtype)
                    y = backend.to_device(conv.convolve(
                        x, W, mode=mode), backend.cpu_device)
                    npt.assert_allclose(y, [[1]], atol=1e-5)

                    x = util.dirac([1, 3], device=device, dtype=dtype)
                    W = xp.ones([1, 2], dtype=dtype)
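The convolution result may live on a GPU, so the test moves it back with backend.to_device before comparing against a NumPy reference. A short sketch of that call in isolation (the input array is illustrative):

import numpy as np
import numpy.testing as npt
from sigpy import backend

x = np.ones([1, 3], dtype=np.float32)
# to_device copies an array onto the requested device; copying a NumPy array
# to backend.cpu_device is effectively a no-op, while a CuPy array would be
# transferred back to host memory.
y = backend.to_device(x, backend.cpu_device)
npt.assert_allclose(y, x)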
mikgroup/sigpy: sigpy/pytorch.py
        if device.type == 'cpu':
            output = tensor.detach().numpy()
        else:
            if config.cupy_enabled:
                import cupy as cp
                output = cp.fromDlpack(to_dlpack(tensor))
            else:
                raise TypeError('CuPy not installed, '
                                'but trying to convert GPU PyTorch Tensor.')

        if iscomplex:
            if output.shape[-1] != 2:
                raise ValueError('shape[-1] must be 2 when iscomplex is '
                                 'specified, but got {}'.format(output.shape))

            with backend.get_device(output):
                if output.dtype == np.float32:
                    output = output.view(np.complex64)
                elif output.dtype == np.float64:
                    output = output.view(np.complex128)

                output = output.reshape(output.shape[:-1])

        return output
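Here backend.get_device(output) is used as a context manager so the complex view and reshape run on whichever device the array lives on (CPU for NumPy, GPU for CuPy). A minimal NumPy-only sketch of the same reinterpretation:

import numpy as np
from sigpy import backend

# A real-valued array whose last axis holds (real, imag) pairs.
output = np.zeros([4, 2], dtype=np.float32)
with backend.get_device(output):        # cpu_device for a NumPy array
    output = output.view(np.complex64)  # reinterpret (real, imag) pairs as complex64
    output = output.reshape(output.shape[:-1])
print(output.shape)  # (4,)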
mikgroup/sigpy: sigpy/alg.py
    def _update(self):
        # Update dual.
        util.axpy(self.u, self.sigma, self.A(self.x_ext))
        backend.copyto(self.u, self.proxfc(self.sigma, self.u))

        # Update primal.
        with self.x_device:
            x_old = self.x.copy()
            util.axpy(self.x, -self.tau, self.AH(self.u))
            backend.copyto(self.x, self.proxg(self.tau, self.x))

        # Update step-size if necessary.
        if self.gamma_primal > 0 and self.gamma_dual == 0:
            with self.x_device:
                xp = self.x_device.xp
                theta = 1 / (1 + 2 * self.gamma_primal * self.tau_min)**0.5
                self.tau *= theta
                self.tau_min *= theta

            with self.u_device:
                self.sigma /= theta
        elif self.gamma_primal == 0 and self.gamma_dual > 0:
            with self.u_device:
                xp = self.u_device.xp
                theta = 1 / (1 + 2 * self.gamma_dual * self.sigma_min)**0.5
                self.sigma *= theta
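util.axpy and backend.copyto update the primal and dual arrays in place, so the same update code works whether the arrays are NumPy or CuPy. A tiny sketch of backend.copyto on its own (the arrays are placeholders):

import numpy as np
from sigpy import backend

u = np.zeros(4, dtype=np.float32)
v = np.arange(4, dtype=np.float32)
backend.copyto(u, v)   # in-place copy into u, dispatched to the array's backend
print(u)               # [0. 1. 2. 3.]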
mikgroup/sigpy: sigpy/alg.py
        self.tol = tol

        self.A = A
        self.AH = AH

        self.u = u
        self.x = x

        self.tau = tau
        self.sigma = sigma
        self.theta = theta
        self.gamma_primal = gamma_primal
        self.gamma_dual = gamma_dual

        self.x_device = backend.get_device(x)
        self.u_device = backend.get_device(u)

        with self.x_device:
            self.x_ext = self.x.copy()

        if self.gamma_primal > 0:
            xp = self.x_device.xp
            with self.x_device:
                self.tau_min = xp.amin(xp.abs(tau)).item()

        if self.gamma_dual > 0:
            xp = self.u_device.xp
            with self.u_device:
                self.sigma_min = xp.amin(xp.abs(sigma)).item()

        self.resid = np.inf
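The constructor records each array's device with backend.get_device so later updates can run under `with self.x_device:`. A short sketch of that lookup (the array here is a placeholder, not the algorithm's actual primal variable):

import numpy as np
from sigpy import backend

x = np.ones(3, dtype=np.float32)
x_device = backend.get_device(x)       # backend.cpu_device for a NumPy array
with x_device:
    x_ext = x.copy()                   # allocated on the same device as x
print(x_device == backend.cpu_device)  # True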
mikgroup/sigpy: sigpy/block.py
        input (array): input array of shape num_blks + blk_shape
        oshape (tuple): output shape.
        blk_shape (tuple): block shape. Must have same length as oshape.
        blk_strides (tuple): block strides. Must have same length as oshape.

    Returns:
        array: array of shape oshape.

    """
    ndim = len(blk_shape)

    if 2 * ndim != input.ndim or ndim != len(blk_strides):
        raise ValueError('Input must have same dimensions as blocks.')

    num_blks = input.shape[:ndim]
    device = backend.get_device(input)
    xp = device.xp
    with device:
        output = xp.zeros(oshape, dtype=input.dtype)

        if ndim == 1:
            if device == backend.cpu_device:
                _blocks_to_array1(output, input,
                                  blk_shape[-1],
                                  blk_strides[-1],
                                  num_blks[-1])
            else:  # pragma: no cover
                if np.issubdtype(input.dtype, np.floating):
                    _blocks_to_array1_cuda(input,
                                           blk_shape[-1],
                                           blk_strides[-1],
                                           num_blks[-1],
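This function allocates its output with device.xp and then compares the device against backend.cpu_device to pick a CPU or CUDA kernel. A device-agnostic allocation helper in the same style (zeros_like_on_device is a hypothetical name, not part of sigpy):

from sigpy import backend

def zeros_like_on_device(arr, shape):
    """Allocate a zero array with arr's dtype on the device arr lives on."""
    device = backend.get_device(arr)
    xp = device.xp               # numpy or cupy
    with device:
        return xp.zeros(shape, dtype=arr.dtype)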
mikgroup/sigpy: sigpy/linop.py
    def __mul__(self, input):
        if isinstance(input, Linop):
            return Compose([self, input])
        elif np.isscalar(input):
            M = Multiply(self.ishape, input)
            return Compose([self, M])
        elif isinstance(input, backend.get_array_module(input).ndarray):
            return self.apply(input)

        return NotImplemented
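In Linop.__mul__, backend.get_array_module(input).ndarray is used to test whether the operand is a NumPy or CuPy array, since both modules expose an ndarray type. The same check in isolation:

import numpy as np
from sigpy import backend

x = np.ones(4)
xp = backend.get_array_module(x)    # numpy here; cupy for a CuPy array
print(isinstance(x, xp.ndarray))    # True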
mikgroup/sigpy: sigpy/util.py
def randn(shape, scale=1, dtype=float, device=backend.cpu_device):
    """Create random Gaussian array.

    Args:
        shape (tuple of ints): Output shape.
        scale (float): Standard deviation.
        dtype (Dtype): Output data-type.
        device (Device): Output device.

    Returns:
        array: Random Gaussian array.

    """
    device = backend.Device(device)
    xp = device.xp

    with device:
        if np.issubdtype(dtype, np.complexfloating):
            real_dtype = np.array([], dtype=dtype).real.dtype
            real_shape = tuple(shape) + (2, )
            output = xp.random.normal(size=real_shape, scale=scale / 2**0.5)
            output = output.astype(real_dtype)
            output = output.view(dtype=dtype).reshape(shape)
            return output
        else:
            return xp.random.normal(size=shape, scale=scale).astype(dtype)
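util.randn takes a device argument and allocates inside `with device:`, so the returned array lives on that device. A usage sketch that picks GPU 0 only when CuPy is available:

import numpy as np
from sigpy import backend, util, config

device = backend.Device(0) if config.cupy_enabled else backend.cpu_device
x = util.randn([4, 4], dtype=np.complex64, device=device)
print(backend.get_device(x) == device)  # True: x was created on the chosen device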
mikgroup/sigpy: sigpy/interp.py
    Returns:
        output (array): Output array.

    References:
        https://en.wikipedia.org/wiki/Spline_wavelet#Cardinal_B-splines_of_small_orders
        http://people.math.sfu.ca/~cbm/aands/page_378.htm
    """
    ndim = coord.shape[-1]

    batch_shape = input.shape[:-ndim]
    batch_size = util.prod(batch_shape)

    pts_shape = coord.shape[:-1]
    npts = util.prod(pts_shape)

    xp = backend.get_array_module(input)

    input = input.reshape([batch_size] + list(input.shape[-ndim:]))
    coord = coord.reshape([npts, ndim])
    output = xp.zeros([batch_size, npts], dtype=input.dtype)

    if xp == np:
        _interpolate[kernel][ndim - 1](output, input, coord, width, param)
    else:  # pragma: no cover
        _interpolate_cuda[kernel][ndim - 1](
            input, coord, width, param, output, size=npts)

    return output.reshape(batch_shape + pts_shape)
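The interpolation code calls backend.get_array_module(input) once and then branches on `xp == np` to choose between the CPU and CUDA kernels. The same dispatch pattern in a small device-agnostic helper (scale is a hypothetical example, not a sigpy function):

import numpy as np
from sigpy import backend

def scale(input, alpha):
    # get_array_module returns numpy for NumPy arrays and cupy for CuPy arrays,
    # so the same code path works on either backend.
    xp = backend.get_array_module(input)
    return xp.multiply(input, alpha)

print(scale(np.arange(3), 2.0))  # [0. 2. 4.]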