How to use jaxlib - common examples

To help you get started, we’ve selected a few jaxlib examples based on popular ways the library is used in public projects. All of the snippets below come from the google/jax repository and show the low-level XLA custom-call wrappers and the version check that jaxlib backs.

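jaxlib is the support package that ships JAX’s compiled runtime: the XLA client plus, in CUDA builds, the GPU kernels wrapped below. Most projects never import it directly; you install a compatible jaxlib next to jax and drive it through the jax API. A minimal sketch, assuming jax and a matching jaxlib are installed:

import jax
import jax.numpy as jnp

@jax.jit                       # compiled through the XLA client that jaxlib provides
def gram(x):
  return jnp.dot(x, x.T)

x = jnp.ones((4, 4))
print(gram(x))                 # runs on CPU, or on GPU if a CUDA-enabled jaxlib is installed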

From google/jax, jaxlib/cusolver.py: getrf (LU decomposition)
def getrf(c, a):
  """LU decomposition."""
  a_shape = c.GetShape(a)
  dtype = a_shape.element_type()
  dims = a_shape.dimensions()
  assert len(dims) >= 2
  m, n = dims[-2:]
  batch_dims = tuple(dims[:-2])
  num_bd = len(batch_dims)
  batch = _prod(batch_dims)

  if batch > 1 and m == n and m // batch <= 128:
    lwork, opaque = cublas_kernels.build_getrf_batched_descriptor(
      np.dtype(dtype), batch, m)
    workspace = _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,))
    kernel = b"cublas_getrf_batched"
  else:
    lwork, opaque = cusolver_kernels.build_getrf_descriptor(
        np.dtype(dtype), batch, m, n)
    workspace = _Shape.array_shape(dtype, (lwork,), (0,))
    kernel = b"cusolver_getrf"

  out = c.CustomCall(
      kernel,
      operands=(a,),
      shape_with_layout=_Shape.tuple_shape((
          _Shape.array_shape(
              dtype, batch_dims + (m, n),
              (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),
          _Shape.array_shape(
              np.dtype(np.int32), batch_dims + (min(m, n),),
              tuple(range(num_bd, -1, -1))),
          _Shape.array_shape(
              np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),
          workspace,
      )),
      operand_shapes_with_layout=(
          _Shape.array_shape(
              dtype, batch_dims + (m, n),
              (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),
      ),
      opaque=opaque)
  # Outputs: LU factors (overwriting a's buffer), pivot indices, and per-matrix info codes.
  return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 1),
          c.GetTupleElement(out, 2))
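Application code does not call getrf directly; it sits behind JAX's LU factorization, which on a CUDA build dispatches to kernels like the one above. A minimal sketch of the user-facing path:

import jax.numpy as jnp
from jax.scipy.linalg import lu_factor

a = jnp.array([[4.0, 3.0],
               [6.0, 3.0]])
lu, piv = lu_factor(a)   # packed LU factors plus pivot indices
print(lu)
print(piv)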
From google/jax, jax/lib/__init__.py: jaxlib version check on import

# This module is largely a wrapper around `jaxlib` that performs version
# checking on import.

import jaxlib

_minimum_jaxlib_version = (0, 1, 37)
try:
  from jaxlib import version as jaxlib_version
except ImportError:
  # jaxlib is too old to have version number.
  msg = 'This version of jax requires jaxlib version >= {}.'
  raise ImportError(msg.format('.'.join(map(str, _minimum_jaxlib_version))))

version = tuple(int(x) for x in jaxlib_version.__version__.split('.'))

# Check the jaxlib version before importing anything else from jaxlib.
def _check_jaxlib_version():
  if version < _minimum_jaxlib_version:
    msg = 'jaxlib is version {}, but this version of jax requires version {}.'

    if version == (0, 1, 23):
        msg += ('\n\nA common cause of this error is that you installed jaxlib '
                'using pip, but your version of pip is too old to support '
                'manylinux2010 wheels. Try running:\n\n'
                'pip install --upgrade pip\n'
                'pip install --upgrade jax jaxlib\n')
    raise ValueError(msg.format('.'.join(map(str, version)),
                                '.'.join(map(str, _minimum_jaxlib_version))))

_check_jaxlib_version()
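If you hit the error above in practice, the quickest diagnostic is to print the installed jaxlib version yourself, using the same version module the check imports:

from jaxlib import version as jaxlib_version

print(jaxlib_version.__version__)                                     # e.g. "0.1.37"
print(tuple(int(x) for x in jaxlib_version.__version__.split('.')))   # e.g. (0, 1, 37)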
From google/jax, jaxlib/cusolver.py: gesvd (singular value decomposition)
def gesvd(c, a, full_matrices=True, compute_uv=True):
  """Singular value decomposition."""

  a_shape = c.GetShape(a)
  dtype = a_shape.element_type()
  b = 1
  m, n = a_shape.dimensions()
  singular_vals_dtype = _real_type(dtype)

  if m < n:
    lwork, opaque = cusolver_kernels.build_gesvd_descriptor(
        np.dtype(dtype), b, n, m, compute_uv, full_matrices)
    out = c.CustomCall(
        b"cusolver_gesvd",
        operands=(a,),
        shape_with_layout=_Shape.tuple_shape((
            _Shape.array_shape(dtype, (m, n), (1, 0)),
            _Shape.array_shape(np.dtype(singular_vals_dtype), (min(m, n),), (0,)),
            _Shape.array_shape(dtype, (n, n), (1, 0)),
            _Shape.array_shape(dtype, (m, m), (1, 0)),
            _Shape.array_shape(np.dtype(np.int32), (), ()),
            _Shape.array_shape(dtype, (lwork,), (0,)),
        )),
        operand_shapes_with_layout=(
            _Shape.array_shape(dtype, (m, n), (1, 0)),
        ),
        opaque=opaque)
    s = c.GetTupleElement(out, 1)
    vt = c.GetTupleElement(out, 2)
    u = c.GetTupleElement(out, 3)
    info = c.GetTupleElement(out, 4)
  else:
    lwork, opaque = cusolver_kernels.build_gesvd_descriptor(
        np.dtype(dtype), b, m, n, compute_uv, full_matrices)

    out = c.CustomCall(
        b"cusolver_gesvd",
        operands=(a,),
        shape_with_layout=_Shape.tuple_shape((
            _Shape.array_shape(dtype, (m, n), (0, 1)),
            _Shape.array_shape(np.dtype(singular_vals_dtype), (min(m, n),), (0,)),
            _Shape.array_shape(dtype, (m, m), (0, 1)),
            _Shape.array_shape(dtype, (n, n), (0, 1)),
            _Shape.array_shape(np.dtype(np.int32), (), ()),
            _Shape.array_shape(dtype, (lwork,), (0,)),
        )),
        operand_shapes_with_layout=(
            _Shape.array_shape(dtype, (m, n), (0, 1)),
        ),
        opaque=opaque)
    s = c.GetTupleElement(out, 1)
    u = c.GetTupleElement(out, 2)
    vt = c.GetTupleElement(out, 3)
    info = c.GetTupleElement(out, 4)
  # NOTE: any trimming of u/vt for full_matrices=False is omitted in this fragment.
  return s, u, vt, info
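On a GPU build, jax.numpy.linalg.svd is the user-facing entry point for this kernel (the same call works on CPU); a minimal sketch:

import jax.numpy as jnp

a = jnp.arange(6.0).reshape(3, 2)
u, s, vt = jnp.linalg.svd(a, full_matrices=False)
print(u.shape, s.shape, vt.shape)   # (3, 2) (2,) (2, 2)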
From google/jax, jaxlib/cusolver.py: module setup and the _real_type helper

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import operator

import numpy as np
from six.moves import reduce

from jaxlib import xla_client

try:
  from jaxlib import cublas_kernels
  for _name, _value in cublas_kernels.registrations().items():
    xla_client.register_custom_call_target(_name, _value, platform="gpu")
except ImportError:
  pass

try:
  from jaxlib import cusolver_kernels
  for _name, _value in cusolver_kernels.registrations().items():
    xla_client.register_custom_call_target(_name, _value, platform="gpu")
except ImportError:
  pass


_Shape = xla_client.Shape


# Product of a sequence of ints; used throughout the module to compute batch sizes.
_prod = lambda xs: reduce(operator.mul, xs, 1)


def _real_type(dtype):
  """Returns the real equivalent of 'dtype'."""
  if dtype == np.float32:
    return np.float32
  elif dtype == np.float64:
    return np.float64
  elif dtype == np.complex64:
    return np.float32
  elif dtype == np.complex128:
    return np.float64
  else:
    raise NotImplementedError("Unsupported dtype {}".format(dtype))
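The try/except registration pattern above also doubles as a quick way to tell whether a given jaxlib build ships the GPU kernels at all; a small check along the same lines, using the module names from the snippet:

try:
  from jaxlib import cusolver_kernels
  print("GPU solver kernels available:", sorted(cusolver_kernels.registrations()))
except ImportError:
  print("CPU-only jaxlib build; no GPU custom-call targets to register")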
From google/jax, jaxlib/cusolver.py: trsm (batched triangular solve)
def trsm(c, a, b, left_side=False, lower=False, trans_a=False, conj_a=False,
         diag=False):
  """Batched triangular solve via the cuBLAS trsm kernel (defaults assumed)."""
  b_shape = c.GetShape(b)
  dtype = b_shape.element_type()
  dims = b_shape.dimensions()          # trailing two axes of b are (m, n)
  assert len(dims) >= 2
  m, n = dims[-2:]
  batch_dims = tuple(dims[:-2])
  num_bd = len(batch_dims)
  batch = _prod(batch_dims)
  k = m if left_side else n

  a_shape = c.GetShape(a)
  if (batch_dims + (k, k) != a_shape.dimensions() or
      a_shape.element_type() != dtype):
    raise ValueError("Argument mismatch for trsm, got {} and {}".format(
      a_shape, b_shape))

  if conj_a and not trans_a:
    raise NotImplementedError("Conjugation without transposition not supported")

  lwork, opaque = cublas_kernels.build_trsm_batched_descriptor(
    np.dtype(dtype), batch, m, n, left_side, lower, trans_a, conj_a, diag)
  layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))
  out = c.CustomCall(
      b"cublas_trsm_batched",
      operands=(a, b),
      shape_with_layout=_Shape.tuple_shape((
          _Shape.array_shape(dtype, b_shape.dimensions(), layout),
          _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)),
          _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)))),
      operand_shapes_with_layout=(
          _Shape.array_shape(dtype, a_shape.dimensions(), layout),
          _Shape.array_shape(dtype, b_shape.dimensions(), layout),
      ),
      opaque=opaque)
  return c.GetTupleElement(out, 0)
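In user code, triangular solves go through jax.scipy.linalg.solve_triangular, which on a CUDA build is backed by kernels like this one for batched inputs; a minimal sketch:

import jax.numpy as jnp
from jax.scipy.linalg import solve_triangular

a = jnp.array([[2.0, 0.0],
               [1.0, 3.0]])   # lower-triangular
b = jnp.array([[2.0],
               [5.0]])
x = solve_triangular(a, b, lower=True)
print(x)                      # x satisfies a @ x == b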
From google/jax, jaxlib/cusolver.py: syevd/syevj (symmetric eigendecomposition)
def syevd(c, a, lower=False):   # name and defaults assumed for this fragment
  """Symmetric (Hermitian) eigendecomposition."""
  a_shape = c.GetShape(a)
  dtype = a_shape.element_type()
  dims = a_shape.dimensions()
  assert len(dims) >= 2
  m, n = dims[-2:]
  assert m == n
  batch_dims = tuple(dims[:-2])
  num_bd = len(batch_dims)
  batch = _prod(batch_dims)
  layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))

  if n <= 32:
    kernel = b"cusolver_syevj"
    lwork, opaque = cusolver_kernels.build_syevj_descriptor(
        np.dtype(dtype), lower, batch, n)
  else:
    kernel = b"cusolver_syevd"
    lwork, opaque = cusolver_kernels.build_syevd_descriptor(
        np.dtype(dtype), lower, batch, n)
  eigvals_type = _real_type(dtype)

  out = c.CustomCall(
      kernel,
      operands=(a,),
      shape_with_layout=_Shape.tuple_shape((
          _Shape.array_shape(dtype, dims, layout),
          _Shape.array_shape(
              np.dtype(eigvals_type), batch_dims + (n,),
              tuple(range(num_bd, -1, -1))),
          _Shape.array_shape(
              np.dtype(np.int32), batch_dims,
              tuple(range(num_bd - 1, -1, -1))),
          _Shape.array_shape(dtype, (lwork,), (0,))
      )),
      operand_shapes_with_layout=(
          _Shape.array_shape(dtype, dims, layout),
      ),
      opaque=opaque)
  # Outputs: eigenvectors (overwriting a's buffer), eigenvalues, and info codes.
  return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 1),
          c.GetTupleElement(out, 2))
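From user code the symmetric (Hermitian) eigensolver is reached through jax.numpy.linalg.eigh; a minimal sketch:

import jax.numpy as jnp

a = jnp.array([[2.0, 1.0],
               [1.0, 2.0]])
w, v = jnp.linalg.eigh(a)
print(w)   # eigenvalues in ascending order: [1. 3.]
print(v)   # columns are the corresponding orthonormal eigenvectors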