How to use the tensornetwork.backends.backend_factory.get_backend function in tensornetwork

To help you get started, we’ve selected a few tensornetwork examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github google / TensorNetwork / tensornetwork / network_operations.py View on Github external
def switch_backend(nodes: Iterable[BaseNode], new_backend: Text) -> None:
  """Convert the given nodes to a new backend, in place.

  Each node's tensor is converted to the new backend's tensor type and the
  node's backend attribute is updated. Only nodes currently on the `numpy`
  backend can be switched.

  Args:
    nodes: An iterable of nodes to convert.
    new_backend: The name of the backend to switch to.

  Raises:
    NotImplementedError: If any node's current backend is not 'numpy'.
  """
  target = backend_factory.get_backend(new_backend)
  for node in nodes:
    # Conversion is only implemented from numpy tensors.
    if node.backend.name != "numpy":
      raise NotImplementedError("Can only switch backends when the current "
                                "backend is 'numpy'. Current backend "
                                "is '{}'".format(node.backend))
    node.tensor = target.convert_to_tensor(node.tensor)
    node.backend = target
github google / TensorNetwork / tensornetwork / block_tensor / block_tensor.py View on Github external
def randn(cls, indices: List[Index],
          dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor":
    """Create a `BlockSparseTensor` with entries drawn from a standard
    normal distribution.

    Only the structurally non-zero elements (as determined by the charges
    and flows of `indices`) are sampled and stored.

    Args:
      indices: List of `Index` objects, one for each leg.
      dtype: An optional numpy dtype for the tensor entries.
    Returns:
      BlockSparseTensor
    """
    flows = [index.flow for index in indices]
    charges = [index.charges for index in indices]
    num_elements = compute_num_nonzero(charges, flows)
    # Initialization always happens on the numpy backend.
    numpy_backend = backend_factory.get_backend('numpy')
    return cls(
        data=numpy_backend.randn((num_elements,), dtype=dtype),
        indices=indices)
github google / TensorNetwork / tensornetwork / matrixproductstates / infinite_mps.py View on Github external
dtype: Type[np.number],
             backend: Optional[Text] = None):
    """
    Initialize a random `InfiniteMPS`. The resulting state
    is normalized. Its center-position is at 0.

    Args:
      d: A list of physical dimensions.
      D: A list of bond dimensions.
      dtype: A numpy dtype.
      backend: An optional backend.
    Returns:
      `InfiniteMPS`
    """
    #use numpy backend for tensor initialization
    be = backend_factory.get_backend('numpy')
    if len(D) != len(d) + 1:
      raise ValueError('len(D) = {} is different from len(d) + 1= {}'.format(
          len(D),
          len(d) + 1))
    if D[-1] != D[0]:
      raise ValueError('D[0]={} != D[-1]={}.'.format(D[0], D[-1]))

    tensors = [
        be.randn((D[n], d[n], D[n + 1]), dtype=dtype) for n in range(len(d))
    ]
    return cls(tensors=tensors, center_position=0, backend=backend)
github google / TensorNetwork / tensornetwork / network_components.py View on Github external
backend: The name of the backend or an instance of a `BaseBackend`.

    Raises:
      ValueError: If there is a repeated name in `axis_names` or if the length
        doesn't match the shape of the tensor.
    """
    if isinstance(tensor, BaseNode):
      #always use the `Node`'s backend
      backend = tensor.backend
      tensor = tensor.tensor
    if not backend:
      backend = config.default_backend
    if isinstance(backend, BaseBackend):
      backend_obj = backend
    else:
      backend_obj = backend_factory.get_backend(backend)
    self._tensor = backend_obj.convert_to_tensor(tensor)
    super().__init__(
        name=name,
        axis_names=axis_names,
        backend=backend_obj,
        shape=backend_obj.shape_tuple(self._tensor))
github google / TensorNetwork / tensornetwork / matrixproductstates / finite_mps.py View on Github external
dtype: Type[np.number],
             backend: Optional[Text] = None):
    """
    Initialize a random `FiniteMPS`. The resulting state
    is normalized. Its center-position is at 0.

    Args:
      d: A list of physical dimensions.
      D: A list of bond dimensions.
      dtype: A numpy dtype.
      backend: An optional backend.
    Returns:
      `FiniteMPS`
    """
    #use numpy backend for tensor initialization
    be = backend_factory.get_backend('numpy')
    if len(D) != len(d) - 1:
      raise ValueError('len(D) = {} is different from len(d) - 1 = {}'.format(
          len(D),
          len(d) - 1))
    D = [1] + D + [1]
    tensors = [
        be.randn((D[n], d[n], D[n + 1]), dtype=dtype) for n in range(len(d))
    ]
    return cls(tensors=tensors, center_position=0, backend=backend)
github google / TensorNetwork / tensornetwork / network_components.py View on Github external
Args:
      rank: The rank of the tensor.
      dimension: The dimension of each leg.
      name: A name for the node.
      axis_names:  axis_names for the node.
      backend: An optional backend for the node. If `None`, a default
        backend is used
      dtype: The dtype used to initialize a numpy-copy node.
        Note that this dtype has to be a numpy dtype, and it has to be 
        compatible with the dtype of the backend, e.g. for a tensorflow
        backend with a tf.Dtype=tf.floa32, `dtype` has to be `np.float32`.
    """

    if not backend:
      backend = config.default_backend
    backend_obj = backend_factory.get_backend(backend)

    self.rank = rank
    self.dimension = dimension
    self._tensor = None
    self.copy_node_dtype = dtype

    super().__init__(
        name=name,
        axis_names=axis_names,
        backend=backend_obj,
        shape=(dimension,) * rank)