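# Backend adapters: each class below wraps a single tensor framework (chainer,
# mxnet gluon/symbol, numpy, jax, cupy, torch, tensorflow, keras) behind the
# same small interface (is_appropriate_type, from_numpy/to_numpy, arange, tile,
# stack_on_zeroth_dimension, is_float_type, and, where available, layers or
# create_symbol/eval_symbol), so callers need no framework-specific branches.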
class ChainerBackend(AbstractBackend):
framework_name = 'chainer'
def __init__(self):
import chainer
import numpy
self.numpy = numpy
self.chainer = chainer
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.chainer.Variable)
def from_numpy(self, x):
return self.chainer.Variable(x.astype('float32'))
def to_numpy(self, x):
        if isinstance(x, self.chainer.Variable):
            x = x.data  # unwrap Variable to its underlying array
        return self.chainer.backends.cuda.to_cpu(x)
    def stack_on_zeroth_dimension(self, tensors: list):
        return self.chainer.functions.stack(tensors)
    def tile(self, x, repeats):
        return self.chainer.functions.tile(x, repeats)
    def is_float_type(self, x):
        return x.dtype in ('float16', 'float32', 'float64', 'float128')
    def layers(self):
        from .layers import chainer
        return chainer
class GluonBackend(AbstractBackend):
framework_name = 'mxnet.ndarray'
def __init__(self):
import mxnet
self.mx = mxnet
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.mx.nd.NDArray)
def from_numpy(self, x):
var = self.mx.nd.array(x, dtype=x.dtype)
var.attach_grad()
return var
def to_numpy(self, x):
        return self.mx.nd.NDArray.asnumpy(x)
    def stack_on_zeroth_dimension(self, tensors: list):
        return self.mx.nd.stack(*tensors)
    def tile(self, x, repeats):
        return self.mx.nd.tile(x, repeats)
    def is_float_type(self, x):
        return 'float' in str(x.dtype)
    def layers(self):
        from .layers import gluon
        return gluon
class UnknownSize:
    # pseudo-symbol for symbolic frameworks that do not expose concrete sizes;
    # any arithmetic with an unknown size yields the unknown size again
    def __floordiv__(self, other):
        return self
    def __eq__(self, other):
        return True  # we don't know actual size
    def __mul__(self, other):
        return self
    def __rmul__(self, other):
        return self
    def __hash__(self):
        return None.__hash__()
class NumpyBackend(AbstractBackend):
framework_name = 'numpy'
def __init__(self):
import numpy
self.np = numpy
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.np.ndarray)
def from_numpy(self, x):
return x
def to_numpy(self, x):
return x
    def arange(self, start, stop):
        return self.np.arange(start, stop)
class JaxBackend(NumpyBackend):
    framework_name = 'jax'
    def __init__(self):
        super(JaxBackend, self).__init__()
        self.onp = self.np  # keep a handle to the original (host) numpy
        import jax.numpy
        self.np = jax.numpy
    def from_numpy(self, x):
        return self.np.asarray(x)
    def to_numpy(self, x):
        return self.onp.asarray(x)
class MXNetBackend(AbstractBackend):
framework_name = 'mxnet.symbol'
def __init__(self):
import mxnet
self.mx = mxnet
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.mx.symbol.Symbol)
def create_symbol(self, shape, dtype='float32'):
# mxnet accepts zeros as undefined dimensions
shape = tuple(0 if d is None else d for d in shape)
var = self.mx.symbol.Variable('input', shape=shape, dtype=dtype)
return var
    def eval_symbol(self, symbol, input_dict):
        # bind the provided inputs on CPU and run one forward pass
        # (sketch of the truncated body; exact binding details are an assumption)
        args = {var.name: self.mx.nd.array(val) for var, val in input_dict}
        executor = symbol.bind(ctx=self.mx.cpu(), args=args)
        executor.forward()
        return executor.outputs[0].asnumpy()
    def stack_on_zeroth_dimension(self, tensors: list):
        return self.mx.symbol.stack(*tensors)
    def tile(self, x, repeats):
        return self.mx.symbol.tile(x, repeats)
    def is_float_type(self, x):
        return 'float' in str(x.infer_type()[1][0])
    def layers(self):
        from .layers import gluon
        return gluon
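# Symbolic backends (mxnet.symbol, keras) expose create_symbol/eval_symbol in
# place of eager from_numpy/to_numpy: build a placeholder, compose operations on
# it, then feed a concrete array to evaluate. Hypothetical sketch (names and
# values illustrative only):
#
#     backend = MXNetBackend()
#     sym = backend.create_symbol(shape=(None, 4))   # None marks an unknown dim
#     out = backend.eval_symbol(sym * 2, [(sym, numpy.ones((3, 4)))])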
class CupyBackend(AbstractBackend):
framework_name = 'cupy'
def __init__(self):
import cupy
self.cupy = cupy
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.cupy.ndarray)
def from_numpy(self, x):
return self.cupy.asarray(x)
def to_numpy(self, x):
return self.cupy.asnumpy(x)
    def arange(self, start, stop):
        return self.cupy.arange(start, stop)
    def stack_on_zeroth_dimension(self, tensors: list):
        return self.cupy.stack(tensors)
    def tile(self, x, repeats):
        return self.cupy.tile(x, repeats)
    def is_float_type(self, x):
        return x.dtype in ('float16', 'float32', 'float64', 'float128')
class TorchBackend(AbstractBackend):
framework_name = 'torch'
def __init__(self):
import torch
self.torch = torch
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.torch.Tensor)
def from_numpy(self, x):
variable = self.torch.from_numpy(x)
if self.is_float_type(variable):
# attach grad only to floating types
variable.requires_grad = True
        return variable
    def stack_on_zeroth_dimension(self, tensors: list):
        return self.torch.stack(tensors)
    def tile(self, x, repeats):
        return x.repeat(repeats)
    def is_float_type(self, x):
        return x.dtype in [self.torch.float16, self.torch.float32, self.torch.float64]
    def layers(self):
        from .layers import torch
        return torch
class KerasBackend(AbstractBackend):
framework_name = 'keras'
def __init__(self):
import keras
self.keras = keras
self.K = keras.backend
def is_appropriate_type(self, tensor):
return self.K.is_tensor(tensor) and self.K.is_keras_tensor(tensor)
def create_symbol(self, shape):
return self.keras.Input(batch_shape=shape)
def eval_symbol(self, symbol, input_dict):
(variable, value), = input_dict
        model = self.keras.models.Model(variable, symbol)
        return model.predict_on_batch(value)
class TensorflowBackend(AbstractBackend):
framework_name = 'tensorflow'
def __init__(self):
import tensorflow
self.tf = tensorflow
def is_appropriate_type(self, tensor):
return isinstance(tensor, (self.tf.Tensor, self.tf.Variable))
def from_numpy(self, x):
assert self.tf.executing_eagerly()
return self.tf.convert_to_tensor(x)
def to_numpy(self, x):
assert self.tf.executing_eagerly()
        return x.numpy()
    def reshape(self, x, shape):
        return self.tf.reshape(x, shape)
    def transpose(self, x, axes):
        return self.tf.transpose(x, axes)
    def stack_on_zeroth_dimension(self, tensors: list):
        return self.tf.stack(tensors)
    def tile(self, x, repeats):
        return self.tf.tile(x, repeats)
    def is_float_type(self, x):
        return x.dtype in ('float16', 'float32', 'float64', 'float128')
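# Usage sketch (illustrative only; assumes torch and numpy are installed). Every
# backend exposes the same small surface, so calling code stays framework-agnostic:
if __name__ == '__main__':
    import numpy as np
    backend = TorchBackend()
    x = backend.from_numpy(np.arange(6, dtype='float32').reshape(2, 3))
    stacked = backend.stack_on_zeroth_dimension([x, x])  # shape (2, 2, 3)
    tiled = backend.tile(x, (2, 1))                      # shape (4, 3)
    print(backend.is_float_type(x), tuple(stacked.shape), tuple(tiled.shape))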