How to use the crypten.nn.module.Module class in crypten

To help you get started, we’ve selected a few crypten examples based on popular ways crypten.nn.module.Module is used in public projects.

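All of the examples below revolve around subclassing crypten.nn.module.Module and overriding forward. As a minimal, hedged sketch of that pattern (the Double module here is illustrative, not part of the library):

import torch
import crypten
import crypten.nn as cnn

crypten.init()

class Double(cnn.Module):
    """Toy module that doubles its input (illustrative only)."""

    def forward(self, input):
        return input.mul(2)

x = crypten.cryptensor(torch.tensor([1.0, 2.0]))
print(Double()(x).get_plain_text())  # tensor([2., 4.])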

github facebookresearch / CrypTen / crypten / nn / module.py
    def forward(self, input):
        return input.sum(dim=self.dim, keepdim=self.keepdim)

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        if attributes is None:
            attributes = {}
        dim = attributes["axes"]
        if "keepdims" not in attributes:
            attributes["keepdims"] = 1
        keepdim = attributes["keepdims"] == 1
        return ReduceSum(dim, keepdim)


class Squeeze(Module):
    r"""
    Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.

    For example, if `input` is of shape:
    :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor
    will be of shape: :math:`(A \times B \times C \times D)`.

    When :attr:`dimension` is given, a squeeze operation is done only in the given
    dimension. If `input` is of shape: :math:`(A \times 1 \times B)`,
    ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
    will squeeze the tensor to the shape :math:`(A \times B)`.

    .. note:: The returned tensor shares the storage with the input tensor,
            so changing the contents of one will change the contents of the other.

    Args:
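
The shape rule in the Squeeze docstring mirrors torch.squeeze, so it can be sanity-checked on plain tensors before encrypting anything:

import torch

x = torch.zeros(2, 1, 3)           # shape (A x 1 x B)
print(torch.squeeze(x, 0).size())  # torch.Size([2, 1, 3]): dim 0 is not size 1, unchanged
print(torch.squeeze(x, 1).size())  # torch.Size([2, 3]): the size-1 dim is removed
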
github facebookresearch / CrypTen / crypten / nn / module.py
    def from_onnx(parameters=None, attributes=None):
        if parameters is None:
            parameters = {}

        # create module:
        in_features = parameters["weight"].size(1)
        out_features = parameters["weight"].size(0)
        module = Linear(in_features, out_features, bias=("bias" in parameters))

        # set parameters:
        for key, value in parameters.items():
            module.set_parameter(key, value)
        return module


class Conv2d(Module):
    r"""
    Module that performs 2D convolution.

    Applies a 2D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})`
    can be precisely described as:

    .. math::
        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)


    where :math:`\star` is the valid 2D `cross-correlation`_ operator,
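
The Linear.from_onnx hook above infers the layer sizes from the weight matrix and copies every parameter it is given. A hedged sketch of driving it by hand (the parameters dict is constructed to match the size() calls in the code, not taken from any documented entry point):

import torch
import crypten
from crypten.nn.module import Linear

crypten.init()

# weight is (out_features, in_features), matching the size() calls above
parameters = {"weight": torch.randn(4, 3), "bias": torch.randn(4)}
module = Linear.from_onnx(parameters=parameters)
# module is a crypten Linear with in_features=3, out_features=4, and a bias
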
github facebookresearch / CrypTen / crypten / nn / loss.py
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from ..autograd_cryptensor import AutogradCrypTensor
from .module import Module


class _Loss(Module):
    """
    Base criterion class that mimics PyTorch's Loss.
    """

    def __init__(self, reduction="mean"):
        super(_Loss, self).__init__()
        if reduction != "mean":
            raise NotImplementedError("reduction %s not supported" % reduction)
        self.reduction = reduction

    def forward(self, x, y):
        raise NotImplementedError("forward not implemented")

    def __call__(self, x, y):
        return self.forward(x, y)
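
Concrete criteria subclass _Loss and implement forward on a pair of (possibly encrypted) tensors. A sketch of a mean-squared-error criterion in this style (CrypTen defines its own losses in this same file; this illustrates the pattern rather than reproducing the library's implementation):

class MSELoss(_Loss):
    """Mean squared error between an encrypted prediction x and a target y."""

    def forward(self, x, y):
        assert x.size() == y.size(), "input and target must have the same size"
        return (x - y).square().mean()
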
github facebookresearch / CrypTen / crypten / nn / module.py
    def forward(self, input):
        assert isinstance(input, (list, tuple)), "input must be list or tuple"
        tensor, shape = input

        # shape is not data so we can get plain text
        if crypten.is_encrypted_tensor(shape):
            shape = shape.get_plain_text()
        return tensor.reshape(shape.long().tolist())

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        return Reshape()


class Dropout(Module):
    r"""During training, randomly zeroes some of the elements of the input
    tensor with probability :attr:`p` using samples from a Bernoulli
    distribution. Furthermore, the outputs are scaled by a factor of
    :math:`\frac{1}{1-p}` during training. This means that during evaluation
    the module simply computes an identity function.

    Args:
        p: probability of an element to be zeroed. Default: 0.5

    Shape:
        - Input: :math:`(*)`. Input can be of any shape
        - Output: :math:`(*)`. Output is of the same shape as input
    """

    def __init__(self, p=0.5):
        super().__init__()
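
Note that Reshape takes its target shape as a second input rather than as a module attribute, mirroring the ONNX Reshape op. A hedged usage sketch (assuming crypten.init() has been called as usual):

import torch
import crypten
from crypten.nn.module import Reshape

crypten.init()
x = crypten.cryptensor(torch.arange(6.0))

out = Reshape()([x, torch.tensor([2, 3])])  # shape passed as a plain tensor
print(out.size())  # torch.Size([2, 3])
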
github facebookresearch / CrypTen / crypten / nn / module.py
class Sub(Module):
    """
    Module that subtracts two values.
    """

    def forward(self, input):
        assert isinstance(input, (list, tuple)), "input must be list or tuple"
        assert len(input) == 2, "input must contain two tensors"
        return input[0].sub(input[1])

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        return Sub()


class Exp(Module):
    """
    Module that calculates the exponential of the given input tensor, element-wise.
    """

    def forward(self, input):
        return input.exp()

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        return Exp()


class ReduceSum(Module):
    """
    Module that computes the sum of the input tensor's elements along the provided axes.
    If `keepdim` is True, the output tensor is of the same size as input
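
Sub expects its two operands packed into a single list, while Exp is unary; a hedged sketch of both (keeping in mind that CrypTen's exp is an MPC approximation, so decrypted values are close to, not exactly, torch.exp):

import torch
import crypten
from crypten.nn.module import Sub, Exp

crypten.init()
a = crypten.cryptensor(torch.tensor([3.0, 4.0]))
b = crypten.cryptensor(torch.tensor([1.0, 2.0]))

diff = Sub()([a, b])          # the two operands travel as one list input
print(diff.get_plain_text())  # tensor([2., 2.])

e = Exp()(a)                  # element-wise exponential, approximated under MPC
print(e.get_plain_text())     # approximately tensor([20.09, 54.60])
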
github facebookresearch / CrypTen / crypten / nn / module.py
        self.p = p

    def forward(self, input):
        if self.training:
            result = input.dropout(p=self.p)
            return result
        return input

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        if attributes is None:
            attributes = {}
        return Dropout(attributes["ratio"])


class DropoutNd(Module):
    """Randomly zero out entire channels (a channel is a nD feature map,
    e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
    batched input is a nD tensor :math:`\text{input}[i, j]`).
    Each channel will be zeroed out independently on every forward call with
    probability :attr:`p` using samples from a Bernoulli distribution.

    Args:
        p (float, optional): probability of an element to be zeroed.
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.p = p

    def forward(self, input):
        if self.training:
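
Because forward branches on self.training, the usual PyTorch-style train()/eval() toggling decides whether dropout is applied; a hedged sketch:

import torch
import crypten
from crypten.nn.module import Dropout

crypten.init()
x = crypten.cryptensor(torch.ones(4))

drop = Dropout(p=0.5)
drop.train()   # training mode: elements are zeroed, survivors scaled by 1/(1-p)
y_train = drop(x)

drop.eval()    # evaluation mode: forward is the identity
print(drop(x).get_plain_text())  # tensor([1., 1., 1., 1.])
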
github facebookresearch / CrypTen / crypten / nn / module.py
        tensor, indices = input

        # indices are not data so we can get plain text:
        if crypten.is_encrypted_tensor(indices):
            indices = indices.get_plain_text().long()
        result = tensor.take(indices, self.dimension)
        return result

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        if attributes is None:
            attributes = {}
        return Gather(attributes["axis"])


class _ConstantPad(Module):
    """
    Module that pads a tensor.
    """

    def __init__(self, padding, value, mode="constant"):
        super().__init__()
        if isinstance(padding, int):
            padding = [padding]
        self.padding = padding
        self.value = value
        self.mode = mode

    def forward(self, input):
        return input.pad(self.padding, value=self.value, mode="constant")

    @staticmethod
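
Gather follows the same pack-the-inputs-into-a-list convention as Reshape, with plaintext indices allowed directly; a hedged sketch based on the forward code above:

import torch
import crypten
from crypten.nn.module import Gather

crypten.init()
x = crypten.cryptensor(torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]))

gather = Gather(0)                        # gather along axis 0
rows = gather([x, torch.tensor([0, 2])])  # plain indices are used as-is
print(rows.get_plain_text())              # tensor([[1., 2.], [5., 6.]])
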
github facebookresearch / CrypTen / crypten / nn / module.py
            remove_from(self.__dict__, self._parameters, self._buffers)
            modules[name] = value
        elif modules is not None and name in modules:
            if value is not None:
                raise TypeError(
                    "cannot assign '{}' as child module '{}' "
                    "(torch.nn.Module or None expected)".format(
                        torch.typename(value), name
                    )
                )
            modules[name] = value
        else:
            object.__setattr__(self, name, value)


class Container(Module):
    """
    Container allows distinguishing between individual modules and containers.
    """

    pass


class Graph(Container):
    """
    Acyclic graph of modules.

    The module maintains a dict of named modules and a graph structure stored in
    a dict where each key is a module name, and the associated value is a list
    of module names that provide the input into the module.
    """
github facebookresearch / CrypTen / crypten / nn / module.py
        # sum over all but the batch and channel dimensions:
        result = input.shallow_copy()
        for dim in range(2, input.dim()):
            result = result.sum(dim=dim, keepdim=True)

        # return average value:
        first_two_dims = input.size(0) * input.size(1)
        return result.div(input.nelement() / float(first_two_dims))

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        return GlobalAveragePool()


class _BatchNorm(Module):
    """
    Module that performs batch normalization on 1D tensors.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1):
        super().__init__()

        # initialize model parameters and buffers:
        pytorch_module = torch.nn.BatchNorm1d(num_features)
        for param in ["weight", "bias"]:
            self.register_parameter(param, getattr(pytorch_module, param))
        for buffer in ["running_mean", "running_var"]:
            self.register_buffer(buffer, getattr(pytorch_module, buffer))

        # set model attributes:
        self.eps = eps
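
For a 4D input, the GlobalAveragePool loop above collapses the spatial dimensions, and the final division turns the sum into a per-channel mean, since nelement / (N * C) equals H * W. A small worked example on plain tensors:

import torch

x = torch.arange(24.0).reshape(1, 2, 3, 4)  # (N, C, H, W) = (1, 2, 3, 4)

# sum over all but the batch and channel dimensions, as in the loop above:
result = x.sum(dim=3, keepdim=True).sum(dim=2, keepdim=True)

# divide by nelement / (N * C) = H * W = 12 to recover the mean:
first_two_dims = x.size(0) * x.size(1)
result = result / (x.nelement() / float(first_two_dims))
print(result.squeeze())  # tensor([ 5.5000, 17.5000]), the per-channel means
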
github facebookresearch / CrypTen / crypten / nn / module.py
                prod *= x.size(i)
            return x.view(prod, -1)

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        if attributes is None:
            attributes = {}
        # axis : int (default is 1)
        axis = 1
        if "axis" in attributes:
            axis = int(attributes["axis"])
            assert axis >= 0, "axis must not be negative"
        return Flatten(axis)


class Shape(Module):
    """
    Module that returns the shape of a tensor. If the input tensor is encrypted,
    the output size vector will be encrypted, too.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        size = torch.tensor(x.size())
        if crypten.is_encrypted_tensor(x):
            size = crypten.cryptensor(size.float())
        return size

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
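
A hedged sketch of Shape on an encrypted input (the size comes back encrypted, as the docstring promises, so it must be decrypted to inspect):

import torch
import crypten
from crypten.nn.module import Shape

crypten.init()
x = crypten.cryptensor(torch.zeros(2, 3))

size = Shape()(x)             # encrypted, because the input is encrypted
print(size.get_plain_text())  # tensor([2., 3.])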