How to use the crypten.mpc.mpc.MPCTensor class in crypten

To help you get started, we’ve selected a few CrypTen examples based on popular ways MPCTensor is used in public projects.


github facebookresearch / CrypTen / crypten / mpc / mpc.py
def pow(self, p, **kwargs):
    """Computes an element-wise exponent `p` of a tensor, where `p` is an
    integer."""
    if isinstance(p, float) and int(p) == p:
        p = int(p)

    if not isinstance(p, int):
        raise TypeError(
            "pow must take an integer exponent. For non-integer powers, use"
            " pos_pow with positive-valued base."
        )
    if p < -1:
        return self.reciprocal(**kwargs).pow(-p)
    elif p == -1:
        return self.reciprocal(**kwargs)
    elif p == 0:
        # Note: This returns 0 ** 0 -> 1 when inputs have zeros.
        # This is consistent with PyTorch's pow function.
        return MPCTensor(torch.ones(self.size()))
    elif p == 1:
        return self.clone()
    elif p == 2:
        return self.square()
    elif p % 2 == 0:
        # Even exponent: x^p = (x^2)^(p / 2)
        return self.square().pow(p // 2)
    else:
        # Odd exponent: x^p = x * (x^2)^((p - 1) / 2)
        return self.square().pow((p - 1) // 2).mul_(self)
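A minimal usage sketch: `crypten.cryptensor` and `get_plain_text` are CrypTen's standard entry points for encrypting and revealing tensors, and results are approximate because of the fixed-point encoding:

import torch
import crypten

crypten.init()  # set up the default communicator

x = crypten.cryptensor(torch.tensor([1.0, 2.0, 3.0]))
y = x.pow(3)               # evaluated by repeated squaring on secret shares
print(y.get_plain_text())  # approximately tensor([ 1.,  8., 27.])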
github facebookresearch / CrypTen / crypten / mpc / mpc.py
def split(self, split_size, dim=0):
    """Splits the tensor into chunks along `dim`; the underlying shares are
    split and re-wrapped in new MPCTensors without any decryption."""
    shares = self.share.split(split_size, dim=dim)
    results = tuple(MPCTensor(0, ptype=self.ptype) for _ in range(len(shares)))
    for i in range(len(shares)):
        results[i].share = shares[i]
    return results
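For example, each chunk comes back as its own encrypted tensor (a sketch, assuming an initialized CrypTen session):

import torch
import crypten

crypten.init()

x = crypten.cryptensor(torch.arange(6.0))
chunks = x.split(2)  # three encrypted chunks of size 2
print([c.get_plain_text() for c in chunks])
# [tensor([0., 1.]), tensor([2., 3.]), tensor([4., 5.])]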
github facebookresearch / CrypTen / crypten / mpc / mpc.py
def argmax(self, dim=None, keepdim=False, one_hot=False):
    """Returns the indices of the maximum value of all elements in the
    `input` tensor.

    If multiple values are equal to the maximum, ties will be broken
    (randomly). Note that this deviates from PyTorch's implementation,
    since PyTorch does not break ties randomly but rather returns the
    lowest index of a maximal value.

    If `keepdim` is `True`, the output tensors are of the same size as
    `input` except in the dimension `dim`, where they are of size 1.
    Otherwise, `dim` is squeezed, resulting in the output tensors having
    one fewer dimension than `input`.

    If `one_hot` is `True`, the output tensor will have the same size as the
    input and contain elements of value `1` on argmax indices (with random
    tiebreaking) and value `0` on other indices.
    """
    if self.dim() == 0:
        return MPCTensor(torch.ones(())) if one_hot else MPCTensor(torch.zeros(()))

    input = self.flatten() if dim is None else self.transpose(dim, -1)

    result = input._argmax_helper()

    # Multiply by a random permutation to give each maximum a random priority
    result *= crypten.mpc.randperm(input.size())
    result = result._argmax_helper()

    result = result.view(self.size()) if dim is None else result.transpose(dim, -1)
    return result if one_hot else _one_hot_to_index(result, dim, keepdim)
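A sketch of calling `argmax` on encrypted data; the result stays encrypted until it is explicitly revealed, and (per the docstring above) ties are broken randomly:

import torch
import crypten

crypten.init()

x = crypten.cryptensor(torch.tensor([[1.0, 5.0, 3.0], [2.0, 2.0, 9.0]]))
idx = x.argmax(dim=1)                 # encrypted indices
print(idx.get_plain_text())           # tensor([1., 2.])
mask = x.argmax(dim=1, one_hot=True)  # encrypted one-hot mask, same size as x
print(mask.get_plain_text())          # tensor([[0., 1., 0.], [0., 0., 1.]])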
github facebookresearch / CrypTen / crypten / mpc / mpc.py
def __setitem__(self, index, value):
    """Set tensor values by index"""
    if not isinstance(value, MPCTensor):
        # Encrypt plaintext values on the fly so share types stay consistent
        value = MPCTensor(value, ptype=self.ptype)
    self._tensor.__setitem__(index, value._tensor)
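Indexed assignment accepts either another MPCTensor or a plaintext value, which is encrypted on the fly by the `MPCTensor` constructor (a sketch):

import torch
import crypten

crypten.init()

x = crypten.cryptensor(torch.zeros(4))
x[1] = crypten.cryptensor(torch.tensor(5.0))  # assign an encrypted value
x[2] = torch.tensor(7.0)                      # plaintext; encrypted on assignment
print(x.get_plain_text())                     # tensor([0., 5., 7., 0.])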
github facebookresearch / CrypTen / crypten / mpc / mpc.py
def softmax(self, dim, **kwargs):
    """Compute the softmax of a tensor's elements along a given dimension"""
    # 0-d case
    if self.dim() == 0:
        assert dim == 0, "Improper dim argument"
        return MPCTensor(torch.ones(()))

    if self.size(dim) == 1:
        return MPCTensor(torch.ones(self.size()))

    # Subtract the maximum for numerical stability before exponentiating
    maximum_value = self.max(dim, keepdim=True)[0]
    logits = self - maximum_value
    numerator = logits.exp()
    denominator = numerator.sum(dim, keepdim=True)
    return numerator / denominator
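A usage sketch; because `exp` and division are iterative approximations under MPC, the output is close to, but not bit-identical to, `torch.softmax`:

import torch
import crypten

crypten.init()

logits = crypten.cryptensor(torch.tensor([[1.0, 2.0, 3.0]]))
probs = logits.softmax(dim=1)
print(probs.get_plain_text())             # roughly tensor([[0.090, 0.245, 0.665]])
print(probs.sum(dim=1).get_plain_text())  # close to 1.0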
github facebookresearch / CrypTen / crypten / mpc / mpc.py
def shallow_copy(self):
    """Create a shallow copy of the input tensor"""
    result = MPCTensor(None)
    result._tensor = self._tensor
    result.ptype = self.ptype
    return result
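As the name suggests, the copy aliases the underlying share tensor rather than duplicating it, so it is cheap but not independent (a small sketch):

import torch
import crypten

crypten.init()

x = crypten.cryptensor(torch.tensor([1.0, 2.0]))
y = x.shallow_copy()
assert y._tensor is x._tensor  # same shares; no re-encryption, no extra memory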
github facebookresearch / CrypTen / crypten / mpc / mpc.py
def scatter_(self, dim, index, src):
    """Writes all values from the tensor `src` into `self` at the indices
    specified in the `index` tensor. For each value in `src`, its output
    index is specified by its index in `src` for `dimension != dim` and by
    the corresponding value in `index` for `dimension = dim`.
    """
    if torch.is_tensor(src):
        src = MPCTensor(src)
    assert isinstance(src, MPCTensor), (
        "Unrecognized scatter src type: %s" % type(src)
    )
    # `index` is plaintext; the scatter is applied directly to the shares
    self.share.scatter_(dim, index, src.share)
    return self
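Note that `index` remains a plaintext `torch.LongTensor`; only the scattered values are secret-shared. A sketch:

import torch
import crypten

crypten.init()

x = crypten.cryptensor(torch.zeros(2, 4))
src = crypten.cryptensor(torch.arange(8.0).view(2, 4))
index = torch.tensor([[3, 2, 1, 0], [0, 1, 2, 3]])  # plaintext indices
x.scatter_(1, index, src)
print(x.get_plain_text())
# tensor([[3., 2., 1., 0.],
#         [4., 5., 6., 7.]])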
github facebookresearch / CrypTen / crypten / mpc / __init__.py
def randperm(size):
    """Generate an MPCTensor whose rows each contain the values
    [1, 2, ..., n] in a random order, where `n` is the length of each
    row (`size[-1]`).
    """
    result = MPCTensor(None)
    result._tensor = __default_provider.randperm(size)
    result.ptype = ptype.arithmetic
    return result
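A sketch of calling it directly; in the CrypTen version these snippets come from, this function is exposed on the `crypten.mpc` module, and `argmax` above uses it for random tiebreaking:

import crypten

crypten.init()

perm = crypten.mpc.randperm((2, 5))
print(perm.get_plain_text())
# each row is a random permutation of 1..5, e.g.
# tensor([[3., 1., 5., 2., 4.],
#         [2., 5., 1., 4., 3.]])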
github facebookresearch / CrypTen / crypten / mpc / mpc.py
def _max_pool2d_backward(
    self, indices, kernel_size, padding=None, stride=None, output_size=None
):
    """Implements the backward pass for a `max_pool2d` call."""
    # Normalize arguments (condensed from the full source): default the
    # stride to the kernel size and the padding to zero, then unpack.
    if padding is None:
        padding = 0
    if isinstance(padding, int):
        padding = (padding, padding)
    if stride is None:
        stride = kernel_size
    if isinstance(stride, int):
        stride = (stride, stride)
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    p0, p1 = padding
    s0, s1 = stride
    k0, k1 = kernel_size

    # Computes one-hot gradient blocks from each output variable that
    # has non-zero value corresponding to the argmax of the corresponding
    # block of the max_pool2d input.
    kernels = self.view(self.size() + (1, 1)) * indices

    # Use minimal size if output_size is not specified.
    if output_size is None:
        output_size = (
            self.size(0),
            self.size(1),
            s0 * self.size(2) - 2 * p0,
            s1 * self.size(3) - 2 * p1,
        )

    # Sum the one-hot gradient blocks at corresponding index locations.
    result = MPCTensor(torch.zeros(output_size)).pad([p0, p0, p1, p1])
    for i in range(self.size(2)):
        for j in range(self.size(3)):
            left_ind = s0 * i
            top_ind = s1 * j

            result[
                :, :, left_ind : left_ind + k0, top_ind : top_ind + k1
            ] += kernels[:, :, i, j]

    # Strip the padding before returning the gradient w.r.t. the input
    result = result[:, :, p0 : result.size(2) - p0, p1 : result.size(3) - p1]
    return result
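A hedged sketch of how this private helper pairs with the forward pass. The keyword names below (`kernel_size`, `stride`, `padding`) are assumed from the variables the excerpt uses; in practice CrypTen's autograd invokes this helper for you rather than user code:

import torch
import crypten

crypten.init()

x = crypten.cryptensor(torch.randn(1, 1, 4, 4))
# Forward pass: 2x2 windows; indices come back as encrypted one-hot blocks
y, indices = x.max_pool2d(2, return_indices=True)
grad_out = crypten.cryptensor(torch.ones(y.size()))  # upstream gradient
grad_in = grad_out._max_pool2d_backward(
    indices, kernel_size=(2, 2), stride=(2, 2), padding=(0, 0)
)
print(grad_in.size())  # matches the input: torch.Size([1, 1, 4, 4])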