How to use the syft.Variable function in syft

To help you get started, we've selected a few syft.Variable examples, drawn from common ways it is used in public projects.
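
syft.Variable comes from the pre-0.2 PySyft API, where it mirrors torch.autograd.Variable: it wraps a tensor, exposes .data and .grad, and carries a .child chain node that syft's tensor types (sy._LocalTensor, sy._GeneralizedPointerTensor, SPDZ shares, and so on) hook into. As a minimal sketch, assuming a TorchHook has been set up the way PySyft examples of that generation do (the setup line is an assumption, not taken from the snippets below):

import torch
import syft as sy

hook = sy.TorchHook()   # assumed setup, typical for this API generation
x = sy.Variable(torch.FloatTensor([1.0, 2.0, 3.0]))

print(x.data)    # the wrapped tensor
print(x.child)   # the chain node syft attaches to the variable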


From OpenMined/PySyft: syft/core/frameworks/torch/tensor.py
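This constructor-style snippet attaches already-encoded data directly as self.child; otherwise the incoming tensor or variable is encoded first, and when the input is a variable the encoded result is wrapped back into a sy.Variable (an empty variable falls back to wrapping an empty sy.LongTensor) before the original chain tail is re-attached.
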
        if already_encoded:
            self.child = child
        else:
            # torch_utils.assert_has_only_torch_tensorvars(child)
            chain_tail = None
            if not isinstance(child.child, sy._LocalTensor):
                chain_tail = child.child

            if torch_utils.is_variable(child):
                var_data = child.data
                if len(var_data.size()) > 0:
                    self.encode(var_data)  # this puts in .child an encoded Tensor
                    self.child = sy.Variable(self.child)
                else:
                    self.child = sy.Variable(sy.LongTensor())
                self.child.child = chain_tail
            else:
                if len(child.size()) > 0:
                    self.encode(child)
                else:
                    self.child = sy.LongTensor()
                self.child.child = chain_tail
From OpenMined/PySyft: syft/core/frameworks/torch/tensor.py
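Here the result of a fixed-precision operation is truncated: the combined fractional and integral precision is computed from both operands, and when the chain ends in a sy._GeneralizedPointerTensor the raw data of a sy.Variable is pulled out via .data so that an offset and a random mask can be built on the remote workers.
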
                result_precision_fractional = max(
                    self.precision_fractional, other.precision_fractional
                )
            else:
                result_precision_fractional = self.precision_fractional

            result_precision_integral = self.precision_integral
            result_precision = result_precision_fractional + result_precision_integral
            result_kappa = self.kappa

            if result_precision_fractional > 0:
                tail_node = torch_utils.find_tail_of_chain(torch_tensorvar)
                # print("result_precision_fractional > 0")
                if isinstance(tail_node, sy._GeneralizedPointerTensor):
                    # print("truncating MPC")
                    if isinstance(torch_tensorvar, sy.Variable):
                        a = torch_tensorvar.data
                    else:
                        a = torch_tensorvar

                    workers = list(tail_node.pointer_tensor_dict.keys())

                    b_ = int((self.base ** (2 * result_precision + 1)))

                    b = a + b_

                    rand_shape = torch.IntTensor(list(b.get_shape())).prod()

                    mask = (
                        torch.LongTensor(1)
                        .send(workers[0])
                        .expand(rand_shape)
From OpenMined/PySyft: syft/core/frameworks/torch/tensor.py
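init_grad_ lazily creates the gradient: it wraps sy.zeros(self.size()) in a sy.Variable, casts its data to the same tensor type as self.data, empties it with native_set_(), and assigns the current owner to both the gradient's child and its data's child.
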
def init_grad_(self):
        """Initialise grad as an empty tensor."""
        if self.grad is None or torch_utils.is_tensor_empty(self.grad):
            var_grad = sy.Variable(sy.zeros(self.size()))
            if type(var_grad.data) != type(self.data):  # noqa: E721
                var_grad.data = var_grad.data.type(type(self.data))
            self.grad = var_grad
            self.grad.native_set_()
            self.grad.child.owner = self.owner
            self.grad.data.child.owner = self.owner
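
As a minimal sketch of calling init_grad_ directly, assuming the hooked sy.Variable API used throughout these snippets (the setup is assumed, not taken from the source):

import torch
import syft as sy

# assumes a TorchHook has already been created, as in the other examples
v = sy.Variable(torch.FloatTensor([0.5, 1.5]))
v.init_grad_()              # grad becomes an (emptied) sy.Variable of matching type
print(v.grad.child.owner)   # same owner as v, as set in the snippet above
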
From OpenMined/PySyft: syft/core/frameworks/torch/tensor.py
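get() turns a shared variable back into a plaintext sy.Variable: the data shares are collected with self.data.get(), and if a gradient is present its shares are summed with sum_get(), reduced modulo spdz.field, and reattached to the result through init_grad_ and assign_grad_; for plain tensors the shares are simply summed and reduced modulo the field.
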
def get(self, deregister_ptr: bool = False):
        if torch_utils.is_variable(self.child):
            var = sy.Variable(self.data.get())
            var.child = None
            if hasattr(self, "grad") and self.grad is not None:
                var_grad = self.grad.shares.child.sum_get()
                value = var_grad.data % spdz.field
                # TODO: Add this thing for negative values
                # gate = (value > spdz.torch_max_value).long()
                # neg_nums = (value - spdz.torch_field) * gate
                # pos_nums = value * (1 - gate)
                # result = neg_nums + pos_nums
                var_grad.data = value
                var.init_grad_()
                var.assign_grad_(var_grad)
            return var
        # TODO: have deregister_ptr do something
        value = self.shares.child.sum_get() % spdz.field
From OpenMined/PySyft: syft/core/frameworks/torch/tensor.py
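share_scalar turns a public integer into shares that are compatible with the existing ones: a constant LongTensor of the right shape is built, wrapped in a sy.Variable when the parent chain is typed as "syft.Variable", and then shared across the same workers that hold self.shares.
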
def share_scalar(self, scalar: int):
        other = torch.zeros(list(self.get_shape())).long() + scalar

        # If the parent is a Variable type then we need to cast this to
        # a Variable of longs instead (which is silly and redundant, but
        # I need this to work by Monday so I'm hacking it in here... realistically
        # SPDZTensor should NEVER point to variable objects. TODO: fix
        if self.torch_type == "syft.Variable":
            other = sy.Variable(other)

        other = other.share(*list(self.shares.child.pointer_tensor_dict.keys())).child

        return other
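
A sketch of the public-scalar path this supports. The hook and worker setup below is an assumption modelled on PySyft examples from the same era; only the share/get calls and the scalar addition reflect the behaviour shown above:

import torch
import syft as sy

hook = sy.TorchHook()                            # assumed setup
alice = sy.VirtualWorker(id="alice", hook=hook)  # hypothetical workers
bob = sy.VirtualWorker(id="bob", hook=hook)

x = torch.LongTensor([10, 20, 30]).share(alice, bob)
y = x + 5          # the public int is presumably turned into shares via share_scalar
print(y.get())     # expected to reconstruct 15, 25, 35
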
From OpenMined/PySyft: syft/core/frameworks/torch/tensor.py
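_share handles variables by recursion: the variable's .data is shared and each resulting data share is wrapped back in a sy.Variable; plain tensors must already be LongTensors (for instance after fix_precision()) before they are handed to spdz.share, otherwise a TypeError is raised.
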
def _share(self, n_workers):
        if torch_utils.is_variable(self):
            data_shares = self.data._share(n_workers)
            shares = []
            for data_share in data_shares:
                shares.append(sy.Variable(data_share))
            return shares
        else:
            if not isinstance(self, torch.LongTensor):
                raise TypeError(
                    "Can only MPCShare LongTensor type. You tried to share "
                    + str(type(self).__name__)
                    + "."
                    + " Do you need to call .fix_precision() first?"
                )
            return spdz.share(self, n_workers)
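
As a minimal end-to-end sketch of sharing a float variable, following the TypeError hint above. The worker setup and the fix_precision()/share() chaining are assumptions based on the API of this PySyft generation, not taken verbatim from the snippet:

import torch
import syft as sy

hook = sy.TorchHook()                            # assumed setup
alice = sy.VirtualWorker(id="alice", hook=hook)  # hypothetical workers
bob = sy.VirtualWorker(id="bob", hook=hook)

v = sy.Variable(torch.FloatTensor([0.1, 0.2]))
shared = v.fix_precision().share(alice, bob)     # encode to longs, then secret-share
plain = shared.get()                             # gather the shares back locally
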
From OpenMined/PySyft: syft/core/frameworks/torch/tensor.py
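This method-dispatch snippet shows what happens when an overloaded MPC operation produces a variable-typed result: a fresh sy.Variable is built from the guarded data type, its gradient is initialised with init_grad_, MPC nodes are created for the result, its data and its grad, and everything is bound together with torch_utils.bind_var_nodes before the variable is returned.
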
                gp_response = cls.__rsub__(self, *args, **kwargs)
            elif attr == "sum":
                gp_response = cls.sum(self, *args, **kwargs)
            elif attr == "cumsum":
                gp_response = cls.sum(self, *args, **kwargs)
            elif attr == "mm":
                gp_response = cls.mm(self, *args, **kwargs)
            elif attr == "set_":
                gp_response = cls.set_(self, *args, **kwargs)
                return gp_response
            else:
                gp_response = getattr(self.child, attr)(*args, **kwargs)

            if torch_utils.is_variable_name(gp_response.child.torch_type):
                var_data_type = gp_response.child.data.torch_type
                variable = sy.Variable(torch.guard[var_data_type]())
                variable.init_grad_()
                mpc_node = type(self)(gp_response)
                mpc_node.data = type(self)(gp_response.data)
                mpc_node.grad = type(self)(gp_response.grad)
                mpc_node.grad.data = type(self)(gp_response.grad.data)
                mpc_node.grad.data.child.child = None  # FIXME: is it necessary?
                torch_utils.bind_var_nodes(variable, mpc_node, grad=True)
                return variable
            else:
                response = type(self)(gp_response).wrap(True)
                return response
        else:
            if attr == "torch.cat":
                args = torch_utils.get_child_command(args)[0]
                kwargs = torch_utils.get_child_command(kwargs)[0]
                response = torch.cat(*args, **kwargs)