How to use the syft.core.frameworks.torch.utils.is_variable function in syft

To help you get started, we've selected a few syft examples based on popular ways it is used in public projects. All of the snippets below come from the OpenMined/PySyft repository, where torch_utils.is_variable is used to branch between a Variable-aware code path and a plain-tensor one.

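Before the project code, here is a minimal sketch of the check itself (hypothetical values; it assumes the 0.1.x-era PySyft layout from the title, where torch.autograd.Variable was still a distinct type from Tensor):

import torch
from torch.autograd import Variable
from syft.core.frameworks.torch import utils as torch_utils

t = torch.FloatTensor([1.0, 2.0])  # a plain tensor
v = Variable(t)                    # an autograd Variable wrapping it

torch_utils.is_variable(t)  # False: plain tensors take the tensor code path
torch_utils.is_variable(v)  # True: Variables also carry .data and .grad chains

Every example below uses the check for the same reason: a Variable code path must also handle the .data (and often .grad) chain, while a plain tensor can be handled directly.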

github OpenMined / PySyft / syft / core / frameworks / torch / tensor.py View on Github
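This excerpt from _FixedPrecisionTensor's constructor uses is_variable to pick an encoding path: a Variable has its .data encoded and is re-wrapped in sy.Variable, while a plain tensor is encoded directly.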
        self.base = base
        self.precision_fractional = precision_fractional
        self.precision_integral = precision_integral
        self.precision = self.precision_fractional + self.precision_integral
        self.torch_max_value = torch.LongTensor([round(self.field / 2)])
        self.kappa = kappa

        if already_encoded:
            self.child = child
        else:
            # torch_utils.assert_has_only_torch_tensorvars(child)
            chain_tail = None
            if not isinstance(child.child, sy._LocalTensor):
                chain_tail = child.child

            if torch_utils.is_variable(child):
                var_data = child.data
                if len(var_data.size()) > 0:
                    self.encode(var_data)  # this puts in .child an encoded Tensor
                    self.child = sy.Variable(self.child)
                else:
                    self.child = sy.Variable(sy.LongTensor())
                self.child.child = chain_tail
            else:
                if len(child.size()) > 0:
                    self.encode(child)
                else:
                    self.child = sy.LongTensor()
                self.child.child = chain_tail
github OpenMined / PySyft / syft / core / frameworks / torch / tensor.py View on Github
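fix_precision calls is_variable twice: first to ensure a Variable has an initialized .grad before wrapping, then to link the wrapped Variable's .data chain to its fixed-precision counterpart.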
        field: int = (2 ** 31) - 1,
        base: int = 10,
        precision_fractional: int = 3,
        already_encoded: bool = False,
    ):
        def _fix_precision(tensorvar, is_encoded):
            return _FixedPrecisionTensor(
                tensorvar,
                torch_type=tensorvar.child.torch_type,
                field=field,
                base=base,
                precision_fractional=precision_fractional,
                already_encoded=is_encoded,
            ).wrap(True)

        if torch_utils.is_variable(self):
            if not hasattr(self, "grad") or self.grad is None:
                self.init_grad_()

        if isinstance(self.child, _PointerTensor):
            return self.owner._execute_call("fix_precision", self)
        else:
            fpt = _fix_precision
            if torch_utils.is_variable(self):
                _var = fpt(self, already_encoded)
                # This 2nd fpt() call is just a linking step:
                # Var ------> FixP -------> Var
                #  \                         \
                # data -----> FixP - - - -> data
                #                   (link)
                _var.data.child = fpt(_var.child.child.data, True).child
                _var.data.child.torch_type = self.data.child.torch_type
github OpenMined / PySyft / syft / core / frameworks / torch / tensor.py View on Github
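_share shares a Variable through its .data, re-wrapping each share in sy.Variable; plain tensors must be LongTensors and are shared with spdz.share.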
    def _share(self, n_workers):
        if torch_utils.is_variable(self):
            data_shares = self.data._share(n_workers)
            shares = []
            for data_share in data_shares:
                shares.append(sy.Variable(data_share))
            return shares
        else:
            if not isinstance(self, torch.LongTensor):
                raise TypeError(
                    "Can only MPCShare LongTensor type. You tried to share "
                    + str(type(self).__name__)
                    + "."
                    + " Do you need to call .fix_precision() first?"
                )
            return spdz.share(self, n_workers)
github OpenMined / PySyft / syft / core / frameworks / torch / tensor.py View on Github
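In share, each branch re-checks is_variable to decide whether the .data and .grad chains also need to be re-linked after sharing.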
        if isinstance(self.child, _PointerTensor):
            response = self.child.share(*workers)
            if torch_utils.is_variable(self):
                self_copy = self
                self_copy.child = response
                self_copy.data.child = response.data
                self_copy.grad.child = response.grad
                self_copy.grad.data.child = response.grad.data
                return self_copy
            else:
                return response.wrap(True)

        elif isinstance(self.child, _FixedPrecisionTensor):
            var_shared = self.child.child.share(*workers)
            self.child.child = var_shared
            if torch_utils.is_variable(self):
                self.data.child.child = var_shared.data
                if hasattr(self, "grad") and self.grad is not None:
                    self.grad.child.child = var_shared.grad
                    self.grad.data.child.child = var_shared.grad.data
            return self

        else:
            is_variable = torch_utils.is_variable(self)
            if is_variable:
                if not hasattr(self, "grad") or self.grad is None:
                    self.init_grad_()
            n_workers = len(workers)
            shares = self._share(n_workers)

            pointer_shares_dict = {}
            for share, worker in zip(shares, workers):
github OpenMined / PySyft / syft / core / workers / base.py View on Github
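In the worker's command handler, is_variable (alongside is_tensor) determines how the result of an in-place torch command is bound back onto the original chain.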
                raw_command, replace_tensorvar_with_child=True
            )

            # torch_utils.assert_has_only_syft_tensors(syft_command)

        # Note: because we have problems registering tensors with the right worker,
        # and because having Virtual workers creates even more ambiguity, we specify the worker
        # performing the operation

        result = child_type.handle_call(syft_command, owner=self)

        if is_torch_command:
            # Wrap the result
            if has_self and utils.is_in_place_method(attr):
                # TODO: fix this properly: don't wrap the same way if syft or Variable
                if torch_utils.is_variable(result) or torch_utils.is_tensor(result):
                    wrapper = torch_utils.bind_tensor_nodes(
                        raw_command["self"], result.child
                    )
                else:
                    wrapper = torch_utils.bind_tensor_nodes(raw_command["self"], result)
            else:
                wrapper = torch_utils.wrap_command(result)
            torch_utils.enforce_owner(wrapper, self)
            return wrapper
        else:
            # We don't need to wrap
            torch_utils.enforce_owner(result, self)
            return result
github OpenMined / PySyft / syft / core / workers / base.py View on Github
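register gives Variables their own branch because they must be registered through several chains at once: .child, .data.child, and both .grad chains.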
    def register(self, result):
        """Register an object with SyftTensors."""
        if issubclass(type(result), sy._SyftTensor):
            syft_obj = result
            self.register_object(syft_obj)
        elif torch_utils.is_tensor(result):
            tensor = result
            self.register_object(tensor.child)
        elif torch_utils.is_variable(result):
            variable = result
            self.register(variable.child)
            self.register(variable.data.child)
            if not hasattr(variable, "grad") or variable.grad is None:
                variable.init_grad_()
            self.register(variable.grad.child)
            self.register(variable.grad.data.child)
        # Case of an iterable type that is not JSON serializable
        elif isinstance(result, (list, tuple, set, bytearray, range)):
            for res in result:
                self.register(res)
        elif result is None:
            """do nothing."""
        elif isinstance(result, np.ndarray):
            self.register_object(result)
        else:
github OpenMined / PySyft / syft / core / frameworks / torch / tensor.py View on Github
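get_shape is the simplest case: if the child is a raw tensor or Variable, its .shape is available directly; otherwise the call is forwarded down the chain.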
    def get_shape(self) -> Union[tuple, list]:
        if torch_utils.is_tensor(self.child) or torch_utils.is_variable(self.child):
            return self.child.shape
        else:
            return self.child.get_shape()
github OpenMined / PySyft / syft / core / frameworks / torch / tensor.py View on Github
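When wrapping a command's response, the is_variable branch also binds the response's .data (and, when present, .grad) onto the chain of the in-place target.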
            if isinstance(response, (int, float, bool)):
                response = torch_type([response])
            elif isinstance(response, (np.ndarray,)):
                logging.warning("[np.ndarray] Hardcoding FloatTensor")
                response = sy.FloatTensor(response)
        else:
            # If not we can directly return
            if isinstance(response, (int, float, bool, np.ndarray)):
                return response

        # If the command is an in-place method, wrap self and return
        if has_self and utils.is_in_place_method(attr):
            # wrap the main element
            torch_utils.bind_tensor_nodes(syft_command["self"], response)

            if torch_utils.is_variable(response):
                # Also wrap the data if it's a variable
                # (don't use bind_tensor_nodes: the chain is not well formed yet)
                syft_command["self"].child.data = response.data
                # And wrap the grad if there is one
                if response.grad is not None:
                    if response.grad.data.dim() > 0:
                        syft_command["self"].child.grad = response.grad
                    else:
                        syft_command["self"].child.grad.native_set_()

            return_response = syft_command["self"]
        else:
            try:
                assert isinstance(
                    response._child, (_SPDZTensor, _SNNTensor, _FixedPrecisionTensor)
                )
github OpenMined / PySyft / syft / core / frameworks / encode.py View on Github
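The JSON encoder handles Variables and tensors almost identically; the is_variable branch differs only in serializing with is_head=True so the full Variable chain (data and grad) is captured.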
            tail_object = torch_utils.find_tail_of_chain(obj)
            if self.retrieve_pointers and isinstance(tail_object, sy._PointerTensor):
                self.found_pointers.append(tail_object)
            return obj.ser(private=private_local)
        # Case of basic types
        elif isinstance(obj, (int, float, str)) or obj is None:
            return obj
        # List
        elif isinstance(obj, list):
            return [self.python_encode(i, private_local) for i in obj]
        # Iterables non json-serializable
        elif isinstance(obj, (tuple, set, bytearray, range)):
            key = get_serialized_key(obj)
            return {key: [self.python_encode(i, private_local) for i in obj]}
        # Variable
        elif torch_utils.is_variable(obj):
            tail_object = torch_utils.find_tail_of_chain(obj)
            if self.retrieve_pointers and isinstance(tail_object, sy._PointerTensor):
                self.found_pointers.append(tail_object)
            return obj.ser(private=private_local, is_head=True)
        # Tensors
        elif torch_utils.is_tensor(obj):
            tail_object = torch_utils.find_tail_of_chain(obj)
            if self.retrieve_pointers and isinstance(tail_object, sy._PointerTensor):
                self.found_pointers.append(tail_object)
            return obj.ser(private=private_local)
        # Ellipsis
        elif isinstance(obj, type(...)):
            return "..."
        # np.array
        elif isinstance(obj, np.ndarray):
            return obj.ser(private=private_local, to_json=False)