How to use the syft.generic.frameworks.hook.hook_args.unwrap_args_from_method function in syft

To help you get started, we’ve selected a few syft examples based on popular ways unwrap_args_from_method is used in public projects.

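All of the examples below follow the same three-step pattern: hook_args.unwrap_args_from_method(method_name, self, args, kwargs) strips the syft wrappers from the receiver and its arguments and returns (new_self, new_args, new_kwargs); the call is then forwarded to the underlying child tensors; finally, hook_args.hook_response puts the wrappers back on the result. Here is a minimal sketch of that round trip, using "trunc" as an illustrative method name on a hypothetical custom syft tensor class (the class and method are assumptions, not taken from the PySyft sources):

from syft.generic.frameworks.hook import hook_args

def trunc(self, *args, **kwargs):
    # 1. Replace all syft tensors with their child attribute
    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
        "trunc", self, args, kwargs
    )

    # 2. Send the call to the child tensor and get the response
    response = getattr(new_self, "trunc")(*new_args, **new_kwargs)

    # 3. Put the syft wrappers back on the tensors found in the response
    return hook_args.hook_response("trunc", response, wrap_type=type(self))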

github OpenMined / PySyft / syft / frameworks / torch / tensors / interpreters / precision.py
new_self = other.child
                new_other = self
            elif cmd == "div":
                # TODO how to divide by AST?
                raise NotImplementedError(
                    "Division of a FixedPrecisionTensor by an AdditiveSharingTensor not implemented"
                )

        elif (
            cmd == "mul"
            and isinstance(self.child, (AdditiveSharingTensor, MultiPointerTensor))
            and isinstance(other.child, (AdditiveSharingTensor, MultiPointerTensor))
        ):
            # If we try to multiply a FPT>torch.tensor with a FPT>AST,
            # we swap operands so that we do the same operation as above
            new_self, new_other, _ = hook_args.unwrap_args_from_method("mul", self, other, None)

        else:
            # Replace all syft tensors with their child attribute
            new_self, new_other, _ = hook_args.unwrap_args_from_method(cmd, self, other, None)

            # To avoid problems with negative numbers
            # we take the absolute value of the operands
            # The problems could be 1) bad truncation for multiplication
            # 2) overflow when scaling self in division

            # sgn_self is 1 when new_self is positive, else it is 0
            # The comparison is different if new_self is a torch tensor or an AST
            sgn_self = (
                (new_self < self.field // 2).long()
                if isinstance(new_self, torch.Tensor)
                else new_self > 0
github OpenMined / PySyft / syft / frameworks / torch / tensors / interpreters / precision.py
raise NotImplementedError(
                    "Division of a FixedPrecisionTensor by an AdditiveSharingTensor not implemented"
                )

        elif (
            cmd == "mul"
            and isinstance(self.child, (AdditiveSharingTensor, MultiPointerTensor))
            and isinstance(other.child, (AdditiveSharingTensor, MultiPointerTensor))
        ):
            # If we try to multiply a FPT>torch.tensor with a FPT>AST,
            # we swap operands so that we do the same operation as above
            new_self, new_other, _ = hook_args.unwrap_args_from_method("mul", self, other, None)

        else:
            # Replace all syft tensors with their child attribute
            new_self, new_other, _ = hook_args.unwrap_args_from_method(cmd, self, other, None)

            # To avoid problems with negative numbers
            # we take the absolute value of the operands
            # The problems could be 1) bad truncation for multiplication
            # 2) overflow when scaling self in division

            # sgn_self is 1 when new_self is positive, else it is 0
            # The comparison is different if new_self is a torch tensor or an AST
            sgn_self = (
                (new_self < self.field // 2).long()
                if isinstance(new_self, torch.Tensor)
                else new_self > 0
            )
            pos_self = new_self * sgn_self
            neg_self = (
                (self.field - new_self) * (1 - sgn_self)
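Note the calling convention in the precision.py excerpts above: for binary operators, the single operand is passed directly in place of an args tuple, with None in place of kwargs, and the result is unpacked as (new_self, new_other, _). A hedged sketch of that form (mul_children is a hypothetical helper, not part of PySyft):

from syft.generic.frameworks.hook import hook_args

def mul_children(self_tensor, other_tensor):
    # Strip the syft wrappers from both operands of "mul"; both tensors
    # are assumed to carry a .child attribute (e.g. a FixedPrecisionTensor)
    new_self, new_other, _ = hook_args.unwrap_args_from_method(
        "mul", self_tensor, other_tensor, None
    )
    # The raw multiplication then runs on the unwrapped children
    return new_self * new_other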
github OpenMined / PySyft / syft / frameworks / torch / tensors / decorators / sensitivity.py
def sqrt(self, *args, **kwargs):
        # Replace all syft tensors with their child attribute
        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
            "sqrt", self, args, kwargs
        )

        # Send it to the appropriate class and get the response
        response = getattr(new_self, "sqrt")(*new_args, **new_kwargs)

        # Put the SyftTensor wrapper back on the tensors found in the response
        response = hook_args.hook_response("sqrt", response, wrap_type=type(self))

        l = self.l.sqrt(*args)
        h = self.h.sqrt(*args)
        el = self.el.sqrt(*args)
        eh = self.eh.sqrt(*args)

        response.l = l
        response.h = h
github OpenMined / PySyft / syft / frameworks / torch / tensors / decorators / sensitivity.py
def sum(self, *args, **kwargs):
        """
        Here is the version of the add method without the decorator: as you can see
        it is much more complicated. However you might need sometimes to specify
        some particular behaviour: so here what to start from :)
        """
        # Replace all syft tensors with their child attribute
        new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
            "sum", self, args, kwargs
        )

        # Send it to the appropriate class and get the response
        response = getattr(new_self, "sum")(*new_args, **new_kwargs)

        # Put the SyftTensor wrapper back on the tensors found in the response
        response = hook_args.hook_response("sum", response, wrap_type=type(self))

        l = self.l.sum(*args, **kwargs)
        h = self.h.sum(*args, **kwargs)
        el = self.el.sum(*args, **kwargs)
        eh = self.eh.sum(*args, **kwargs)

        response.l = l
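The sqrt and sum overloads above share the same skeleton; only the per-attribute bookkeeping on l, h, el and eh differs. If you need to overload another method on the same class, a hedged template ("mean" is an illustrative method name; the attribute names are copied from the sum example):

def mean(self, *args, **kwargs):
    # Generic part: unwrap, dispatch to the child tensor, rewrap
    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
        "mean", self, args, kwargs
    )
    response = getattr(new_self, "mean")(*new_args, **new_kwargs)
    response = hook_args.hook_response("mean", response, wrap_type=type(self))

    # Method-specific part: propagate the sensitivity bookkeeping,
    # mirroring the sum example above
    response.l = self.l.mean(*args, **kwargs)
    response.h = self.h.mean(*args, **kwargs)
    response.el = self.el.mean(*args, **kwargs)
    response.eh = self.eh.mean(*args, **kwargs)
    return response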
github OpenMined / PySyft / syft / generic / frameworks / hook / hook.py
method = getattr(self, f"native_{method_name}")
                # Run the native function with the new args

                try:
                    response = method(*args, **kwargs)

                except BaseException as e:
                    # we can make some errors more descriptive with this method
                    raise route_method_exception(e, self, args, kwargs)

            else:  # means that there is a wrapper to remove

                try:
                    # Replace all torch tensors with their child attribute
                    new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
                        method_name, self, args, kwargs
                    )

                except BaseException as e:  # if there's a type mismatch, try to fix it!

                    try:
                        # if the first argument has no child (meaning it's probably raw data),
                        # try wrapping it with the type of self. We have to exclude PointerTensor
                        # because otherwise it can lead to inadvertently sending data to another
                        # machine
                        if not hasattr(args[0], "child") and not isinstance(
                            self.child, PointerTensor
                        ):
                            # TODO: add check to make sure this isn't getting around a security class

                            _args = list()
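The excerpt above is truncated, but its comments describe the recovery strategy: if unwrapping raises because the first argument is raw data (it has no child attribute), the hook wraps that argument in the same syft type as self and retries, refusing to do so when self.child is a PointerTensor so that data is not inadvertently sent to another machine. A hedged sketch of that idea (retry_unwrap and the wrapping step are hypothetical, not PySyft's actual implementation):

from syft.generic.frameworks.hook import hook_args
from syft.generic.pointers.pointer_tensor import PointerTensor

def retry_unwrap(method_name, self_tensor, args, kwargs):
    try:
        return hook_args.unwrap_args_from_method(
            method_name, self_tensor, args, kwargs
        )
    except BaseException:
        # Only rewrap raw data, and never around a PointerTensor, since
        # that could inadvertently send the data to another machine
        if hasattr(args[0], "child") or isinstance(self_tensor.child, PointerTensor):
            raise
        # Hypothetical wrapping step: rebuild args with the first argument
        # wrapped in the same syft tensor type as self_tensor
        wrapped = type(self_tensor.child)().on(args[0], wrap=False)
        new_args = (wrapped,) + tuple(args[1:])
        return hook_args.unwrap_args_from_method(
            method_name, self_tensor, new_args, kwargs
        )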