# (branch condition reconstructed from the comment below; the excerpt starts mid-method)
if (
    cmd == "mul"
    and isinstance(self.child, torch.Tensor)
    and isinstance(other.child, (AdditiveSharingTensor, MultiPointerTensor))
):
    # If we try to multiply a FPT>torch.tensor with a FPT>AST,
    # we swap operands so that the operation runs on the shares
    new_self = other.child
    new_other = self
elif cmd == "div":
# TODO how to divide by AST?
raise NotImplementedError(
"Division of a FixedPrecisionTensor by an AdditiveSharingTensor not implemented"
)
elif (
cmd == "mul"
and isinstance(self.child, (AdditiveSharingTensor, MultiPointerTensor))
and isinstance(other.child, (AdditiveSharingTensor, MultiPointerTensor))
):
    # If both self and other wrap an AdditiveSharingTensor (or MultiPointerTensor),
    # unwrap them so the multiplication runs directly on the shares
new_self, new_other, _ = hook_args.unwrap_args_from_method("mul", self, other, None)
else:
    # Replace all syft tensors with their child attribute
new_self, new_other, _ = hook_args.unwrap_args_from_method(cmd, self, other, None)
# To avoid problems with negative numbers we take the absolute value of the
# operands. The problems would be: 1) bad truncation for multiplication,
# 2) overflow when scaling self in division.
# sgn_self is 1 when new_self is positive, else it's 0.
# The comparison differs depending on whether new_self is a torch tensor or an AST.
sgn_self = (
(new_self < self.field // 2).long()
if isinstance(new_self, torch.Tensor)
else new_self > 0
)
pos_self = new_self * sgn_self
neg_self = (self.field - new_self) * (1 - sgn_self)
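# Why comparing against field // 2 recovers the sign: negatives are encoded
# modulo the field as field - |x|, so they land in the field's upper half.
# A minimal standalone sketch of the sgn/pos/neg decomposition above (the
# field size is illustrative, not the one syft uses):
field = 2 ** 62

def abs_in_field(v, field=field):
    sgn = 1 if v < field // 2 else 0   # 1 when v encodes a positive number
    pos = v * sgn                      # v itself when positive
    neg = (field - v) * (1 - sgn)      # the magnitude when negative
    return sgn, pos + neg

assert abs_in_field(42 % field) == (1, 42)
assert abs_in_field(-42 % field) == (0, 42)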
import time
import torch
import syft as sy
from grid.workers import WebsocketIOServerWorker
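# Payload handed to the server worker below; it sends a small tensor to
# `location` (presumably the client worker that just connected).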
def _payload(location):
x = torch.tensor([10, 20, 30, 40, 50.0])
x.send(location)
hook = sy.TorchHook(torch)
server_worker = WebsocketIOServerWorker(
hook, "localhost", 5000, log_msgs=True, payload=_payload
)
def test_client_id():
android = server_worker.socketio.test_client(server_worker.app)
android.emit("client_id", "android")
assert len(server_worker.clients) == 1
android.disconnect()
server_worker.terminate()
def test_payload_execution():
    android = server_worker.socketio.test_client(server_worker.app)
    android.emit("client_id", "android")
    # (the excerpt is cut off here; a minimal plausible ending, mirroring
    # test_client_id above, is assumed)
    assert len(server_worker.clients) == 1
    android.disconnect()
    server_worker.terminate()
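# Both tests drive the worker through Flask-SocketIO's test client: emitting
# "client_id" registers the test client in server_worker.clients, and
# terminate() shuts the worker down so the tests stay isolated.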
def test_fit(fit_dataset_key, epochs):
data, target = utils.create_gaussian_mixture_toy_data(nr_samples=100)
fed_client = federated.FederatedClient()
dataset = sy.BaseDataset(data, target)
dataset_key = "gaussian_mixture"
fed_client.add_dataset(dataset, key=dataset_key)
def loss_fn(pred, target):
return torch.nn.functional.cross_entropy(input=pred, target=target)
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(2, 3)
self.fc2 = torch.nn.Linear(3, 2)
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
    def forward(self, x):
        # (body assumed; the excerpt cuts off here) ReLU on fc1, then raw fc2
        # logits for the cross_entropy loss above
        x = torch.nn.functional.relu(self.fc1(x))
        return self.fc2(x)
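# Hedged illustration, not part of the original test: the excerpt ends before
# the federated fit call, but Net, loss_fn, data and target compose as plain
# PyTorch training (the optimizer and lr here are illustrative):
def _local_fit_sketch(data, target, epochs=2):
    model = Net()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = loss_fn(pred=model(data), target=target.long())
        loss.backward()
        optimizer.step()
    return model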
# (imports assumed; the excerpt omits them)
import numpy as np
from syft import TensorBase

def test_masked_scatter_broadcasting_1(self):
    t = TensorBase(np.ones((2, 3)))
    source = TensorBase([1, 2, 3, 4, 5, 6])
    mask = TensorBase([0, 1, 0])
    t.masked_scatter_(mask, source)
    self.assertTrue(np.array_equal(t.data, np.array([[1, 1, 1], [1, 2, 1]])))
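def test_masked_scatter_numpy_equivalent(self):
    # Hypothetical companion check (not in the original suite): the same
    # semantics in plain NumPy - the mask broadcasts across rows and masked
    # slots are filled from source in row-major order.
    t = np.ones((2, 3))
    mask = np.broadcast_to(np.array([0, 1, 0], dtype=bool), t.shape)
    source = np.array([1, 2, 3, 4, 5, 6])
    t[mask] = source[: mask.sum()]
    self.assertTrue(np.array_equal(t, np.array([[1, 1, 1], [1, 2, 1]])))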
def test_scatter_numerical_3(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
dim = 0
t.scatter_(dim=dim, index=idx, src=src)
self.assertTrue(np.array_equal(t.data, np.array([[1, 2, 3, 4, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
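def test_scatter_numpy_equivalent(self):
    # Hypothetical companion check (not in the original suite): with dim=0,
    # scatter_ does t[idx[i, j], j] = src[i, j]; because idx has one row,
    # only the first row of src is consumed.
    t = np.zeros((3, 5))
    idx = np.array([[0, 0, 0, 0, 0]])
    src = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
    t[idx, np.arange(5)] = src[: idx.shape[0]]
    self.assertTrue(np.array_equal(t, np.array([[1, 2, 3, 4, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))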
def test_log_1p(self):
t1 = TensorBase(np.array([1, 2, 3]))
self.assertTrue(np.allclose((t1.log1p()).data, [0.69314718, 1.09861229, 1.38629436]))
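    # log1p(x) = ln(1 + x), so [1, 2, 3] -> [ln 2, ln 3, ln 4]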
def test_lerp_(self):
t1 = TensorBase(np.array([1, 2, 3, 4]))
t2 = TensorBase(np.array([3, 4, 5, 6]))
weight = 0.5
t1.lerp_(t2, weight)
self.assertTrue(np.array_equal(t1.data, [2, 3, 4, 5]))
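    # lerp_ computes t1 + weight * (t2 - t1) in place; weight = 0.5 gives the
    # elementwise midpoint, e.g. 1 + 0.5 * (3 - 1) = 2.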
def testMode_axis_col(self):
    t1 = TensorBase([[1, 2, 3, 4, 5, 1, 1, 1, 1, 1], [1, 2, 3, 4, 4, 5, 6, 7, 8, 1]])
    # assertTrue(x, msg) never compares its two arguments, so the original call
    # could not fail; compare explicitly instead
    self.assertTrue(
        np.array_equal(
            t1.mode(axis=0),
            np.array([[[1, 2, 3, 4, 4, 1, 1, 1, 1, 1]], [[2, 2, 2, 2, 1, 1, 1, 1, 1, 2]]]),
        )
    )
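    # mode(axis=0) apparently mirrors scipy.stats.mode: the first block holds the
    # per-column modal values, the second their counts; ties such as the column
    # [5, 4] resolve to the smallest value (4), as scipy does.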