# Scraper boilerplate (Snyk Code advertisement) — not part of the original source.
# NOTE(review): garbled paste — indentation was stripped and this test is cut
# off mid-definition (Net.forward has no body before the next fragment begins).
# Comments below annotate intent only; restore structure from the original
# PySyft test file before executing.
def test_fit(fit_dataset_key, epochs):
# Build a two-class Gaussian-mixture toy dataset and register it on a
# federated client under the key "gaussian_mixture".
data, target = utils.create_gaussian_mixture_toy_data(nr_samples=100)
fed_client = federated.FederatedClient()
dataset = sy.BaseDataset(data, target)
dataset_key = "gaussian_mixture"
fed_client.add_dataset(dataset, key=dataset_key)
# Classification loss used during fitting.
def loss_fn(pred, target):
return torch.nn.functional.cross_entropy(input=pred, target=target)
# Small MLP (2 -> 3 -> 2) with Xavier-normal initialized weights.
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(2, 3)
self.fc2 = torch.nn.Linear(3, 2)
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
# Truncated here: the forward body is missing from this fragment.
def forward(self, x):
# NOTE(review): headless fragment of a worker/server setup routine — the
# enclosing `def`, the `if` branch paired with the `else:` below, and the
# definitions of `server` and `mnist_dataset` are not visible in this paste.
key = "mnist"
else:
# Testing branch: register the full, unfiltered MNIST dataset.
dataset = sy.BaseDataset(
data=mnist_dataset.data,
targets=mnist_dataset.targets,
transform=mnist_dataset.transform,
)
key = "mnist_testing"
server.add_dataset(dataset, key=key)
# Setup toy data (vectors example)
data_vectors = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]], requires_grad=True)
target_vectors = torch.tensor([[1], [0], [1], [0]])
server.add_dataset(sy.BaseDataset(data_vectors, target_vectors), key="vectors")
# Setup toy data (xor example)
data_xor = torch.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]], requires_grad=True)
target_xor = torch.tensor([1.0, 1.0, 0.0, 0.0], requires_grad=False)
server.add_dataset(sy.BaseDataset(data_xor, target_xor), key="xor")
# Setup gaussian mixture dataset
data, target = utils.create_gaussian_mixture_toy_data(nr_samples=100)
server.add_dataset(sy.BaseDataset(data, target), key="gaussian_mixture")
# Setup partial iris dataset
data, target = utils.iris_data_partial()
dataset = sy.BaseDataset(data, target)
dataset_key = "iris"
server.add_dataset(dataset, key=dataset_key)
# NOTE(review): headless fragment — the enclosing `def`, the `if` paired with
# the `else:` below, and `server`/`mnist_dataset`/`keep_labels` are defined
# outside this paste; the fragment is also cut off after the xor data line.
# Boolean mask (as uint8) selecting the MNIST samples whose label is in
# `keep_labels`.
indices = np.isin(mnist_dataset.targets, keep_labels).astype("uint8")
logger.info("number of true indices: %s", indices.sum())
# Mask along the sample axis, then restore the (N, 28, 28) image layout.
selected_data = (
torch.native_masked_select(mnist_dataset.data.transpose(0, 2), torch.tensor(indices))
.view(28, 28, -1)
.transpose(2, 0)
)
logger.info("after selection: %s", selected_data.shape)
selected_targets = torch.native_masked_select(mnist_dataset.targets, torch.tensor(indices))
dataset = sy.BaseDataset(
data=selected_data, targets=selected_targets, transform=mnist_dataset.transform
)
key = "mnist"
else:
# Testing branch: register the full, unfiltered MNIST dataset.
dataset = sy.BaseDataset(
data=mnist_dataset.data,
targets=mnist_dataset.targets,
transform=mnist_dataset.transform,
)
key = "mnist_testing"
server.add_dataset(dataset, key=key)
# Setup toy data (vectors example)
data_vectors = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]], requires_grad=True)
target_vectors = torch.tensor([[1], [0], [1], [0]])
server.add_dataset(sy.BaseDataset(data_vectors, target_vectors), key="vectors")
# Setup toy data (xor example)
data_xor = torch.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]], requires_grad=True)
# NOTE(review): truncated fragment — indentation was stripped and Net.forward
# ends abruptly (no fc3 application or return statement visible).
def test_train_config_with_jit_trace(hook, workers): # pragma: no cover
alice = workers["alice"]
# Toy 2-D inputs with binary targets, registered on alice.
data = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]], requires_grad=True)
target = torch.tensor([[1], [0], [1], [0]])
dataset = sy.BaseDataset(data, target)
alice.add_dataset(dataset, key="gaussian_mixture")
# Mean-squared-error loss compiled with torch.jit.script via the hook.
@hook.torch.jit.script
def loss_fn(pred, target):
return ((target.float() - pred.float()) ** 2).mean()
# Three-layer MLP: 2 -> 3 -> 2 -> 1.
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 3)
self.fc2 = nn.Linear(3, 2)
self.fc3 = nn.Linear(2, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
# Truncated here: the fc3 application / return is missing from this fragment.
x = F.relu(self.fc2(x))
# NOTE(review): truncated fragment — indentation was stripped and the function
# is cut off after Net.__init__ (no forward method or return visible).
def prepare_training(hook, alice): # pragma: no cover
# Register a Gaussian-mixture toy dataset on the alice worker.
data, target = utils.create_gaussian_mixture_toy_data(nr_samples=100)
dataset_key = "gaussian_mixture"
dataset = sy.BaseDataset(data, target)
alice.add_dataset(dataset, key=dataset_key)
# Mean-squared-error loss compiled with torch.jit.script via the hook.
@hook.torch.jit.script
def loss_fn(pred, target):
return ((target.float() - pred.float()) ** 2).mean()
# Three-layer MLP (2 -> 3 -> 2 -> 1) with Xavier-uniform initialized weights.
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 3)
self.fc2 = nn.Linear(3, 2)
self.fc3 = nn.Linear(2, 1)
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.xavier_uniform_(self.fc3.weight)
def test_fl_with_trainconfig(isolated_filesystem, start_remote_server_worker_only, hook):
    """Execute the TrainConfig tutorial notebook against a live remote worker.

    A websocket worker named "alice" is launched with an XOR toy dataset,
    the notebook is run via papermill, and the result is checked to be a
    notebook node.  Afterwards the worker process is terminated and a
    virtual "alice" is re-registered for subsequent tests.
    """
    os.chdir("advanced/Federated Learning with TrainConfig/")
    notebook = "Introduction to TrainConfig.ipynb"
    tutorial_dir = Path("examples/tutorials/advanced/Federated Learning with TrainConfig/")
    not_excluded_notebooks.remove(tutorial_dir / notebook)
    # Free the "alice" id before the remote worker process claims it.
    hook.local_worker.remove_worker_from_registry("alice")
    worker_kwargs = {"id": "alice", "host": "localhost", "port": 8777, "hook": hook}
    # XOR toy problem served by the remote worker under the key "xor".
    xor_inputs = torch.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]], requires_grad=True)
    xor_labels = torch.tensor([[1.0], [1.0], [0.0], [0.0]], requires_grad=False)
    xor_dataset = sy.BaseDataset(xor_inputs, xor_labels)
    remote_proc = start_remote_server_worker_only(dataset=(xor_dataset, "xor"), **worker_kwargs)
    executed = pm.execute_notebook(notebook, "/dev/null", timeout=300)
    assert isinstance(executed, nbformat.notebooknode.NotebookNode)
    remote_proc.terminate()
    # Restore a virtual "alice" so later tests find the expected worker.
    sy.VirtualWorker(id="alice", hook=hook, is_client_worker=False)
def main(**kwargs):  # pragma: no cover
    """Spin up a websocket server worker that serves an XOR toy dataset.

    All keyword arguments (id, host, port, hook, ...) are forwarded
    verbatim to ``WebsocketServerWorker``.  The worker is started and
    returned to the caller.
    """
    server = WebsocketServerWorker(**kwargs)
    # Four XOR input/label pairs form the worker's only dataset.
    xor_inputs = th.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]], requires_grad=True)
    xor_labels = th.tensor([[1.0], [1.0], [0.0], [0.0]], requires_grad=False)
    xor_dataset = sy.BaseDataset(xor_inputs, xor_labels)
    server.add_dataset(xor_dataset, key="xor")
    server.start()
    return server
# NOTE(review): headless fragment — the enclosing `def` and the definition of
# `server` are outside this paste; the logged "mnist" dataset is registered in
# code not visible here.
# We just use MNIST in this example but a worker can store multiple datasets
# Setup toy data (vectors example)
data_vectors = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]], requires_grad=True)
target_vectors = torch.tensor([[1], [0], [1], [0]])
server.add_dataset(sy.BaseDataset(data_vectors, target_vectors), key="vectors")
# Setup toy data (xor example)
data_xor = torch.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]], requires_grad=True)
target_xor = torch.tensor([1.0, 1.0, 0.0, 0.0], requires_grad=False)
server.add_dataset(sy.BaseDataset(data_xor, target_xor), key="xor")
# Setup gaussian mixture dataset
data, target = utils.create_gaussian_mixture_toy_data(nr_samples=100)
server.add_dataset(sy.BaseDataset(data, target), key="gaussian_mixture")
logger.info("datasets: %s", server.datasets)
logger.info("len(datasets[mnist]): %s", len(server.datasets["mnist"]))
# Start the (server) worker and hand it back to the caller.
server.start()
return server
# NOTE(review): fragment that begins mid-expression (the opening of the
# masked-select chain is missing) — the enclosing `def`, `server`,
# `mnist_trainset`, and `indices` are defined outside this paste.
.transpose(2, 0)
)
logger.info("after selection: %s", selected_data.shape)
selected_targets = torch.native_masked_select(mnist_trainset.targets, torch.tensor(indices))
dataset = sy.BaseDataset(
data=selected_data, targets=selected_targets, transform=mnist_trainset.transform
)
server.add_dataset(dataset, key="mnist")
# We just use MNIST in this example but a worker can store multiple datasets
# Setup toy data (vectors example)
data_vectors = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]], requires_grad=True)
target_vectors = torch.tensor([[1], [0], [1], [0]])
server.add_dataset(sy.BaseDataset(data_vectors, target_vectors), key="vectors")
# Setup toy data (xor example)
data_xor = torch.tensor([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]], requires_grad=True)
target_xor = torch.tensor([1.0, 1.0, 0.0, 0.0], requires_grad=False)
server.add_dataset(sy.BaseDataset(data_xor, target_xor), key="xor")
# Setup gaussian mixture dataset
data, target = utils.create_gaussian_mixture_toy_data(nr_samples=100)
server.add_dataset(sy.BaseDataset(data, target), key="gaussian_mixture")
logger.info("datasets: %s", server.datasets)
logger.info("len(datasets[mnist]): %s", len(server.datasets["mnist"]))
# Start the (server) worker and hand it back to the caller.
server.start()
return server