Commit 856fadb
This commit brings the following updates:
1. Removed unnecessary spaces from Workflow_Interface_102_Aggregator_Validation.ipynb
2. Upgraded protobuf to 3.20.3 in setup.py, as required by tensorboard
3. Modified the 101-torch-cnn-mnist workspace to support n-level arguments
4. Added a federated runtime workspace for 102-aggregator-validation

Signed-off-by: Parth Mandaliya <parthx.mandaliya@intel.com>
ParthM-GitHub committed Sep 28, 2023
1 parent 4a68b40 commit 856fadb
Showing 16 changed files with 420 additions and 36 deletions.
@@ -305,7 +305,7 @@
"metadata": {},
"outputs": [],
"source": [
"collaborator_names = ['Portland', 'Seattle', 'Chandler','Bangalore']\n",
"collaborator_names = ['Portland', 'Seattle', 'Chandler', 'Bangalore']\n",
"\n",
"def callable_to_initialize_aggregator_private_attributes(n_collaborators, test_dataset, batch_size_train):\n",
" aggregator_test = deepcopy(test_dataset)\n",
@@ -333,8 +333,8 @@
" local_test.targets = test_dataset.targets[index::n_collaborators]\n",
" \n",
" return {\n",
" 'train_loader': torch.utils.data.DataLoader(local_train,batch_size=batch_size_train, shuffle=True),\n",
" 'test_loader': torch.utils.data.DataLoader(local_test,batch_size=batch_size_train, shuffle=True)\n",
" 'train_loader': torch.utils.data.DataLoader(local_train,batch_size=batch_size_train, shuffle=True),\n",
" 'test_loader': torch.utils.data.DataLoader(local_test,batch_size=batch_size_train, shuffle=True)\n",
" }\n",
"\n",
"collaborators=[]\n",
@@ -12,8 +12,8 @@ col1:
batch_size: 32
index: 0
n_collaborators: 2
test_dataset: src.collaborator_private_attrs.train_dataset
train_dataset: src.collaborator_private_attrs.test_dataset
test_dataset: src.collaborator_private_attrs.train_dataset
template: src.collaborator_private_attrs.collaborator_private_attrs

col2:
@@ -22,6 +22,6 @@ col2:
batch_size: 32
index: 1
n_collaborators: 2
test_dataset: src.collaborator_private_attrs.train_dataset
train_dataset: src.collaborator_private_attrs.test_dataset
test_dataset: src.collaborator_private_attrs.train_dataset
template: src.collaborator_private_attrs.collaborator_private_attrs
25 changes: 24 additions & 1 deletion openfl-workspace/experimental/101_torch_cnn_mnist/plan/plan.yaml
@@ -21,7 +21,30 @@ federated_flow:
settings:
model:
template: src.flow.Net
settings: {}
settings:
  convolutional_block:
    template: src.flow.convolutional_block
    settings:
      block_sequential:
        template: src.flow.sequential_block
        settings:
          conv2d1:
            template: src.flow.conv2d1
            settings:
              in_channels: 1
              out_channels: 10
              kernel_size: 5
          maxPool2d1:
            template: src.flow.maxpool2d1
            settings:
              kernel_size: 2
          relu: src.flow.relu
      conv2d2: src.flow.conv2d2
      dropout2d: src.flow.dropout2d
      maxPool2d2: src.flow.maxpool2d2
      relu: src.flow.relu
  in_features: 50
  out_features: 10
optimizer: null
rounds: 4
checkpoint: true
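
The nested template/settings pairs above are the "n-level arguments" mentioned in the commit message: each template resolves to a module-level block in src/flow.py (shown below), and each nested settings dict supplies that block's constructor arguments. A minimal hand-built equivalent of what the plan expands to might look like the following sketch; the recursive instantiation itself is performed by OpenFL's plan parser, so the construction order here is an assumption:

import torch.nn as nn

from src.flow import Net  # the model template referenced by the plan

# Assumed expansion of the nested settings; names mirror the plan.yaml keys.
convolutional_block = nn.Sequential(      # template: src.flow.convolutional_block
    nn.Sequential(                        # block_sequential -> src.flow.sequential_block
        nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5),  # conv2d1
        nn.MaxPool2d(kernel_size=2),                               # maxPool2d1
        nn.ReLU(),                                                 # relu
    ),
    nn.Conv2d(10, 20, 5),                 # conv2d2 (pre-instantiated in src.flow)
    nn.Dropout2d(),                       # dropout2d
    nn.MaxPool2d(2),                      # maxPool2d2
    nn.ReLU(),                            # relu
)
model = Net(convolutional_block, in_features=50, out_features=10)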
63 changes: 34 additions & 29 deletions openfl-workspace/experimental/101_torch_cnn_mnist/src/flow.py
@@ -1,14 +1,14 @@
# Copyright (C) 2020-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from openfl.experimental.interface import FLSpec
from openfl.experimental.placement import aggregator, collaborator
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch
import numpy as np

from openfl.experimental.interface import FLSpec
from openfl.experimental.placement import aggregator, collaborator

learning_rate = 0.01
momentum = 0.5
@@ -18,23 +18,32 @@
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)

convolutional_block = nn.Sequential
sequential_block = nn.Sequential
conv2d1 = nn.Conv2d
conv2d2 = nn.Conv2d(10, 20, 5)
maxpool2d1 = nn.MaxPool2d
maxpool2d2 = nn.MaxPool2d(2)
relu = nn.ReLU()
dropout2d = nn.Dropout2d()


class Net(nn.Module):
def __init__(self):
def __init__(self, convolutional_block,
in_features: int, out_features: int):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
self.conv_block = convolutional_block
self.linear_block = nn.Sequential(
nn.Linear(320, in_features),
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_features, out_features)
)

def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = self.conv_block(x)
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
x = self.linear_block(x)
return F.log_softmax(x)


@@ -50,13 +59,11 @@ def inference(network, test_loader):
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
print(
"\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
f"\nTest set: Avg. loss: {test_loss:.4f}, Accuracy: "
+ f"{correct}/{len(test_loader.dataset)} "
+ f"({100.0 * correct / len(test_loader.dataset):.0f}%)\n"
)

accuracy = float(correct / len(test_loader.dataset))
return accuracy

@@ -119,13 +126,12 @@ def train(self):
self.optimizer.step()
if batch_idx % log_interval == 0:
print(
"Train Epoch: 1 [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
batch_idx * len(data),
len(self.train_loader.dataset),
100.0 * batch_idx / len(self.train_loader),
loss.item(),
)
f"Train Epoch: 1 [{batch_idx * len(data)}/"
+ f"{len(self.train_loader.dataset)} ("
+ f"{100.0 * batch_idx / len(self.train_loader):.0f}%)"
+ f"]\tLoss: {loss.item():.6f}"
)

self.loss = loss.item()
torch.save(self.model.state_dict(), "model.pth")
torch.save(self.optimizer.state_dict(), "optimizer.pth")
@@ -136,8 +142,8 @@ def train(self):
def local_model_validation(self):
self.local_validation_score = inference(self.model, self.test_loader)
print(
"Doing local model validation "
+ f"for collaborator {self.input}: {self.local_validation_score}"
"Doing local model validation for collaborator "
+ f"{self.input}: {self.local_validation_score}"
)
self.next(self.join, exclude=["training_completed"])

@@ -151,8 +157,7 @@ def join(self, inputs):
input.local_validation_score for input in inputs
) / len(inputs)
print(
"Average aggregated model "
+ f"validation values = {self.aggregated_model_accuracy}"
f"Average aggregated model validation values = {self.aggregated_model_accuracy}"
)
print(f"Average training loss = {self.average_loss}")
print(f"Average local model validation values = {self.local_model_accuracy}")
@@ -0,0 +1,2 @@
current_plan_name: default

@@ -0,0 +1,5 @@
# Copyright (C) 2020-2021 Intel Corporation
# Licensed subject to the terms of the separately executed evaluation license agreement between Intel Corporation and you.

collaborators:

@@ -0,0 +1,55 @@
# Copyright (C) 2020-2021 Intel Corporation
# Licensed subject to the terms of the separately executed evaluation license agreement between Intel Corporation and you.

# All keys under 'collaborators' correspond to a specific collaborator name; the corresponding dictionary has data_name, data_path pairs.
# Note that in the MNIST case we do not store the data locally, and the data_path is used to pass an integer that helps the data object
# construct the shard of the MNIST dataset to be used for this collaborator.

# collaborator_name, data_directory_path
col1:
callable_func:
settings:
batch_size: 64
index: 0
n_collaborators: 4
train_dataset: src.collaborator_private_attrs.train_dataset
test_dataset: src.collaborator_private_attrs.test_dataset
template: src.collaborator_private_attrs.callable_to_initialize_collaborator_private_attributes

col2:
callable_func:
settings:
batch_size: 64
index: 1
n_collaborators: 4
train_dataset: src.collaborator_private_attrs.train_dataset
test_dataset: src.collaborator_private_attrs.test_dataset
template: src.collaborator_private_attrs.callable_to_initialize_collaborator_private_attributes

col3:
callable_func:
settings:
batch_size: 64
index: 2
n_collaborators: 4
train_dataset: src.collaborator_private_attrs.train_dataset
test_dataset: src.collaborator_private_attrs.test_dataset
template: src.collaborator_private_attrs.callable_to_initialize_collaborator_private_attributes

col4:
callable_func:
settings:
batch_size: 64
index: 3
n_collaborators: 4
train_dataset: src.collaborator_private_attrs.train_dataset
test_dataset: src.collaborator_private_attrs.test_dataset
template: src.collaborator_private_attrs.callable_to_initialize_collaborator_private_attributes

aggregator:
callable_func:
settings:
n_collaborators: 4
batch_size: 64
test_dataset: src.aggregator_private_attrs.test_dataset
template: src.aggregator_private_attrs.callable_to_initialize_aggregator_private_attributes
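
The data.yaml entries above wire private attributes to each participant: for every collaborator (and the aggregator), template points at a callable in src/ and settings supplies its arguments. A rough sketch of what the runtime does with one entry, assuming the loader simply calls the template with the settings as keyword arguments (the actual OpenFL loader code is not shown in this diff):

from src.collaborator_private_attrs import (
    callable_to_initialize_collaborator_private_attributes,
    train_dataset,
    test_dataset,
)

# Equivalent of the col1 entry: settings become keyword arguments.
col1_private_attrs = callable_to_initialize_collaborator_private_attributes(
    index=0,
    n_collaborators=4,
    batch_size=64,
    train_dataset=train_dataset,
    test_dataset=test_dataset,
)
# -> {'train_loader': DataLoader(...), 'test_loader': DataLoader(...)}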
@@ -0,0 +1,2 @@
../../workspace/plan/defaults

@@ -0,0 +1,31 @@
# Copyright (C) 2020-2021 Intel Corporation
# Licensed subject to the terms of the separately executed evaluation license agreement between Intel Corporation and you.

aggregator :
defaults : plan/defaults/aggregator.yaml
template : openfl.experimental.component.Aggregator
settings :
rounds_to_train : 1
log_metric_callback :
template : src.utils.write_metric


collaborator :
defaults : plan/defaults/collaborator.yaml
template : openfl.experimental.component.Collaborator
settings : {}


federated_flow:
template: src.flow.AggregatorValidationFlow
settings:
model:
template: src.flow.Net
settings: {}
optimizer: null
rounds: 3
checkpoint: true


network :
defaults : plan/defaults/network.yaml
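
The plan points log_metric_callback at src.utils.write_metric, which is not part of this diff; in comparable OpenFL workspaces it is a small TensorBoard writer. A hedged sketch of such a callback, with the signature assumed from those workspaces:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('./logs', flush_secs=5)


def write_metric(node_name, task_name, metric_name, metric, round_number):
    # One scalar per node/task/metric combination, indexed by training round.
    writer.add_scalar(f'{node_name}/{task_name}/{metric_name}',
                      metric, round_number)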
@@ -0,0 +1,4 @@
torch==1.13.1
torchvision==0.14.1
tensorboard
wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability
@@ -0,0 +1,2 @@
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
@@ -0,0 +1,29 @@
# Copyright (C) 2020-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from copy import deepcopy

import torch
import torchvision


mnist_test = torchvision.datasets.MNIST('files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))

test_dataset = mnist_test


def callable_to_initialize_aggregator_private_attributes(n_collaborators,
test_dataset, batch_size):
aggregator_test = deepcopy(test_dataset)
aggregator_test.targets = test_dataset.targets[n_collaborators::n_collaborators + 1]
aggregator_test.data = test_dataset.data[n_collaborators::n_collaborators + 1]

return {
'test_loader': torch.utils.data.DataLoader(
aggregator_test, batch_size=batch_size, shuffle=True)
}
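
As a quick sanity check of the slicing above: the MNIST test split has 10,000 samples, so with n_collaborators=4 the aggregator keeps every fifth sample starting at index 4, i.e. 2,000 samples:

import torch

data = torch.arange(10_000)  # stand-in for test_dataset.data
n_collaborators = 4
aggregator_shard = data[n_collaborators::n_collaborators + 1]
print(len(aggregator_shard))  # 2000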
@@ -0,0 +1,43 @@
# Copyright (C) 2020-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from copy import deepcopy

import torch
import torchvision


mnist_train = torchvision.datasets.MNIST('files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))

mnist_test = torchvision.datasets.MNIST('files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))

train_dataset = mnist_train
test_dataset = mnist_test


# Setup collaborators private attributes via callable function
def callable_to_initialize_collaborator_private_attributes(
index, n_collaborators, train_dataset, test_dataset, batch_size):
local_train = deepcopy(train_dataset)
local_test = deepcopy(test_dataset)
local_train.data = train_dataset.data[index::n_collaborators]
local_train.targets = train_dataset.targets[index::n_collaborators]
local_test.data = test_dataset.data[index::n_collaborators]
local_test.targets = test_dataset.targets[index::n_collaborators]

return {
'train_loader': torch.utils.data.DataLoader(
local_train, batch_size=batch_size, shuffle=True),
'test_loader': torch.utils.data.DataLoader(
local_test, batch_size=batch_size, shuffle=True)
}
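
The stride-based split in this callable gives each of the n collaborators a disjoint shard, and together the shards cover the whole dataset, which a few lines verify:

import torch

data = torch.arange(10_000)  # stand-in for train_dataset.data
shards = [data[i::4] for i in range(4)]  # data[index::n_collaborators], as above
print([len(s) for s in shards])  # [2500, 2500, 2500, 2500]
assert sum(len(s) for s in shards) == len(data)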