This is the official implementation of BAFFLE from the paper "Does Federated Learning Really Need Backpropagation?".
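As the title suggests, BAFFLE replaces backpropagation with gradient estimates built from forward passes only, i.e. finite differences of the loss under random weight perturbations. The snippet below is only an illustrative sketch of such a zeroth-order, forward-difference estimator; the function name, signature, and default values are assumptions for illustration, not the repository's API (the `K` argument mirrors the `--K` option used in the commands further down).

```python
import torch

@torch.no_grad()
def fd_gradient(model, loss_fn, x, y, K=100, sigma=1e-3):
    """Estimate d(loss)/d(theta) for every parameter using forward passes only."""
    params = list(model.parameters())
    base_loss = loss_fn(model(x), y)
    grads = [torch.zeros_like(p) for p in params]
    for _ in range(K):
        # Sample one random perturbation per parameter tensor, apply it,
        # measure the loss change, then undo the perturbation.
        deltas = [sigma * torch.randn_like(p) for p in params]
        for p, d in zip(params, deltas):
            p.add_(d)
        perturbed_loss = loss_fn(model(x), y)
        for p, d in zip(params, deltas):
            p.sub_(d)
        # Forward-difference weight (L(theta + delta) - L(theta)) / sigma^2,
        # averaged over the K perturbation directions.
        coeff = (perturbed_loss - base_loss) / (K * sigma ** 2)
        for g, d in zip(grads, deltas):
            g.add_(coeff * d)
    return grads
```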
Requirements:
python >= 3.6
torch >= 1.2.0
torchvision >= 0.4.0
wandb
ast
torch_ema
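The pip-installable dependencies can be set up with, for example (`ast` is part of the Python standard library and needs no separate installation):

pip install "torch>=1.2.0" "torchvision>=0.4.0" wandb torch_ema

Example training commands: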
# For LeNet
python main.py --K 100 --dataset MNIST --epochs 20 --lr 1e-2 --optimizer Adam --ema --normalization NoNorm
# For WRN-10-2
python main.py --K 100 --net-name wideresnet --depth 10 --width 2 --dataset MNIST --epochs 20 --lr 1e-2 --optimizer Adam --ema
# w/o EMA
python main.py --K 100 --dataset MNIST --epochs 20 --lr 1e-2 --optimizer Adam --normalization NoNorm
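The --ema flag, used in most runs here and omitted in the "# w/o EMA" run above, keeps an exponential moving average of the model weights, presumably via the torch_ema package listed in the requirements. A minimal sketch of the usual torch_ema pattern follows; the placeholder model and decay value are illustrative, not taken from the repository.

```python
import torch
from torch_ema import ExponentialMovingAverage

model = torch.nn.Linear(10, 2)                    # placeholder model
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

for step in range(100):
    # ... update the model's parameters here (e.g. an optimizer step) ...
    ema.update()                                  # fold the new weights into the average

# Temporarily swap in the averaged weights for evaluation, then restore the live ones.
with ema.average_parameters():
    pass                                          # run validation here
```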
# ReLU
python main.py --K 100 --dataset MNIST --epochs 20 --lr 1e-2 --optimizer Adam --normalization NoNorm --ema --activation "ReLU"
# SELU
python main.py --K 100 --dataset MNIST --epochs 20 --lr 1e-2 --optimizer Adam --normalization NoNorm --ema --activation "SELU"
# Central Scheme
python main.py --K 50 --fd-format center --dataset MNIST --epochs 20 --lr 1e-2 --optimizer Adam --normalization NoNorm --ema
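--fd-format center switches the difference quotient from the forward form L(theta + delta) - L(theta) to the central form L(theta + delta) - L(theta - delta), which is more accurate but needs two perturbed forward passes per direction instead of one (presumably why K is halved to 50 above, keeping the forward-pass budget comparable). A scalar illustration of the two quotients, with names chosen only for this sketch:

```python
# Forward vs. central finite-difference quotients on a scalar function; BAFFLE
# applies the same idea to random perturbations of the network weights.
def forward_diff(f, theta, sigma=1e-3):
    return (f(theta + sigma) - f(theta)) / sigma                 # one extra evaluation

def central_diff(f, theta, sigma=1e-3):
    return (f(theta + sigma) - f(theta - sigma)) / (2 * sigma)   # two evaluations

f = lambda t: t ** 2
print(forward_diff(f, 1.0), central_diff(f, 1.0))  # about 2.001 vs. about 2.000 (true derivative: 2)
```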
# One Epoch Communication
python main.py --K 100 --fedopt FedAvg --dataset MNIST --epochs 20 --lr 1e-2 --optimizer Adam --ema --normalization NoNorm
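--fedopt FedAvg aggregates the clients' models by federated averaging at each communication round, which the "One Epoch Communication" run above appears to do once per local epoch. Below is a minimal sketch of the server-side averaging step, assuming equally weighted clients; the helper name and the commented usage are illustrative, not the repository's code.

```python
import copy
import torch

def fedavg(client_states):
    """Average client state_dicts elementwise (equal client weights)."""
    avg = copy.deepcopy(client_states[0])
    for key in avg:
        avg[key] = torch.stack([s[key].float() for s in client_states]).mean(dim=0)
    return avg

# global_state = fedavg([client.state_dict() for client in clients])
# server_model.load_state_dict(global_state)
```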