Commit

remove opacus and DP

TsingZ0 committed Jun 3, 2024
1 parent 5012fce commit 004a405
Showing 7 changed files with 1 addition and 80 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -25,7 +25,7 @@ Figure 1: An Example for FedAvg. You can create a scenario using `generate_DATA.

The **statistical heterogeneity** phenomenon originates in the personalization of users, who generate non-IID (not Independent and Identically Distributed) and unbalanced data. A myriad of approaches have been proposed to tackle statistical heterogeneity in the FL scenario. In contrast, personalized FL (pFL) can take advantage of the statistically heterogeneous data to learn a personalized model for each user.

- Thanks to [@Stonesjtu](https://github.com/Stonesjtu/pytorch_memlab/blob/d590c489236ee25d157ff60ecd18433e8f9acbe3/pytorch_memlab/mem_reporter.py#L185), this library can also record the **GPU memory usage** for the model. By using the package [opacus](https://opacus.ai/), we introduce **DP (differential privacy)** into this library (please refer to `./system/flcore/clients/clientavg.py` for example). Following [FedCG](https://www.ijcai.org/proceedings/2022/0324.pdf), we also introduce the **[DLG (Deep Leakage from Gradients)](https://papers.nips.cc/paper_files/paper/2019/hash/60a6c4002cc7b29142def8871531281a-Abstract.html) attack** and **PSNR (Peak Signal-to-Noise Ratio) metric** to evaluate the privacy-preserving ability of tFL/pFL algorithms (please refer to `./system/flcore/servers/serveravg.py` for example). *Now we can train on some clients and evaluate performance on other new clients by setting `args.num_new_clients` in `./system/main.py`. Note that not all the tFL/pFL algorithms support this feature.*
+ Thanks to [@Stonesjtu](https://github.com/Stonesjtu/pytorch_memlab/blob/d590c489236ee25d157ff60ecd18433e8f9acbe3/pytorch_memlab/mem_reporter.py#L185), this library can also record the **GPU memory usage** for the model. Following [FedCG](https://www.ijcai.org/proceedings/2022/0324.pdf), we also introduce the **[DLG (Deep Leakage from Gradients)](https://papers.nips.cc/paper_files/paper/2019/hash/60a6c4002cc7b29142def8871531281a-Abstract.html) attack** and **PSNR (Peak Signal-to-Noise Ratio) metric** to evaluate the privacy-preserving ability of tFL/pFL algorithms (please refer to `./system/flcore/servers/serveravg.py` for example). *Now we can train on some clients and evaluate performance on other new clients by setting `args.num_new_clients` in `./system/main.py`. Note that not all the tFL/pFL algorithms support this feature.*

**Citation**

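The README paragraph above relies on PSNR to quantify how much a DLG reconstruction leaks. PSNR has a standard closed form; a minimal PyTorch sketch follows (illustrative only: the function name and the `max_val` default are assumptions, not this library's code):

```python
import torch

def psnr(original: torch.Tensor, reconstructed: torch.Tensor,
         max_val: float = 1.0) -> float:
    """Peak Signal-to-Noise Ratio between an input and its DLG reconstruction.

    Higher PSNR means a closer reconstruction, i.e. more privacy leakage.
    """
    mse = torch.mean((original - reconstructed) ** 2)
    if mse == 0:
        return float("inf")  # identical tensors
    return (10 * torch.log10(max_val ** 2 / mse)).item()
```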
1 change: 0 additions & 1 deletion env_cuda_latest.yaml
@@ -20,7 +20,6 @@ dependencies:
- torchvision
- calmsize
- memory-profiler
- - opacus
- portalocker
- cvxpy
- higher
16 changes: 0 additions & 16 deletions system/flcore/clients/clientavg.py
@@ -20,7 +20,6 @@
import numpy as np
import time
from flcore.clients.clientbase import Client
- from utils.privacy import *


class clientAVG(Client):
@@ -31,12 +30,6 @@ def train(self):
trainloader = self.load_train_data()
# self.model.to(self.device)
self.model.train()
-
- # differential privacy
- if self.privacy:
-     model_origin = copy.deepcopy(self.model)
-     self.model, self.optimizer, trainloader, privacy_engine = \
-         initialize_dp(self.model, self.optimizer, trainloader, self.dp_sigma)

start_time = time.time()

@@ -66,12 +59,3 @@ def train(self):

self.train_time_cost['num_rounds'] += 1
self.train_time_cost['total_cost'] += time.time() - start_time
-
- if self.privacy:
-     eps, DELTA = get_dp_params(privacy_engine)
-     print(f"Client {self.id}", f"epsilon = {eps:.2f}, sigma = {DELTA}")
-
-     for param, param_dp in zip(model_origin.parameters(), self.model.parameters()):
-         param.data = param_dp.data.clone()
-     self.model = model_origin
-     self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)
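Two notes on the block deleted above. First, opacus wraps the model in a `GradSampleModule` inside `initialize_dp` (see the reconstruction sketch under `system/utils/privacy.py` below), so the DP-trained weights had to be copied back into the original unwrapped module before the next round. Second, the removed print mislabels its output: `get_dp_params` returned `(epsilon, delta)`, so the value printed as `sigma` is actually delta. A self-contained sketch of the copy-back idiom (toy model, illustrative names):

```python
import copy
import torch

model = torch.nn.Linear(10, 2)
model_origin = copy.deepcopy(model)  # unwrapped copy saved before DP wrapping

# ... opacus wraps `model` and DP-SGD training runs here ...

# Copy the trained weights back into the plain module so later rounds
# (and server aggregation) see an ordinary nn.Module, then rebuild a
# fresh optimizer bound to it.
for param, param_dp in zip(model_origin.parameters(), model.parameters()):
    param.data = param_dp.data.clone()
model = model_origin
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
```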
3 changes: 0 additions & 3 deletions system/flcore/clients/clientbase.py
@@ -59,9 +59,6 @@ def __init__(self, args, id, train_samples, test_samples, **kwargs):
self.train_time_cost = {'num_rounds': 0, 'total_cost': 0.0}
self.send_time_cost = {'num_rounds': 0, 'total_cost': 0.0}

- self.privacy = args.privacy
- self.dp_sigma = args.dp_sigma

self.loss = nn.CrossEntropyLoss()
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)
self.learning_rate_scheduler = torch.optim.lr_scheduler.ExponentialLR(
16 changes: 0 additions & 16 deletions system/flcore/clients/clientntd.py
@@ -22,7 +22,6 @@
import time
import torch.nn.functional as F
from flcore.clients.clientbase import Client
- from utils.privacy import *


class clientNTD(Client):
@@ -39,12 +38,6 @@ def train(self):
trainloader = self.load_train_data()
# self.model.to(self.device)
self.model.train()
-
- # differential privacy
- if self.privacy:
-     model_origin = copy.deepcopy(self.model)
-     self.model, self.optimizer, trainloader, privacy_engine = \
-         initialize_dp(self.model, self.optimizer, trainloader, self.dp_sigma)

start_time = time.time()

@@ -76,15 +69,6 @@

self.train_time_cost['num_rounds'] += 1
self.train_time_cost['total_cost'] += time.time() - start_time
-
- if self.privacy:
-     eps, DELTA = get_dp_params(privacy_engine)
-     print(f"Client {self.id}", f"epsilon = {eps:.2f}, sigma = {DELTA}")
-
-     for param, param_dp in zip(model_origin.parameters(), self.model.parameters()):
-         param.data = param_dp.data.clone()
-     self.model = model_origin
-     self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

def set_parameters(self, model):
for new_param, old_param in zip(model.parameters(), self.model.parameters()):
6 changes: 0 additions & 6 deletions system/main.py
@@ -413,9 +413,6 @@ def run(args):
help="Running times")
parser.add_argument('-eg', "--eval_gap", type=int, default=1,
help="Rounds gap for evaluation")
- parser.add_argument('-dp', "--privacy", type=bool, default=False,
-                     help="differential privacy")
- parser.add_argument('-dps', "--dp_sigma", type=float, default=0.0)
parser.add_argument('-sfn', "--save_folder_name", type=str, default='items')
parser.add_argument('-ab', "--auto_break", type=bool, default=False)
parser.add_argument('-dlg', "--dlg_eval", type=bool, default=False)
@@ -515,9 +512,6 @@ def run(args):
print("Number of classes: {}".format(args.num_classes))
print("Backbone: {}".format(args.model))
print("Using device: {}".format(args.device))
print("Using DP: {}".format(args.privacy))
if args.privacy:
print("Sigma for DP: {}".format(args.dp_sigma))
print("Auto break: {}".format(args.auto_break))
if not args.auto_break:
print("Global rounds: {}".format(args.global_rounds))
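One observation while these flags are being touched: the removed `--privacy` argument used `type=bool`, and the surviving `--auto_break` and `--dlg_eval` arguments still do. argparse calls `bool()` on the raw string, and `bool("False")` is `True`, so passing `-ab False` actually enables the flag. A common fix (a sketch, not part of this commit):

```python
import argparse

def str2bool(value: str) -> bool:
    """Parse explicit true/false spellings instead of relying on bool()."""
    if value.lower() in ("true", "t", "yes", "1"):
        return True
    if value.lower() in ("false", "f", "no", "0"):
        return False
    raise argparse.ArgumentTypeError(f"boolean value expected, got {value!r}")

parser = argparse.ArgumentParser()
parser.add_argument("-ab", "--auto_break", type=str2bool, default=False)
parser.add_argument("-dlg", "--dlg_eval", type=str2bool, default=False)
args = parser.parse_args(["-ab", "False"])  # now correctly parsed as False
```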
37 changes: 0 additions & 37 deletions system/utils/privacy.py

This file was deleted.
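The deleted file's contents are not shown here, but its interface is pinned down by the call sites removed from `clientavg.py` and `clientntd.py`: `initialize_dp(model, optimizer, trainloader, dp_sigma)` returned `(model, optimizer, trainloader, privacy_engine)`, and `get_dp_params(privacy_engine)` returned `(epsilon, DELTA)`. For anyone who still needs DP after this commit, here is a reconstruction sketch against the opacus 1.x API. It is an approximation, not the verbatim deleted file; the `MAX_GRAD_NORM` and `DELTA` values are assumptions:

```python
# system/utils/privacy.py (reconstruction sketch, not the original file)
from opacus import PrivacyEngine

MAX_GRAD_NORM = 1.0  # per-sample gradient clipping bound (assumed value)
DELTA = 1e-5         # target delta for (epsilon, delta)-DP accounting (assumed value)

def initialize_dp(model, optimizer, data_loader, dp_sigma):
    # make_private wraps model/optimizer/loader for DP-SGD: per-sample
    # clipping to MAX_GRAD_NORM plus Gaussian noise scaled by dp_sigma.
    privacy_engine = PrivacyEngine()
    model, optimizer, data_loader = privacy_engine.make_private(
        module=model,
        optimizer=optimizer,
        data_loader=data_loader,
        noise_multiplier=dp_sigma,
        max_grad_norm=MAX_GRAD_NORM,
    )
    return model, optimizer, data_loader, privacy_engine

def get_dp_params(privacy_engine):
    # epsilon spent so far, at the fixed target DELTA
    return privacy_engine.get_epsilon(delta=DELTA), DELTA
```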
