train_gradgen.py
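# Training entry point: fits a GeneratorVAE noise/gradient generator against a
# pretrained ResNet18 classifier using the GradTrainer loop defined in this repo.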
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, WeightedRandomSampler
from torch.optim import AdamW
import os, random, argparse
# custom
from Classifiers.ResNet18 import ResNet18
from NoiseGen.Generator import *
from GradPredModel import *
from GradTrainer import GradTrainer
from utils.MakeDataset import MakeDataset
# reproducibility
def initialization(seed=0):
    """Fix all RNG seeds and force deterministic cuDNN behaviour."""
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # multi-GPU
    cudnn.deterministic = True
    cudnn.benchmark = False
    os.environ['PYTHONHASHSEED'] = str(seed)
parser = argparse.ArgumentParser(description='Settings for PrefixModel training')
parser.add_argument('-n', '--exp_name', type=str, default='no_name', help='name of the experiment')
parser.add_argument('-device', '--device', type=int, default=0, help='device number to use (if available)')
parser.add_argument('-train_bs', '--train_bs', type=int, default=20, help='train batch size')
parser.add_argument('-test_bs', '--test_bs', type=int, default=20, help='test batch size')
args = parser.parse_args()
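# Example invocation (hypothetical experiment name; flags as defined above):
#   python train_gradgen.py -n gradgen_run -device 0 -train_bs 20 -test_bs 20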
#------------------Settings--------------------
# reproducibility
random_seed = 42
initialization(seed=random_seed)
print("random_seed :", random_seed)
total_epochs = 55
LR = 5e-2
# batch sizes come from the command-line arguments parsed above
TEST_BATCH_SIZE = args.test_bs
TRAIN_BATCH_SIZE = args.train_bs
base_dir = './data'
train_dataset = MakeDataset(base_dir, 'training_set')
test_dataset = MakeDataset(base_dir, 'test_set')
train_dataloader = DataLoader(dataset=train_dataset, batch_size=TRAIN_BATCH_SIZE,
                              shuffle=True, num_workers=8)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=TEST_BATCH_SIZE,
                             shuffle=False, num_workers=8)
#============Experiment================
torch.cuda.empty_cache()
MODEL_NAME = args.exp_name
USE_CUDA = torch.cuda.is_available()
device = torch.device('cuda:' + str(args.device) if USE_CUDA else 'cpu')
print("Using device: ", device)
# which generator to use, plus its optimizer
grad_generator = GeneratorVAE()
attacker_opt = AdamW(grad_generator.parameters(), lr=LR, weight_decay=0.01)
# load the pretrained ResNet18 classifier checkpoint
classifier = ResNet18(num_classes=2)
params = torch.load('./train_record/resnet18/best_model', map_location=device)
classifier.load_state_dict(params)
classifier_opt = AdamW(classifier.parameters(), lr=LR, weight_decay=0.01)
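# NOTE: classifier_opt is never handed to GradTrainer below; only attacker_opt is,
# so the classifier appears to serve as a fixed, pretrained target in this script.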
trainer = GradTrainer(grad_generator, classifier, MODEL_NAME, train_dataloader, test_dataloader,
                      attacker_opt, args.device, accum_iter=5)
trainer.train(total_epochs)
torch.cuda.empty_cache()
#============Experiment================