-
Notifications
You must be signed in to change notification settings - Fork 0
/
Trainer.py
156 lines (116 loc) · 4.85 KB
/
Trainer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from torch.autograd.variable import *
import torch.optim as optim
# DDP Imports
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
import os
import numpy as np
import h5py
import json
import numpy as np
import numpy.random as random
import glob
class trainer:
    """Per-process worker for DistributedDataParallel training.

    Wraps a model in DDP on the local GPU, runs alternating train/validation
    epochs, records the mean loss of each epoch, and (on global rank 0 only)
    writes periodic snapshots plus a running best-validation checkpoint.
    """

    def __init__(
        self,
        model,
        train_data,
        val_data,
        optimizer,
        save_every,
        outdir,
        loss,
        args
    ):
        # args must expose .gpu (local device index) and .rank (global rank)
        # — assumed to come from torch.distributed launch plumbing; TODO confirm.
        self.gpu_id = args.gpu
        self.global_rank = args.rank
        self.model = model.to(self.gpu_id)
        self.model.gpu_id = self.gpu_id
        self.model = DDP(self.model, device_ids=[self.gpu_id], find_unused_parameters=False)
        self.train_data = train_data
        self.val_data = val_data
        self.optimizer = optimizer
        self.save_every = save_every
        self.epochs_run = 0
        self.outdir = outdir
        self.loss = loss
        self.loss_vals_training = []    # mean train loss per epoch
        self.loss_vals_validation = []  # mean val loss per epoch
        self.best_val_loss = 10e10      # sentinel; first real val loss always beats it

    def _run_epoch_val(self, epoch):
        """Run one validation epoch and append its mean loss to history."""
        with torch.no_grad():
            self.model.eval()
            loss_validation = []
            # FIX: original compared against len(self.train_data), so the
            # wrong batch was skipped whenever the two loaders differ in length.
            n_val_batches = len(self.val_data)
            for istep, (x, y) in enumerate(self.val_data):
                # Skip the (possibly partial) final batch, mirroring training.
                if istep != n_val_batches - 1:
                    lt, output = self._run_batch_val(istep, x, y)
                    loss_validation.append(lt)
            self.loss_vals_validation.append(np.mean(loss_validation))

    def _run_batch_val(self, istep, x, y):
        """Forward one validation batch; return (loss value, model output).

        Called under torch.no_grad() with the model already in eval mode, so
        the original's per-batch eval() call and gradient zeroing were dropped
        (no gradients exist here); the duplicate x.to(...) was also removed.
        """
        x = x.to(self.gpu_id)
        y = y.to(self.gpu_id)
        output = self.model(x)
        l = self.loss(output, y)
        torch.cuda.empty_cache()
        return l.item(), output

    def _run_batch_train(self, istep, x, y):
        """Forward/backward/step one training batch; return (loss value, output)."""
        self.model.train(True)
        x = x.to(self.gpu_id)
        y = y.to(self.gpu_id)
        # One grad clear per step replaces the original's manual
        # `param.grad = None` loop plus two zero_grad() calls.
        self.optimizer.zero_grad(set_to_none=True)
        output = self.model(x)
        l = self.loss(output, y)
        l.backward()
        self.optimizer.step()
        torch.cuda.empty_cache()
        return l.item(), output

    def _run_epoch_train(self, epoch):
        """Run one training epoch and append its mean loss to history."""
        self.model.train(True)
        loss_training = []
        # Hoisted: original recomputed len() every iteration and also set a
        # dead, misnamed `train_data_length = len(self.val_data)`.
        n_train_batches = len(self.train_data)
        for istep, (x, y) in enumerate(self.train_data):
            # Skip the (possibly partial) final batch.
            if istep != n_train_batches - 1:
                lt, output = self._run_batch_train(istep, x, y)
                loss_training.append(lt)
        self.loss_vals_training.append(np.mean(loss_training))

    def get_normalisation_weight(self, len_current_samples, len_of_longest_samples):
        """Return per-sample weights that upweight a shorter sample set so its
        total weight matches that of the longest set (array of length
        len_current_samples, each entry longest/current)."""
        return np.ones(len_current_samples) * (len_of_longest_samples / len_current_samples)

    def _save_snapshot(self, epoch):
        """Write a checkpoint named after the epoch and latest train/val losses."""
        # NOTE(review): self.model is the DDP wrapper, so keys carry a
        # "module." prefix — loaders must account for it; kept for
        # compatibility with existing checkpoints. Consider
        # self.model.module.state_dict() if the format can change.
        torch.save(self.model.state_dict(), "{}/epoch_{}_{}_loss_{}_{}.pth".format(self.outdir, epoch, 'test_model', round(self.loss_vals_training[-1], 4), round(self.loss_vals_validation[-1], 4)))
        if self.global_rank == 0:
            print(f" Training snapshot saved")

    def train(self, max_epochs: int):
        """Train for max_epochs epochs, validating each epoch; rank 0 logs,
        snapshots every save_every epochs, and tracks the best val model."""
        # `random` is numpy.random (aliased at import), so the original's
        # second random.seed(max_epochs) call was redundant.
        np.random.seed(max_epochs)
        # FIX: replaces os.system("mkdir -p ./" + outdir) shell-out.
        os.makedirs(self.outdir, exist_ok=True)
        for epoch in range(max_epochs):
            # (Original guarded `if epoch < max_epochs` — always true in this
            # range loop, so it was dropped.)
            self._run_epoch_train(epoch)
            self._run_epoch_val(epoch)
            if self.global_rank == 0:
                print(f"[GPU{self.global_rank}] Epoch {epoch} | Steps: {len(self.train_data)} | Train Loss: {round(self.loss_vals_training[-1],4)} | Val Loss: {round(self.loss_vals_validation[-1],4)}")
                if epoch % self.save_every == 0 or epoch == max_epochs - 1:
                    self._save_snapshot(epoch)
                # Best-model checkpoint kept under the rank-0 guard so only
                # one process writes best_model.pth.
                if self.loss_vals_validation[-1] < self.best_val_loss:
                    self.best_val_loss = self.loss_vals_validation[-1]
                    torch.save(self.model.state_dict(), "{}/best_model.pth".format(self.outdir))
            torch.cuda.empty_cache()