train.py
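"""Train an LSTM on the stock data loaded by dataset.getData.

Optimizes mean-squared error with Adam, prints the summed loss per
epoch, and saves a checkpoint of the model's state_dict to
args.save_file every 10 epochs and again after training finishes.
"""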
import torch
import torch.nn as nn

from LSTMModel import lstm
from parser_my import args
from dataset import getData


def train():
    # Build the model and move it to the configured device.
    model = lstm(input_size=args.input_size, hidden_size=args.hidden_size,
                 num_layers=args.layers, output_size=1,
                 dropout=args.dropout, batch_first=args.batch_first)
    model.to(args.device)
    criterion = nn.MSELoss()  # mean-squared-error loss for regression
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)  # Adam, learning rate from args (default 0.001)
    close_max, close_min, train_loader, test_loader = getData(
        args.corpusFile, args.sequence_length, args.batch_size)
    for i in range(args.epochs):
        total_loss = 0
        for idx, (data, label) in enumerate(train_loader):
            # Drop the extra dimension added by the DataLoader and move
            # the batch to the GPU when one is requested.
            if args.useGPU:
                data1 = data.squeeze(1).cuda()
                pred = model(data1)
                pred = pred[1, :, :]  # slice of the model output used as the prediction
                label = label.unsqueeze(1).cuda()
            else:
                data1 = data.squeeze(1)
                pred = model(data1)
                pred = pred[1, :, :]
                label = label.unsqueeze(1)
            loss = criterion(pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(total_loss)
        # Checkpoint every 10 epochs, and once more after the final epoch.
        if i % 10 == 0:
            torch.save({'state_dict': model.state_dict()}, args.save_file)
            print('epoch %d: model saved' % i)
    torch.save({'state_dict': model.state_dict()}, args.save_file)
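
# A minimal sketch of reloading a saved checkpoint for inference; the
# helper name is hypothetical, but it assumes only what train() saves
# above: a {'state_dict': ...} dict written to args.save_file.
def load_trained_model():
    model = lstm(input_size=args.input_size, hidden_size=args.hidden_size,
                 num_layers=args.layers, output_size=1,
                 dropout=args.dropout, batch_first=args.batch_first)
    checkpoint = torch.load(args.save_file, map_location=args.device)
    model.load_state_dict(checkpoint['state_dict'])
    model.to(args.device)
    model.eval()  # disable dropout for deterministic predictions
    return model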

if __name__ == '__main__':
    train()