# lstm.py: a many-to-one meLSTM classifier for MNIST
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from modules import meLSTM  # custom LSTM implementation provided by this repo

torch.manual_seed(1111)
# Hyperparameters
sequence_length = 28   # each 28x28 MNIST image is read as 28 time steps (one row per step)
input_size = 28        # 28 pixel values per row
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
k = 100                # meLSTM-specific option, passed through to the custom module
simplified = False     # meLSTM-specific option, passed through to the custom module
# MNIST dataset
train_dataset = dsets.MNIST(root='../data/',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)
test_dataset = dsets.MNIST(root='../data/',
                           train=False,
                           transform=transforms.ToTensor())
# Data loaders (input pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
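
# Optional sanity check on the input pipeline (an added sketch, not part of the
# original script): each batch arrives as [batch, 1, 28, 28] and is later
# reshaped to [batch, sequence_length, input_size], i.e. one image row per time step.
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.size())                                         # torch.Size([100, 1, 28, 28])
print(sample_images.view(-1, sequence_length, input_size).size())   # torch.Size([100, 28, 28])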
# RNN model (many-to-one): the last hidden state of the meLSTM stack is fed
# to a linear layer that produces the class scores.
class RNNModel(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes,
                 bias=True, grad_clip=None, k=1, simplified=False):
        super(RNNModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = meLSTM(input_size, hidden_size, num_layers=num_layers,
                          bias=bias, return_sequences=False, grad_clip=grad_clip,
                          k=k, simplified=simplified)
        self.fc = nn.Linear(hidden_size, num_classes, bias=bias)

    def forward(self, x):
        # Zero initial hidden and cell states for every layer
        zeros = Variable(torch.zeros(x.size(0), self.hidden_size))
        initial_states = [(zeros, zeros)] * self.num_layers
        # Forward propagate the RNN; with return_sequences=False only the
        # last time step's hidden state comes back
        out, _ = self.rnn(x, initial_states)
        # Decode the last hidden state into class scores
        out = self.fc(out)
        return out
rnn = RNNModel(input_size, hidden_size, num_layers, num_classes,
               bias=True, grad_clip=1, k=k, simplified=simplified)
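
# Optional shape check (an added sketch, not part of the original script; it
# assumes meLSTM consumes batch-first input of shape
# [batch, sequence_length, input_size], as the training loop below does):
dummy = Variable(torch.randn(4, sequence_length, input_size))
print(rnn(dummy).size())  # expected: torch.Size([4, 10]), one score per class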
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Reshape each 28x28 image into a sequence of 28 rows of 28 pixels
        images = Variable(images.view(-1, sequence_length, input_size))
        labels = Variable(labels)

        # Forward + backward + optimize
        optimizer.zero_grad()
        outputs = rnn(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            # loss.item() extracts the scalar loss value (loss.data[0] on pre-0.4 PyTorch)
            print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
                  % (epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.item()))
# Test the model
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images.view(-1, sequence_length, input_size))
    outputs = rnn(images)
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()
print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# Save the Model
torch.save(rnn.state_dict(), 'rnn.pkl')
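
# Reloading the saved weights later (an added sketch): rebuild the model with
# the same hyperparameters, then restore the state dict written above.
restored = RNNModel(input_size, hidden_size, num_layers, num_classes,
                    bias=True, grad_clip=1, k=k, simplified=simplified)
restored.load_state_dict(torch.load('rnn.pkl'))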