birnn_model.py
import torch
import torch.nn as nn


class RNN(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
                 bidirectional, dropout, pad_idx):
        super().__init__()
        # 1. Embedding layer (maps token indices to dense vectors, ignoring the pad index)
        # 2. RNN layer
        # 3. Fully connected layer (linear transformation to the output dimension)
        # 4. Dropout
        self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                      embedding_dim=embedding_dim,
                                      padding_idx=pad_idx)
        self.rnn = nn.RNN(input_size=embedding_dim,
                          hidden_size=hidden_dim,
                          num_layers=n_layers,
                          dropout=dropout,
                          bidirectional=bidirectional)  # forward() assumes bidirectional=True
        # The final forward and backward hidden states are concatenated,
        # so the classifier input is hidden_dim * 2 (not hidden_dim * n_layers).
        self.fc = nn.Linear(hidden_dim * 2, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text, text_lengths):
        # text = [sent_len, batch_size]; text_lengths is unused here but kept in the
        # signature (it would be needed for packed padded sequences).
        # 1. Embed each token and apply dropout -> [sent_len, batch_size, emb_dim]
        # 2. Run the RNN: output = [sent_len, batch_size, hid_dim * num_directions],
        #    hidden = [num_layers * num_directions, batch_size, hid_dim]
        # 3. Concatenate the final forward (hidden[-2, :, :]) and backward (hidden[-1, :, :])
        #    hidden states and apply dropout.
        embedding_dropout = self.dropout(self.embedding(text))
        output, hidden = self.rnn(embedding_dropout)
        hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
        return self.fc(hidden)
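

# A minimal usage sketch (an addition, not part of the original file): the vocabulary
# size, dimensions, and tensor shapes below are illustrative assumptions chosen only
# to show the expected input/output shapes of the model.
if __name__ == "__main__":
    model = RNN(vocab_size=5000, embedding_dim=100, hidden_dim=256, output_dim=1,
                n_layers=2, bidirectional=True, dropout=0.5, pad_idx=0)
    # text = [sent_len, batch_size] with random token indices; all lengths equal sent_len
    text = torch.randint(0, 5000, (30, 4))
    text_lengths = torch.full((4,), 30)
    predictions = model(text, text_lengths)
    print(predictions.shape)  # torch.Size([4, 1]) -> one logit per sentence in the batch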