model.py
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence


class RNN(nn.Module):
    def __init__(self, vocab_len, emb_dim,
                 hidden_size, output_size, dropout=0.):
        super(RNN, self).__init__()
        self.emb_size = emb_dim
        self.hidden_size = hidden_size
        # The extra embedding row at index vocab_len is reserved for padding.
        self.emb = nn.Embedding(vocab_len + 1, emb_dim, padding_idx=vocab_len)
        self.lstm = nn.LSTM(emb_dim, hidden_size, 1, batch_first=True)
        # self.bn = nn.BatchNorm1d(hidden_size)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_size, hidden_size)
        self.classifier = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()

    def forward(self, inp, seq_lens):
        bs = inp.shape[0]
        # Zero initial hidden/cell state, created on the same device as the input.
        hidden = torch.zeros(1, bs, self.hidden_size, device=inp.device)
        inp = self.emb(inp)
        # seq_lens must be a CPU tensor or list for pack_padded_sequence.
        data = pack_padded_sequence(inp, seq_lens, batch_first=True,
                                    enforce_sorted=False)
        out_packed, (h, c) = self.lstm(data, (hidden, hidden))
        out_padded, lengths = pad_packed_sequence(out_packed, batch_first=True)
        # Classify from the final hidden state of the single LSTM layer.
        out = torch.flatten(h.permute(1, 0, 2), start_dim=1)
        out = self.dropout(out)
        out = self.relu(self.fc(out))
        out = self.classifier(out)
        return out, out_padded

    def infer(self, inp):
        bs = inp.shape[0]
        # Create the initial state on the input's device so inference also
        # works when the model lives on the GPU.
        hidden = torch.zeros(1, bs, self.hidden_size, device=inp.device)
        inp = self.emb(inp)
        acts, (h, c) = self.lstm(inp, (hidden, hidden))
        out = torch.flatten(h.permute(1, 0, 2), start_dim=1)
        out = self.dropout(out)
        out = self.relu(self.fc(out))
        out = self.classifier(out)
        return out


def get_model(vocab_len, emb_dim, n_hidden, n_categories, dropout):
    rnn = RNN(vocab_len, emb_dim, n_hidden, n_categories, dropout=dropout)
    return rnn
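

# A minimal smoke test sketching how the model is used with variable-length,
# padded batches. The hyperparameters, batch shapes, and sequence lengths below
# are illustrative assumptions, not values taken from the surrounding project.
if __name__ == '__main__':
    vocab_len, emb_dim, n_hidden, n_categories = 100, 32, 64, 5
    model = get_model(vocab_len, emb_dim, n_hidden, n_categories, dropout=0.5)
    model.eval()  # disables dropout for deterministic output
    # Batch of 3 token-index sequences padded to a common length of 7;
    # index vocab_len is the padding token handled by the embedding layer.
    inp = torch.randint(0, vocab_len, (3, 7))
    seq_lens = torch.tensor([7, 5, 2])  # true lengths; must stay on CPU for packing
    logits, out_padded = model(inp, seq_lens)
    print(logits.shape)      # torch.Size([3, 5])    — one score per category
    print(out_padded.shape)  # torch.Size([3, 7, 64]) — per-step LSTM outputs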