# transformer_model.py
from torch import nn, einsum
from einops import rearrange
import numpy as np


### POSITION EMBEDDINGS ###
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
"""
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_size, dtype=np.float32)
grid_w = np.arange(grid_size, dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_size, grid_size])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed
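
# Usage sketch for get_2d_sincos_pos_embed above (illustrative values, not part of the original API):
# for a ViT-style encoder with embed_dim=768 on a 14x14 patch grid,
#   pos_embed = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=True)
#   pos_embed.shape  # (1 + 14*14, 768) == (197, 768); row 0 is an all-zero slot for the cls token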


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
assert embed_dim % 2 == 0
# use half of dimensions to encode grid_h
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
"""
assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)  # np.float was removed in NumPy >= 1.24
omega /= embed_dim / 2.
omega = 1. / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
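
# Usage sketch for get_1d_sincos_pos_embed_from_grid above (illustrative):
#   emb = get_1d_sincos_pos_embed_from_grid(16, np.arange(10))  # emb.shape == (10, 16)
# The first 8 columns are sin(pos * omega_d) and the last 8 are cos(pos * omega_d),
# with omega_d = 1 / 10000**(d / 8) for d = 0..7.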


### MODEL ###
class FFN(nn.Module):
def __init__(self,
dim,
mult=4,
dropout=0.,
):
"""
FFN (FeedForward Network)
:param dim: model dimension (number of features)
:param mult: multiply the model dimension by mult to get the FFN's inner dimension
:param dropout: dropout between 0 and 1
"""
super().__init__()
inner_dim = int(dim * mult)
self.net = nn.Sequential(
nn.Linear(dim, inner_dim), # (BSZ, num_patches, inner_dim)
nn.GELU(), # (BSZ, num_patches, inner_dim)
nn.Dropout(dropout), # (BSZ, num_patches, inner_dim)
nn.Linear(inner_dim, dim) # (BSZ, num_patches, dim)
)
self.input_norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.input_norm(x) # (BSZ, num_patches, dim)
return self.net(x) # (BSZ, num_patches, dim)
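
# Usage sketch for FFN above (illustrative shapes, assumes `import torch`):
#   ffn = FFN(dim=768, mult=4, dropout=0.1)
#   y = ffn(torch.randn(2, 196, 768))  # y.shape == (2, 196, 768); inner dimension is 768 * 4 = 3072
# FFN applies pre-LayerNorm internally; the residual connection is added by the caller (see BaseTransformer).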


class Attention(nn.Module):
def __init__(self,
dim,
num_heads=8,
dropout=0.,
):
"""
Self-Attention module
:param dim: model dimension (number of features)
:param num_heads: number of attention heads
:param dropout: dropout between 0 and 1
"""
super().__init__()
self.num_heads = num_heads
assert dim % num_heads == 0, 'dim must be evenly divisible by num_heads'
dim_head = int(dim / num_heads)
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, dim * 3, bias=False)
self.to_out = nn.Linear(dim, dim)
self.input_norm = nn.LayerNorm(dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.input_norm(x) # (BSZ, num_patches, dim)
q, k, v = self.to_qkv(x).chunk(3, dim=-1) # (BSZ, num_patches, dim)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads), (q, k, v)) # (BSZ, num_heads, num_patches, dim_head)
attention_scores = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale # (BSZ, num_heads, num_patches, num_patches)
attn = attention_scores.softmax(dim=-1) # (BSZ, num_heads, num_patches, num_patches)
attn = self.dropout(attn) # (BSZ, num_heads, num_patches, num_patches)
out = einsum('b h i j, b h j d -> b h i d', attn, v) # (BSZ, num_heads, num_patches, dim_head)
out = rearrange(out, 'b h n d -> b n (h d)') # (BSZ, num_patches, dim)
return self.to_out(out) # (BSZ, num_patches, dim)
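
# Usage sketch for Attention above (illustrative shapes, assumes `import torch`):
#   attn = Attention(dim=768, num_heads=8, dropout=0.1)
#   y = attn(torch.randn(2, 196, 768))  # y.shape == (2, 196, 768)
# Each of the 8 heads attends over all 196 patches with dim_head = 768 / 8 = 96 and scale = 96 ** -0.5.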


class BaseTransformer(nn.Module):
def __init__(self,
dim,
depth,
num_heads=8,
attn_dropout=0.,
ff_dropout=0.,
ff_mult=4,
):
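        """
        Pre-norm Transformer encoder: a stack of `depth` residual (Attention, FFN) blocks
        :param dim: model dimension (number of features)
        :param depth: number of transformer blocks
        :param num_heads: number of attention heads
        :param attn_dropout: dropout for the attention weights, between 0 and 1
        :param ff_dropout: dropout inside the FFN, between 0 and 1
        :param ff_mult: multiply the model dimension by ff_mult to get the FFN's inner dimension
        """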
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim=dim, num_heads=num_heads, dropout=attn_dropout),
FFN(dim=dim, mult=ff_mult, dropout=ff_dropout),
]))
self.norm_out = nn.LayerNorm(dim)
def forward(self, x):
for self_attn, ffn in self.layers:
x = self_attn(x) + x # (BSZ, num_patches, dim)
x = ffn(x) + x # (BSZ, num_patches, dim)
return self.norm_out(x) # (BSZ, num_patches, dim)
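

# Minimal usage sketch (illustrative, not part of the original module): embeds a 14x14 grid of
# 768-dim patch tokens with the fixed 2D sin-cos position embeddings and runs them through the encoder.
if __name__ == "__main__":
    import torch

    bsz, grid_size, dim = 2, 14, 768
    x = torch.randn(bsz, grid_size * grid_size, dim)  # (2, 196, 768) patch tokens

    # Fixed (non-learned) 2D sin-cos position embeddings, broadcast over the batch
    pos_embed = get_2d_sincos_pos_embed(dim, grid_size)  # (196, 768) numpy array
    x = x + torch.from_numpy(pos_embed).float().unsqueeze(0)

    model = BaseTransformer(dim=dim, depth=2, num_heads=8, attn_dropout=0.1, ff_dropout=0.1)
    out = model(x)
    print(out.shape)  # torch.Size([2, 196, 768])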