# model.py
import math

import torch
import torch.nn as nn


class InputEmbeddings(nn.Module):
    def __init__(self, d_model: int, vocab_size: int):
        super().__init__()
        self.d_model = d_model
        self.vocab_size = vocab_size
        self.embedding = nn.Embedding(vocab_size, d_model)

    def forward(self, x):
        # Scale the embeddings by sqrt(d_model), as in "Attention Is All You Need" (section 3.4).
        return self.embedding(x) * (self.d_model ** 0.5)

class PositionalEmbedding(nn.Module):
    def __init__(self, d_model: int, seq_len: int, dropout: float):
        super().__init__()
        self.d_model = d_model
        self.seq_len = seq_len
        self.dropout = nn.Dropout(dropout)
        pe = torch.zeros(seq_len, d_model)  # empty so far, will be filled with the positional embeddings (seq_len, d_model)
        position = torch.arange(0, seq_len, dtype=torch.float).unsqueeze(1)  # (seq_len, 1)
        # Negative exponent makes this the reciprocal of 10000^(2i / d_model), so it can be multiplied with `position` directly.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # (1, seq_len, d_model)
        self.register_buffer(name="pe", tensor=pe)  # pe is non-trainable but still moves with .to(device)

    def forward(self, x):  # x is (batch, seq_len, d_model)
        x = x + (self.pe[:, :x.shape[1], :]).requires_grad_(False)
        return self.dropout(x)
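
# For reference, the encoding above implements the formulas from "Attention Is All You Need":
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
# computed in exp/log form for numerical stability.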

class LayerNorm(nn.Module):
    def __init__(self, eps: float = 1e-6):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1))  # multiplicative scale
        self.bias = nn.Parameter(torch.zeros(1))  # additive shift
        self.eps = eps

    def forward(self, x):
        # Normalize over the feature (d_model) dimension, i.e. the last one.
        mean = x.mean(dim=-1, keepdim=True)
        std = x.std(dim=-1, keepdim=True)
        return self.alpha * (x - mean) / (std + self.eps) + self.bias

class FeedForward(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout: float):
        super().__init__()
        self.linear_1 = nn.Linear(d_model, d_ff)  # W1 and b1
        self.dropout = nn.Dropout(dropout)
        self.linear_2 = nn.Linear(d_ff, d_model)  # W2 and b2

    def forward(self, x):
        # (batch, seq_len, d_model) -> (batch, seq_len, d_ff) -> (batch, seq_len, d_model)
        # ReLU between the two linear layers, as in the original FFN(x) = max(0, xW1 + b1)W2 + b2.
        return self.linear_2(self.dropout(torch.relu(self.linear_1(x))))

class MultiHeadAttention(nn.Module):
    def __init__(self, d_model: int, h: int, dropout: float):
        super().__init__()
        self.d_model = d_model
        self.h = h
        assert self.d_model % self.h == 0, "d_model is not divisible by h"
        self.d_k = self.d_model // self.h
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_o = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(dropout)

    @staticmethod
    def attention(query, key, value, mask, dropout: nn.Dropout):
        d_k = query.shape[-1]
        # (batch, h, seq_len, d_k) @ (batch, h, d_k, seq_len) -> (batch, h, seq_len, seq_len), i.e. the shape of QK^T before the softmax.
        attention_scores = (query @ key.transpose(-2, -1)) / math.sqrt(d_k)
        if mask is not None:
            attention_scores.masked_fill_(mask == 0, -1e9)
        attention_scores = attention_scores.softmax(dim=-1)  # (batch, h, seq_len, seq_len)
        if dropout is not None:
            attention_scores = dropout(attention_scores)
        # attention_scores are also returned so the score of each interaction can be visualized.
        return (attention_scores @ value), attention_scores

    def forward(self, q, k, v, mask):
        # mask removes interactions that should not be attended to (e.g. positions above the main diagonal for causal masking)
        query = self.w_q(q)  # (batch, seq_len, d_model) -> (batch, seq_len, d_model)
        key = self.w_k(k)    # (batch, seq_len, d_model) -> (batch, seq_len, d_model)
        value = self.w_v(v)  # (batch, seq_len, d_model) -> (batch, seq_len, d_model)
        # (batch, seq_len, d_model) -> (batch, seq_len, h, d_k) -> (batch, h, seq_len, d_k)
        query = query.view(query.shape[0], query.shape[1], self.h, self.d_k).transpose(1, 2)
        key = key.view(key.shape[0], key.shape[1], self.h, self.d_k).transpose(1, 2)
        value = value.view(value.shape[0], value.shape[1], self.h, self.d_k).transpose(1, 2)
        x, self.attention_scores = MultiHeadAttention.attention(query, key, value, mask, self.dropout)
        # (batch, h, seq_len, d_k) -> (batch, seq_len, h, d_k) -> (batch, seq_len, d_model)
        x = x.transpose(1, 2).contiguous().view(x.shape[0], -1, self.h * self.d_k)
        # (batch, seq_len, d_model) x (d_model, d_model) -> (batch, seq_len, d_model)
        return self.w_o(x)
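
# Illustrative usage (not part of the original file): a causal mask in the convention expected
# above, where 1 = attend and 0 = blocked, broadcast over the batch and head dimensions.
#
#   seq_len = 5
#   causal_mask = torch.tril(torch.ones(1, 1, seq_len, seq_len)).int()  # (1, 1, seq_len, seq_len)
#   mha = MultiHeadAttention(d_model=512, h=8, dropout=0.1)
#   out = mha(x, x, x, causal_mask)  # with x of shape (batch, seq_len, 512)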

class ResidualConnection(nn.Module):
    def __init__(self, dropout: float):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.norm = LayerNorm()

    def forward(self, x, sublayer):
        return x + self.dropout(sublayer(self.norm(x)))
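
# Note: ResidualConnection applies LayerNorm before the sublayer (the pre-norm variant),
# whereas the original paper normalizes after the residual addition (post-norm).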

class EncoderBlock(nn.Module):
    def __init__(self, self_attention: MultiHeadAttention, feed_forward: FeedForward, dropout: float):
        super().__init__()
        self.self_attention = self_attention
        self.feed_forward = feed_forward
        self.residual_connections = nn.ModuleList([
            ResidualConnection(dropout=dropout) for _ in range(2)
        ])

    def forward(self, x, src_mask):
        x = self.residual_connections[0](x, lambda x: self.self_attention(x, x, x, src_mask))
        x = self.residual_connections[1](x, lambda x: self.feed_forward(x))
        return x

class Encoder(nn.Module):
    def __init__(self, layers: nn.ModuleList):
        super().__init__()
        self.layers = layers
        self.norm = LayerNorm()

    def forward(self, x, mask):
        for layer in self.layers:
            x = layer(x, mask)
        return self.norm(x)

class DecoderBlock(nn.Module):
    def __init__(self, self_attention: MultiHeadAttention, cross_attention: MultiHeadAttention, feed_forward: FeedForward, dropout: float):
        super().__init__()
        self.self_attention = self_attention
        self.cross_attention = cross_attention
        self.feed_forward = feed_forward
        self.dropout = dropout
        self.residual_connections = nn.ModuleList([
            ResidualConnection(dropout) for _ in range(3)
        ])

    def forward(self, x, encoder_output, src_mask, target_mask):
        x = self.residual_connections[0](x, lambda x: self.self_attention(x, x, x, target_mask))
        x = self.residual_connections[1](x, lambda x: self.cross_attention(x, encoder_output, encoder_output, src_mask))
        x = self.residual_connections[2](x, lambda x: self.feed_forward(x))
        return x

class Decoder(nn.Module):
    def __init__(self, layers: nn.ModuleList):
        super().__init__()
        self.layers = layers
        self.norm = LayerNorm()

    def forward(self, x, encoder_output, src_mask, target_mask):
        for layer in self.layers:
            x = layer(x, encoder_output, src_mask, target_mask)
        return self.norm(x)

class ProjectionLayer(nn.Module):
    def __init__(self, d_model, vocab_size):
        super().__init__()
        self.proj = nn.Linear(d_model, vocab_size)

    def forward(self, x):
        # (batch, seq_len, d_model) -> (batch, seq_len, vocab_size)
        return torch.log_softmax(self.proj(x), dim=-1)
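
# Note: ProjectionLayer returns log-probabilities, so it pairs with nn.NLLLoss during training
# (equivalent to applying nn.CrossEntropyLoss to the raw logits).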

class Transformer(nn.Module):
    def __init__(self, encoder: Encoder, decoder: Decoder, src_embedding: InputEmbeddings, target_embeddings: InputEmbeddings, src_position: PositionalEmbedding, target_position: PositionalEmbedding, projection_layer: ProjectionLayer):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embeddings = src_embedding
        self.target_embeddings = target_embeddings
        self.src_position = src_position
        self.target_position = target_position
        self.projection_layer = projection_layer

    def encode(self, src, src_mask):
        # apply the source embeddings to the input source sentence first
        src = self.src_embeddings(src)
        # apply positional embeddings to the source embeddings
        src = self.src_position(src)
        # then run the encoder over these position-aware embeddings
        return self.encoder(src, src_mask)

    def decode(self, encoder_output, src_mask, target, target_mask):
        # apply the target embeddings to the target sentence
        target = self.target_embeddings(target)
        # apply positional embeddings to the target embeddings
        target = self.target_position(target)
        # run the decoder over the target, attending to the encoder output
        return self.decoder(target, encoder_output, src_mask, target_mask)

    def project(self, x):
        return self.projection_layer(x)

def build_transformer(src_vocab_size: int, target_vocab_size: int, src_seq_len: int, target_seq_len: int, d_model: int = 512, N: int = 6, h: int = 8, dropout: float = 0.1, d_ff: int = 2048) -> Transformer:
    # first create the source and target embeddings (for the encoder and decoder)
    src_embedding = InputEmbeddings(d_model, src_vocab_size)
    target_embedding = InputEmbeddings(d_model, target_vocab_size)
    # then the positional embedding layers
    src_position = PositionalEmbedding(d_model, src_seq_len, dropout)
    target_position = PositionalEmbedding(d_model, target_seq_len, dropout)
    # build the N encoder blocks
    encoder_blocks = []
    for _ in range(N):
        encoder_self_attention_block = MultiHeadAttention(d_model, h, dropout)
        feed_forward_block = FeedForward(d_model, d_ff, dropout)
        encoder_block = EncoderBlock(encoder_self_attention_block, feed_forward_block, dropout)
        encoder_blocks.append(encoder_block)
    # build the N decoder blocks
    decoder_blocks = []
    for _ in range(N):
        decoder_self_attention_block = MultiHeadAttention(d_model, h, dropout)
        decoder_cross_attention_block = MultiHeadAttention(d_model, h, dropout)
        feed_forward_block = FeedForward(d_model, d_ff, dropout)
        decoder_block = DecoderBlock(decoder_self_attention_block, decoder_cross_attention_block, feed_forward_block, dropout)
        decoder_blocks.append(decoder_block)
    # create the encoder and decoder from these blocks
    encoder = Encoder(nn.ModuleList(encoder_blocks))
    decoder = Decoder(nn.ModuleList(decoder_blocks))
    projection_layer = ProjectionLayer(d_model, target_vocab_size)
    transformer = Transformer(encoder, decoder, src_embedding, target_embedding, src_position, target_position, projection_layer)
    # initialize parameters with Xavier uniform
    for p in transformer.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return transformer
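

# Illustrative smoke test (not part of the original file); the vocabulary sizes, batch size,
# and sequence lengths below are arbitrary assumptions chosen just to exercise the model.
if __name__ == "__main__":
    src_vocab, tgt_vocab = 1000, 1200
    src_len, tgt_len = 20, 20
    model = build_transformer(src_vocab, tgt_vocab, src_len, tgt_len, N=2)
    src = torch.randint(0, src_vocab, (2, src_len))                         # (batch, src_seq_len)
    tgt = torch.randint(0, tgt_vocab, (2, tgt_len))                         # (batch, tgt_seq_len)
    src_mask = torch.ones(2, 1, 1, src_len).int()                           # attend to every source position
    tgt_mask = torch.tril(torch.ones(2, 1, tgt_len, tgt_len)).int()         # causal mask for the decoder
    encoder_output = model.encode(src, src_mask)                            # (2, src_len, 512)
    decoder_output = model.decode(encoder_output, src_mask, tgt, tgt_mask)  # (2, tgt_len, 512)
    log_probs = model.project(decoder_output)                               # (2, tgt_len, tgt_vocab)
    print(log_probs.shape)  # torch.Size([2, 20, 1200])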