import math

import torch
import torch.nn as nn

from labml_nn.utils import clone_module_list
from .feed_forward import FeedForward
from .mha import MultiHeadAttention
from .positional_encoding import get_positional_encoding
class EmbeddingsWithPositionalEncoding(nn.Module):
    def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
        super().__init__()
        # Token embedding layer
        self.linear = nn.Embedding(n_vocab, d_model)
        self.d_model = d_model
        # Fixed sinusoidal positional encodings, registered as a (non-trainable) buffer
        self.register_buffer('positional_encodings', get_positional_encoding(d_model, max_len))

    def forward(self, x: torch.Tensor):
        # Take the positional encodings for the current sequence length
        pe = self.positional_encodings[:x.shape[0]].requires_grad_(False)
        # Scale the embeddings by sqrt(d_model) and add the positional encodings
        return self.linear(x) * math.sqrt(self.d_model) + pe
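
# A minimal usage sketch (illustrative values, not part of the original module):
# token indices are laid out as `[seq_len, batch_size]` and the output is
# `[seq_len, batch_size, d_model]`, assuming `get_positional_encoding` returns a
# `[max_len, 1, d_model]` tensor that broadcasts over the batch dimension.
def _example_fixed_positional_embeddings():
    emb = EmbeddingsWithPositionalEncoding(d_model=512, n_vocab=10_000)
    tokens = torch.randint(0, 10_000, (20, 4))  # 20 time steps, batch of 4
    return emb(tokens)  # -> [20, 4, 512]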
class EmbeddingsWithLearnedPositionalEncoding(nn.Module):
    def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
        super().__init__()
        # Token embedding layer
        self.linear = nn.Embedding(n_vocab, d_model)
        self.d_model = d_model
        # Positional encodings are learned parameters here, instead of fixed sinusoids
        self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1, d_model), requires_grad=True)

    def forward(self, x: torch.Tensor):
        # Take the (learned) positional encodings for the current sequence length
        pe = self.positional_encodings[:x.shape[0]]
        # Scale the embeddings by sqrt(d_model) and add the positional encodings
        return self.linear(x) * math.sqrt(self.d_model) + pe
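
# A similar sketch for the learned variant (illustrative values): the positional
# encodings are an `nn.Parameter` of shape `[max_len, 1, d_model]`, so they are
# trained with the rest of the model rather than being fixed.
def _example_learned_positional_embeddings():
    emb = EmbeddingsWithLearnedPositionalEncoding(d_model=512, n_vocab=10_000)
    tokens = torch.randint(0, 10_000, (20, 4))  # [seq_len, batch_size]
    out = emb(tokens)  # -> [20, 4, 512]
    assert emb.positional_encodings.requires_grad  # learned, unlike the buffer above
    return out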
class TransformerLayer(nn.Module):
    def __init__(self, *,
                 d_model: int,
                 self_attn: MultiHeadAttention,
                 src_attn: MultiHeadAttention = None,
                 feed_forward: FeedForward,
                 dropout_prob: float):
        """
        * `d_model` is the token embedding size
        * `self_attn` is the self attention module
        * `src_attn` is the source attention module (when this is used in a decoder)
        * `feed_forward` is the feed forward module
        * `dropout_prob` is the probability of dropping out after self attention and FFN
        """
        super().__init__()
        self.size = d_model
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.dropout = nn.Dropout(dropout_prob)
        self.norm_self_attn = nn.LayerNorm([d_model])
        if self.src_attn is not None:
            self.norm_src_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])
        # Whether to save the input to the feed forward layer
        self.is_save_ff_input = False
    def forward(self, *,
                x: torch.Tensor,
                mask: torch.Tensor,
                src: torch.Tensor = None,
                src_mask: torch.Tensor = None):
        # Normalize the vectors before doing self attention
        z = self.norm_self_attn(x)
        # Run through self attention, i.e. keys and values are from self
        self_attn = self.self_attn(query=z, key=z, value=z, mask=mask)
        # Add the self attention results
        x = x + self.dropout(self_attn)

        # If a source is provided, get results from attention to the source.
        # This is when you have a decoder layer that pays attention to encoder outputs.
        if src is not None:
            # Normalize vectors
            z = self.norm_src_attn(x)
            # Attention to the source, i.e. keys and values are from the source
            attn_src = self.src_attn(query=z, key=src, value=src, mask=src_mask)
            # Add the source attention results
            x = x + self.dropout(attn_src)

        # Normalize for the feed-forward network
        z = self.norm_ff(x)
        # Save the input to the feed forward layer if specified
        if self.is_save_ff_input:
            self.ff_input = z.clone()
        # Pass through the feed-forward network
        ff = self.feed_forward(z)
        # Add the feed-forward results back
        x = x + self.dropout(ff)

        return x
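
# A usage sketch for a single (encoder-style) layer. It assumes the sibling modules'
# constructors are `MultiHeadAttention(heads, d_model, dropout_prob)` and
# `FeedForward(d_model, d_ff, dropout)`, and that masks are laid out as
# `[query_seq_len, key_seq_len, batch_size]` (broadcastable), as this attention
# implementation expects; treat those details as assumptions, not guarantees.
def _example_transformer_layer():
    d_model, heads, d_ff, dropout = 512, 8, 2048, 0.1
    layer = TransformerLayer(d_model=d_model,
                             self_attn=MultiHeadAttention(heads, d_model, dropout),
                             feed_forward=FeedForward(d_model, d_ff, dropout),
                             dropout_prob=dropout)
    x = torch.randn(20, 4, d_model)  # [seq_len, batch_size, d_model]
    # A causal (subsequent) mask; use an all-ones mask for a plain encoder layer
    mask = torch.tril(torch.ones(20, 20, dtype=torch.bool)).unsqueeze(-1)
    return layer(x=x, mask=mask)  # -> [20, 4, 512]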
class Encoder(nn.Module):
    def __init__(self, layer: TransformerLayer, n_layers: int):
        super().__init__()
        # Make copies of the transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, mask: torch.Tensor):
        # Run through each transformer layer
        for layer in self.layers:
            x = layer(x=x, mask=mask)
        # Finally, normalize the vectors
        return self.norm(x)
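
# A sketch of stacking the layer into an encoder. `clone_module_list` is assumed to
# deep-copy the prototype layer `n_layers` times, so each copy gets its own parameters.
def _example_encoder():
    d_model, heads, d_ff, dropout = 512, 8, 2048, 0.1
    layer = TransformerLayer(d_model=d_model,
                             self_attn=MultiHeadAttention(heads, d_model, dropout),
                             feed_forward=FeedForward(d_model, d_ff, dropout),
                             dropout_prob=dropout)
    encoder = Encoder(layer, n_layers=6)
    x = torch.randn(20, 4, d_model)                 # [seq_len, batch_size, d_model]
    mask = torch.ones(20, 20, 1, dtype=torch.bool)  # full (non-causal) attention
    return encoder(x, mask)                         # -> [20, 4, 512]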
class Decoder(nn.Module):
    def __init__(self, layer: TransformerLayer, n_layers: int):
        super().__init__()
        # Make copies of the transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, memory: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
        # Run through each transformer layer
        for layer in self.layers:
            x = layer(x=x, mask=tgt_mask, src=memory, src_mask=src_mask)
        # Finally, normalize the vectors
        return self.norm(x)
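
# A matching sketch for the decoder: it needs a source-attention module, the encoder
# output (`memory`), and two masks — a causal `tgt_mask` over the target and a
# `src_mask` over the source positions (mask layout as assumed above).
def _example_decoder():
    d_model, heads, d_ff, dropout = 512, 8, 2048, 0.1
    layer = TransformerLayer(d_model=d_model,
                             self_attn=MultiHeadAttention(heads, d_model, dropout),
                             src_attn=MultiHeadAttention(heads, d_model, dropout),
                             feed_forward=FeedForward(d_model, d_ff, dropout),
                             dropout_prob=dropout)
    decoder = Decoder(layer, n_layers=6)
    memory = torch.randn(20, 4, d_model)                # encoder output
    x = torch.randn(15, 4, d_model)                     # embedded target tokens
    tgt_mask = torch.tril(torch.ones(15, 15, dtype=torch.bool)).unsqueeze(-1)
    src_mask = torch.ones(15, 20, 1, dtype=torch.bool)  # target attends to all source positions
    return decoder(x, memory, src_mask, tgt_mask)       # -> [15, 4, 512]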
class Generator(nn.Module):
    """
    This projects the decoder output to logits over the vocabulary, which a
    (log) softmax turns into token probabilities. You don't need that softmax
    if you are using `nn.CrossEntropyLoss`, since it works on the raw logits.
    """

    def __init__(self, n_vocab: int, d_model: int):
        super().__init__()
        self.projection = nn.Linear(d_model, n_vocab)

    def forward(self, x):
        return self.projection(x)
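
# A sketch of training with the generator and `nn.CrossEntropyLoss` (which expects raw
# logits, so no softmax is applied here); shapes and values are illustrative.
def _example_generator_loss():
    n_vocab, d_model = 10_000, 512
    generator = Generator(n_vocab, d_model)
    decoder_out = torch.randn(15, 4, d_model)     # [seq_len, batch_size, d_model]
    targets = torch.randint(0, n_vocab, (15, 4))  # [seq_len, batch_size]
    logits = generator(decoder_out)               # -> [15, 4, n_vocab]
    return nn.CrossEntropyLoss()(logits.view(-1, n_vocab), targets.view(-1))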
class EncoderDecoder(nn.Module):
    def __init__(self, encoder: Encoder, decoder: Decoder, src_embed: nn.Module, tgt_embed: nn.Module, generator: nn.Module):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

        # This was important from their code.
        # Initialize parameters with Glorot / fan_avg.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src: torch.Tensor, tgt: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
        # Run the source through the encoder
        enc = self.encode(src, src_mask)
        # Run the encodings and targets through the decoder
        return self.decode(enc, src_mask, tgt, tgt_mask)

    def encode(self, src: torch.Tensor, src_mask: torch.Tensor):
        return self.encoder(self.src_embed(src), src_mask)

    def decode(self, memory: torch.Tensor, src_mask: torch.Tensor, tgt: torch.Tensor, tgt_mask: torch.Tensor):
        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
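
# A sketch that wires everything together into the usual encoder-decoder setup.
# The sibling modules' constructor arguments and the mask layout are assumptions
# (see the sketches above); vocabulary sizes and dimensions are illustrative.
# `src_mask` is given a leading dimension of 1 so the same mask can broadcast over
# query positions in both encoder self-attention and decoder source-attention.
def _example_encoder_decoder():
    d_model, heads, d_ff, dropout, n_layers, n_vocab = 512, 8, 2048, 0.1, 6, 10_000

    def make_layer(with_src_attn: bool):
        return TransformerLayer(d_model=d_model,
                                self_attn=MultiHeadAttention(heads, d_model, dropout),
                                src_attn=MultiHeadAttention(heads, d_model, dropout) if with_src_attn else None,
                                feed_forward=FeedForward(d_model, d_ff, dropout),
                                dropout_prob=dropout)

    model = EncoderDecoder(encoder=Encoder(make_layer(False), n_layers),
                           decoder=Decoder(make_layer(True), n_layers),
                           src_embed=EmbeddingsWithPositionalEncoding(d_model, n_vocab),
                           tgt_embed=EmbeddingsWithPositionalEncoding(d_model, n_vocab),
                           generator=Generator(n_vocab, d_model))

    src = torch.randint(0, n_vocab, (20, 4))        # [src_seq_len, batch_size]
    tgt = torch.randint(0, n_vocab, (15, 4))        # [tgt_seq_len, batch_size]
    src_mask = torch.ones(1, 20, 1, dtype=torch.bool)
    tgt_mask = torch.tril(torch.ones(15, 15, dtype=torch.bool)).unsqueeze(-1)
    out = model(src, tgt, src_mask, tgt_mask)       # -> [15, 4, d_model]
    return model.generator(out)                     # -> [15, 4, n_vocab]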