import math

import torch
import torch.nn as nn

from labml_nn.utils import clone_module_list
from .feed_forward import FeedForward
from .mha import MultiHeadAttention
from .positional_encoding import get_positional_encoding


class EmbeddingsWithPositionalEncoding(nn.Module):
    # Embed tokens and add fixed positional encodings
    def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
        super().__init__()
        self.linear = nn.Embedding(n_vocab, d_model)
        self.d_model = d_model
        self.register_buffer('positional_encodings', get_positional_encoding(d_model, max_len))

    def forward(self, x: torch.Tensor):
        pe = self.positional_encodings[:x.shape[0]].requires_grad_(False)
        return self.linear(x) * math.sqrt(self.d_model) + pe


class EmbeddingsWithLearnedPositionalEncoding(nn.Module):
    # Embed tokens and add parameterized (learned) positional encodings
    def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
        super().__init__()
        self.linear = nn.Embedding(n_vocab, d_model)
        self.d_model = d_model
        self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1, d_model), requires_grad=True)

    def forward(self, x: torch.Tensor):
        pe = self.positional_encodings[:x.shape[0]]
        return self.linear(x) * math.sqrt(self.d_model) + pe
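# A minimal usage sketch (not part of the original module). It follows the
# file's own convention that token indices come as a `[seq_len, batch_size]`
# long tensor, matching the `[max_len, 1, d_model]` layout of the positional
# encodings above; the concrete sizes below are illustrative only.
def _embedding_usage_sketch():
    emb = EmbeddingsWithLearnedPositionalEncoding(d_model=512, n_vocab=10_000)
    # 20 time steps, batch of 4 sequences
    tokens = torch.randint(0, 10_000, (20, 4))
    # Token embeddings scaled by sqrt(d_model), plus positional encodings
    out = emb(tokens)
    assert out.shape == (20, 4, 512)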
# This can act as an encoder layer or a decoder layer.
#
# 🗒 Some implementations, including the paper, seem to differ on where the
# layer normalization is done. Here we do layer normalization before the
# attention and the feed-forward networks, and add the original residual
# vectors. The alternative is to do layer normalization after adding the
# residuals, but we found that to be less stable when training. A detailed
# discussion of this can be found in the paper
# "On Layer Normalization in the Transformer Architecture".
class TransformerLayer(nn.Module):
    # * `d_model` is the token embedding size
    # * `self_attn` is the self-attention module
    # * `src_attn` is the source attention module (when this is used in a decoder)
    # * `feed_forward` is the feed-forward module
    # * `dropout_prob` is the dropout probability after self-attention and the FFN
    def __init__(self, *,
                 d_model: int,
                 self_attn: MultiHeadAttention,
                 src_attn: MultiHeadAttention = None,
                 feed_forward: FeedForward,
                 dropout_prob: float):
        super().__init__()
        self.size = d_model
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.dropout = nn.Dropout(dropout_prob)
        self.norm_self_attn = nn.LayerNorm([d_model])
        if self.src_attn is not None:
            self.norm_src_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])
        # Whether to save the input to the feed-forward layer
        self.is_save_ff_input = False

    def forward(self, *,
                x: torch.Tensor,
                mask: torch.Tensor,
                src: torch.Tensor = None,
                src_mask: torch.Tensor = None):
        # Normalize the vectors before doing self-attention
        z = self.norm_self_attn(x)
        # Run through self-attention, i.e. keys and values come from self
        self_attn = self.self_attn(query=z, key=z, value=z, mask=mask)
        # Add the self-attention results
        x = x + self.dropout(self_attn)

        # If a source is provided, get results from attention to the source.
        # This is when you have a decoder layer that pays attention to the
        # encoder outputs.
        if src is not None:
            # Normalize vectors
            z = self.norm_src_attn(x)
            # Attention to the source, i.e. keys and values come from the source
            attn_src = self.src_attn(query=z, key=src, value=src, mask=src_mask)
            # Add the source attention results
            x = x + self.dropout(attn_src)

        # Normalize for the feed-forward network
        z = self.norm_ff(x)
        # Save the input to the feed-forward layer if specified
        if self.is_save_ff_input:
            self.ff_input = z.clone()
        # Pass through the feed-forward network
        ff = self.feed_forward(z)
        # Add the feed-forward results back
        x = x + self.dropout(ff)

        return x
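# The note above `TransformerLayer` mentions the pre-norm ordering used in its
# `forward`. A minimal sketch (not part of the original module) contrasting the
# two orderings for a single residual sub-layer; `sublayer` stands in for
# self-attention or the feed-forward network.
def _pre_norm_step(x: torch.Tensor, sublayer: nn.Module, norm: nn.LayerNorm, dropout: nn.Dropout):
    # Pre-norm (as in `TransformerLayer.forward`): normalize, apply the
    # sub-layer, then add the *original* residual vector
    return x + dropout(sublayer(norm(x)))


def _post_norm_step(x: torch.Tensor, sublayer: nn.Module, norm: nn.LayerNorm, dropout: nn.Dropout):
    # Post-norm (as in the original Transformer paper): apply the sub-layer,
    # add the residual, then normalize the sum
    return norm(x + dropout(sublayer(x)))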
class Encoder(nn.Module):
    # Transformer encoder: a stack of `n_layers` copies of a `TransformerLayer`
    def __init__(self, layer: TransformerLayer, n_layers: int):
        super().__init__()
        # Make copies of the transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, mask: torch.Tensor):
        # Run through each transformer layer
        for layer in self.layers:
            x = layer(x=x, mask=mask)
        # Finally, normalize the vectors
        return self.norm(x)
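# `Encoder` (and `Decoder` below) take a single prototype layer and replicate
# it with `clone_module_list`. A small sketch (not part of the original module),
# assuming `clone_module_list` deep-copies the module so the resulting layers
# do *not* share weights; a plain `nn.Linear` is used as a stand-in module here.
def _clone_module_list_sketch():
    proto = nn.Linear(8, 8)
    layers = clone_module_list(proto, 3)
    # Each copy starts from the prototype's weights but owns separate parameter
    # tensors, so the layers train independently
    assert torch.equal(layers[0].weight, layers[1].weight)
    assert layers[0].weight is not layers[1].weight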
class Decoder(nn.Module):
    # Transformer decoder: a stack of `n_layers` copies of a `TransformerLayer`
    # that also attends to the encoder outputs (`memory`) through `src_attn`
    def __init__(self, layer: TransformerLayer, n_layers: int):
        super().__init__()
        # Make copies of the transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, memory: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
        # Run through each transformer layer
        for layer in self.layers:
            x = layer(x=x, mask=tgt_mask, src=memory, src_mask=src_mask)
        # Finally, normalize the vectors
        return self.norm(x)
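# The decoder's `tgt_mask` is what stops a position from attending to
# subsequent positions. A hedged sketch (not part of the original module) of
# building such a causal mask with `torch.tril`; the exact shape and dtype the
# companion `MultiHeadAttention` expects (here assumed to be a truthy mask
# broadcastable to `[query_len, key_len, batch]`) should be checked against
# the `.mha` module.
def _causal_mask_sketch(seq_len: int) -> torch.Tensor:
    # Lower-triangular matrix: position i may attend to positions <= i
    mask = torch.tril(torch.ones(seq_len, seq_len)).bool()
    # Add a trailing batch dimension of 1 so it broadcasts over the batch
    return mask.unsqueeze(-1)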
class Generator(nn.Module):
    # Project the decoder output to logits over the vocabulary
    def __init__(self, n_vocab: int, d_model: int):
        super().__init__()
        self.projection = nn.Linear(d_model, n_vocab)

    def forward(self, x):
        return self.projection(x)
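# `Generator` returns raw logits rather than probabilities; the (log-)softmax
# is usually left to the loss function. A minimal sketch (not part of the
# original module) with illustrative sizes, using PyTorch's cross-entropy,
# which applies log-softmax internally.
def _generator_loss_sketch():
    generator = Generator(n_vocab=10_000, d_model=512)
    decoder_output = torch.randn(20, 4, 512)         # [seq_len, batch, d_model]
    logits = generator(decoder_output)               # [seq_len, batch, n_vocab]
    targets = torch.randint(0, 10_000, (20, 4))      # [seq_len, batch]
    # Flatten the time and batch dimensions for the loss
    loss = nn.functional.cross_entropy(logits.view(-1, 10_000), targets.view(-1))
    return loss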
class EncoderDecoder(nn.Module):
    # Combined encoder-decoder model
    def __init__(self, encoder: Encoder, decoder: Decoder, src_embed: nn.Module, tgt_embed: nn.Module, generator: nn.Module):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

        # This was important from their code.
        # Initialize parameters with Glorot / fan_avg.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src: torch.Tensor, tgt: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
        # Run the source through the encoder
        enc = self.encode(src, src_mask)
        # Run the encodings and targets through the decoder
        return self.decode(enc, src_mask, tgt, tgt_mask)

    def encode(self, src: torch.Tensor, src_mask: torch.Tensor):
        return self.encoder(self.src_embed(src), src_mask)

    def decode(self, memory: torch.Tensor, src_mask: torch.Tensor, tgt: torch.Tensor, tgt_mask: torch.Tensor):
        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
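# A hedged end-to-end assembly sketch (not part of the original module). It
# assumes the companion modules are constructed as `MultiHeadAttention(heads,
# d_model)` and `FeedForward(d_model, d_ff)`; check `.mha` and `.feed_forward`
# for the exact signatures. Sizes and the all-ones source mask are
# illustrative only.
def _encoder_decoder_sketch():
    d_model, n_vocab, heads, d_ff, n_layers = 512, 10_000, 8, 2048, 6

    # Encoder layers have no source attention; decoder layers attend to the
    # encoder outputs through `src_attn`
    enc_layer = TransformerLayer(d_model=d_model,
                                 self_attn=MultiHeadAttention(heads, d_model),
                                 feed_forward=FeedForward(d_model, d_ff),
                                 dropout_prob=0.1)
    dec_layer = TransformerLayer(d_model=d_model,
                                 self_attn=MultiHeadAttention(heads, d_model),
                                 src_attn=MultiHeadAttention(heads, d_model),
                                 feed_forward=FeedForward(d_model, d_ff),
                                 dropout_prob=0.1)

    model = EncoderDecoder(Encoder(enc_layer, n_layers),
                           Decoder(dec_layer, n_layers),
                           EmbeddingsWithPositionalEncoding(d_model, n_vocab),
                           EmbeddingsWithPositionalEncoding(d_model, n_vocab),
                           Generator(n_vocab, d_model))

    src = torch.randint(0, n_vocab, (25, 4))   # [src_seq_len, batch]
    tgt = torch.randint(0, n_vocab, (20, 4))   # [tgt_seq_len, batch]
    # Assumed mask layout (see `_causal_mask_sketch` above): truthy entries
    # mark allowed positions, broadcastable to [query_len, key_len, batch].
    # The source mask allows everything (no padding); the target mask is causal.
    src_mask = torch.ones(1, 25, 1).bool()
    tgt_mask = _causal_mask_sketch(20)

    out = model(src, tgt, src_mask, tgt_mask)   # [tgt_seq_len, batch, d_model]
    logits = model.generator(out)               # [tgt_seq_len, batch, n_vocab]
    return logits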