import torch
from torch import nn

from labml_helpers.module import Module
from labml_nn.transformers import MultiHeadAttention


class SpatialDepthWiseSharedConvolution(Module):
    def __init__(self, kernel_size: int = 3):
        super().__init__()
        self.kernel_size = kernel_size
We use PyTorch's `Conv1d` module. We add padding to both sides and later crop the rightmost `kernel_size - 1` results.
        self.conv = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=(kernel_size,), padding=(kernel_size - 1,))
`x` has shape `[seq_len, batch_size, heads, d_k]`
    def forward(self, x: torch.Tensor):
Get the shape
        seq_len, batch_size, heads, d_k = x.shape
Permute to `[batch_size, heads, d_k, seq_len]`
        x = x.permute(1, 2, 3, 0)
Change the shape to `[batch_size * heads * d_k, 1, seq_len]`, where the middle dimension is the single channel
        x = x.view(batch_size * heads * d_k, 1, seq_len)
The 1D convolution accepts input of the form `[N, channels, sequence]`
        x = self.conv(x)
Crop the rightmost `kernel_size - 1` results, since we padded both sides
        x = x[:, :, :-(self.kernel_size - 1)]
Reshape to `[batch_size, heads, d_k, seq_len]`
        x = x.view(batch_size, heads, d_k, seq_len)
Permute back to `[seq_len, batch_size, heads, d_k]`
        x = x.permute(3, 0, 1, 2)

        return x
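As a quick sanity check, here is a small sketch that is not part of the original implementation: it verifies that the shared convolution preserves the `[seq_len, batch_size, heads, d_k]` shape and is causal (cropping the rightmost `kernel_size - 1` results means position `t` never sees later positions). The helper name and tensor sizes are arbitrary choices for illustration.

def _check_shared_conv():
    # Arbitrary sizes, for illustration only
    seq_len, batch_size, heads, d_k = 10, 2, 4, 16
    conv = SpatialDepthWiseSharedConvolution(kernel_size=3)
    x = torch.randn(seq_len, batch_size, heads, d_k)
    y = conv(x)
    # The `[seq_len, batch_size, heads, d_k]` shape is preserved
    assert y.shape == (seq_len, batch_size, heads, d_k)
    # Changing the last step of the input must not change earlier outputs,
    # because the rightmost `kernel_size - 1` results are cropped
    x_changed = x.clone()
    x_changed[-1] += 1.
    y_changed = conv(x_changed)
    assert torch.allclose(y[:-1], y_changed[:-1])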
class MultiDSharedConvHeadAttention(MultiHeadAttention):
    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):
        super().__init__(heads, d_model, dropout_prob)
Multi-Head Attention will create the query, key and value projection modules `self.query`, `self.key`, and `self.value`. We combine a spatial depth-wise shared convolution layer with each of them and replace `self.query`, `self.key`, and `self.value`.
        self.query = nn.Sequential(self.query, SpatialDepthWiseSharedConvolution())
        self.key = nn.Sequential(self.key, SpatialDepthWiseSharedConvolution())
        self.value = nn.Sequential(self.value, SpatialDepthWiseSharedConvolution())
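A brief usage sketch (not from the original source) of the shared-convolution attention used as self-attention. It assumes the parent `MultiHeadAttention` takes `query`, `key` and `value` tensors of shape `[seq_len, batch_size, d_model]` and returns a tensor of the same shape; the sizes below are arbitrary.

def _check_shared_conv_attention():
    # Arbitrary sizes, for illustration only; `d_model` must be divisible by `heads`
    seq_len, batch_size, heads, d_model = 10, 2, 4, 64
    attn = MultiDSharedConvHeadAttention(heads, d_model)
    x = torch.randn(seq_len, batch_size, d_model)
    # Self-attention: the same tensor is used for query, key and value
    out = attn(query=x, key=x, value=x)
    assert out.shape == (seq_len, batch_size, d_model)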
class SpatialDepthWisePerHeadConvolution(Module):
`heads` is the number of heads and `d_k` is the number of channels in each head
    def __init__(self, heads: int, d_k: int, kernel_size: int = 3):
        super().__init__()
        self.kernel_size = kernel_size
We use PyTorch's `Conv1d` module. We set the number of groups equal to the total number of channels across all heads, so that it performs a separate convolution (with a different kernel) for each channel of each head. We add padding to both sides and later crop the rightmost `kernel_size - 1` results.
        self.conv = nn.Conv1d(in_channels=d_k * heads, out_channels=d_k * heads,
                              kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=d_k * heads)
`x` has shape `[seq_len, batch_size, heads, d_k]`
    def forward(self, x: torch.Tensor):
Get the shape
        seq_len, batch_size, heads, d_k = x.shape
Permute to `[batch_size, heads, d_k, seq_len]`
        x = x.permute(1, 2, 3, 0)
Change the shape to `[batch_size, heads * d_k, seq_len]`
        x = x.view(batch_size, heads * d_k, seq_len)
The 1D convolution accepts input of the form `[N, channels, sequence]`
        x = self.conv(x)
Crop the rightmost `kernel_size - 1` results, since we padded both sides
        x = x[:, :, :-(self.kernel_size - 1)]
Reshape to `[batch_size, heads, d_k, seq_len]`
        x = x.view(batch_size, heads, d_k, seq_len)
Permute back to `[seq_len, batch_size, heads, d_k]`
        x = x.permute(3, 0, 1, 2)

        return x
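To make the difference between the two variants concrete, here is a small parameter-count sketch (not part of the original code): the shared version learns a single `kernel_size`-tap filter plus a bias for all channels, while the per-head version learns one filter and bias per channel of every head. The helper name and sizes are illustrative only.

def _compare_conv_parameters():
    # Arbitrary sizes, for illustration only
    heads, d_k, kernel_size = 4, 16, 3
    shared = SpatialDepthWiseSharedConvolution(kernel_size)
    per_head = SpatialDepthWisePerHeadConvolution(heads, d_k, kernel_size)
    # One kernel and one bias shared by all channels
    assert sum(p.numel() for p in shared.parameters()) == kernel_size + 1
    # One kernel and one bias per channel of every head
    assert sum(p.numel() for p in per_head.parameters()) == (kernel_size + 1) * heads * d_k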
class MultiDPHConvHeadAttention(MultiHeadAttention):
    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):
        super().__init__(heads, d_model, dropout_prob)
Multi-Head Attention will create the query, key and value projection modules `self.query`, `self.key`, and `self.value`. We combine a spatial depth-wise per-head convolution layer with each of them and replace `self.query`, `self.key`, and `self.value`.
        self.query = nn.Sequential(self.query, SpatialDepthWisePerHeadConvolution(heads, self.d_k))
        self.key = nn.Sequential(self.key, SpatialDepthWisePerHeadConvolution(heads, self.d_k))
        self.value = nn.Sequential(self.value, SpatialDepthWisePerHeadConvolution(heads, self.d_k))
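And the corresponding usage sketch for the per-head variant, under the same assumptions as above (tensors of shape `[seq_len, batch_size, d_model]`, arbitrary illustrative sizes):

def _check_per_head_conv_attention():
    # Arbitrary sizes, for illustration only; `d_model` must be divisible by `heads`
    seq_len, batch_size, heads, d_model = 10, 2, 4, 64
    attn = MultiDPHConvHeadAttention(heads, d_model)
    x = torch.randn(seq_len, batch_size, d_model)
    # Self-attention: the same tensor is used for query, key and value
    out = attn(query=x, key=x, value=x)
    assert out.shape == (seq_len, batch_size, d_model)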