This is an annotated PyTorch experiment to train an ALiBi model.
It is based on our GPT model.
import torch
from torch.utils.data import DataLoader

from labml import experiment, tracker
from labml.configs import option, calculate
from labml_helpers.datasets.text import SequentialUnBatchedDataset
from labml_nn.transformers.alibi import AlibiMultiHeadAttention
from labml_nn.experiments.nlp_autoregression import transpose_batch
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.gpt import Configs as GPTConfigs


class Configs(GPTConfigs):
ALiBi-based transformer (defined below)
    transformer: TransformerConfigs = 'GPT_ALiBi'
Longer validation set
    valid_seq_len: int = 128
    valid_loader = 'shuffled_longer_valid_loader'
Log losses at the initial and final tokens
    def other_metrics(self, output: torch.Tensor, target: torch.Tensor):
If there are more tokens than the training sequence length (during validation),
        if self.seq_len < output.shape[0]:
Log the loss at training sequence length
            tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1]))
Log the loss at the first token
            tracker.add(f'loss.0.', self.loss_func(output[0], target[0]))
Log the loss at the final token
            tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1]))
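For example, with the overrides used in main() below (seq_len of 64 and valid_seq_len of 80), validation outputs are longer than the training context and this logs three extra metrics; the names follow from the f-strings above, and the exact validation shape is an assumption.
            # Hypothetical validation step, assuming output has valid_seq_len tokens:
            #   'loss.0.'  -> loss at the first token
            #   'loss.63.' -> loss at the last position seen during training (seq_len - 1)
            #   'loss.79.' -> loss at the final validation token (output.shape[0] - 1)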
Create an ALiBi attention module
def _alibi_mha(c: TransformerConfigs):
    return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)
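For context, here is a minimal sketch of the head-specific slopes that AlibiMultiHeadAttention relies on. This is an illustrative helper, not the library code, and it assumes the number of heads is a power of two as in the ALiBi paper.
def _alibi_slopes_sketch(n_heads: int):
    # Geometric sequence from the ALiBi paper: 2^(-8/n), 2^(-16/n), ..., 2^(-8)
    start = 2.0 ** (-8.0 / n_heads)
    return [start ** (h + 1) for h in range(n_heads)]

# e.g. _alibi_slopes_sketch(8) == [0.5, 0.25, ..., 0.00390625];
# head h penalizes the attention score of query position i attending to key position j
# by slopes[h] * (i - j), so more distant keys get a larger fixed penalty.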
Set all attention mechanisms to ALiBi
calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha)
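These registrations let the ALiBi attention be selected later simply by name; a sketch of how that looks, based on how the options are set in _transformer_configs below:
# conf = TransformerConfigs()
# conf.encoder_attn = 'alibi_mha'   # labml resolves this string by calling _alibi_mha(conf)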
Shuffled validation data loader with valid_seq_len sequence length
@option(Configs.valid_loader)
def shuffled_longer_valid_loader(c: Configs):
    return DataLoader(SequentialUnBatchedDataset(text=c.text.valid,
                                                 dataset=c.text,
                                                 seq_len=c.valid_seq_len),
                      batch_size=c.batch_size,
                      collate_fn=transpose_batch,
                      shuffle=True)
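The collate_fn is the transpose_batch helper from the base autoregression experiment; the layout sketched below is an assumption that matches how other_metrics indexes tokens along the first dimension.
# Assumed batch layout after transpose_batch (sequence dimension first):
#   data, target: [valid_seq_len, batch_size]
# so output[0] / output[-1] in other_metrics select the first / last token position.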
@option(Configs.transformer, 'GPT_ALiBi')
def _transformer_configs(c: Configs):
We use our configurable transformer implementation
    conf = TransformerConfigs()
Set the vocabulary sizes for embeddings and generating logits
    conf.n_src_vocab = c.n_tokens
    conf.n_tgt_vocab = c.n_tokens
GPT uses GELU activation for the position-wise feedforward
    conf.ffn.activation = 'GELU'
ALiBi doesn't use positional embeddings
    conf.src_embed = 'no_pos'
    conf.tgt_embed = 'no_pos'
Set all attention mechanisms to ALiBi
    conf.encoder_attn = 'alibi_mha'
    conf.decoder_attn = 'alibi_mha'
    conf.decoder_mem_attn = 'alibi_mha'
    return conf
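A note on why 'no_pos' works here: ALiBi injects position through the attention bias rather than the embeddings, which is what allows evaluating on sequences longer than those seen in training. The summary below restates the ALiBi idea and is not labml-specific code.
# With src_embed / tgt_embed set to 'no_pos' (token embeddings without positional encodings),
# all positional information comes from the per-head -slope * (i - j) attention bias,
# so the same model can be validated on valid_seq_len > seq_len without new position parameters.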
def main():
Create experiment
    experiment.create(name="gpt_alibi")
Create configs
    conf = Configs()
Override configurations
    experiment.configs(conf, {
Use a character-level tokenizer
        'tokenizer': 'character',
Prompt separator is blank
        'prompt_separator': '',
Starting prompt for sampling
        'prompt': 'It is ',
Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Alternative: 'text': 'tiny_shakespeare_no_split',
Use a context size of 64
        'seq_len': 64,
Use a validation context size of 80
        'valid_seq_len': 80,
Train for 128 epochs
        'epochs': 128,
Batch size of 128
        'batch_size': 128,
Switch between training and validation 10 times per epoch
        'inner_iterations': 10,
Transformer configurations
        'transformer.d_model': 128,
        'transformer.ffn.d_ff': 512,
        'transformer.n_heads': 8,
        'transformer.n_layers': 4,
        'transformer.dropout': 0.1,
    })
Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
Start the experiment
    with experiment.start():
Run training
        conf.run()


if __name__ == '__main__':
    main()