This trains a simple transformer model for auto-regression.
We try different variants of the position-wise feedforward network.
The reusable and configurable components are defined in configs.py.
import torch
from labml import experiment
from labml.configs import option
from labml.utils.pytorch import get_modules
from labml_helpers.module import Module

from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import Encoder, Generator, TransformerConfigs
from labml_nn.transformers.utils import subsequent_mask


class AutoregressiveModel(Module):
    def __init__(self, src_embed: Module, encoder: Encoder, generator: Generator):
        super().__init__()
        # Token embedding module
        self.src_embed = src_embed
        # Transformer-based encoder
        self.encoder = encoder
        # Next-token generation layer; this gives the logits of the next token
        self.generator = generator
        # This will be initialized on the first call
        self.src_mask = None

    def __call__(self, src: torch.Tensor):
        # Create a subsequent mask, so that the transformer can only pay attention
        # to past tokens (a minimal sketch of such a mask follows the `Configs` class below)
        if self.src_mask is None or self.src_mask.size(0) != len(src):
            self.src_mask = subsequent_mask(len(src)).to(src.device)
        # Embed the tokens (`src`) and run them through the transformer
        res = self.encoder(self.src_embed(src), self.src_mask)
        # Generate logits of the next token
        return self.generator(res), None


class Configs(NLPAutoRegressionConfigs):
    transformer: TransformerConfigs
    model: AutoregressiveModel
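
For intuition, the subsequent mask is causal: position i may attend only to positions
j <= i. Here is a minimal sketch of the idea, assuming a plain lower-triangular boolean
mask (the actual subsequent_mask helper in labml_nn.transformers.utils may differ in
shape details):

import torch

def causal_mask_sketch(seq_len: int) -> torch.Tensor:
    # True where attention is allowed: token i sees tokens 0..i
    return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool))

# causal_mask_sketch(3) ->
# tensor([[ True, False, False],
#         [ True,  True, False],
#         [ True,  True,  True]])
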

# Initialize the auto-regressive model
@option(Configs.model)
def autoregressive_model(c: Configs):
    m = AutoregressiveModel(c.transformer.src_embed, c.transformer.encoder, c.transformer.generator)
    return m.to(c.device)
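
labml's @option registers the decorated function as the "calculator" for that config
attribute: it runs lazily when the attribute is first needed, unless the value was
overridden in experiment.configs(...). A hypothetical toy illustrating the idea
(LazyConfig is not part of labml):

class LazyConfig:
    def __init__(self, calculators: dict):
        # attribute name -> function that computes its value from the config
        self._calculators = calculators
        self._cache = {}

    def __getattr__(self, name):
        # Called only when normal attribute lookup fails; compute and cache the value
        if name not in self._calculators:
            raise AttributeError(name)
        if name not in self._cache:
            self._cache[name] = self._calculators[name](self)
        return self._cache[name]

# cfg = LazyConfig({'model': lambda c: 'built model'})
# cfg.model  # the calculator runs on first access
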

# Initialize the configurable transformer encoder for our autoregressive model
@option(Configs.transformer)
def transformer_c(c: Configs):
    tc = TransformerConfigs()
    tc.n_src_vocab = c.n_tokens
    tc.n_tgt_vocab = c.n_tokens

    return tc


def main():
    # Create the experiment
    experiment.create(name="glu_variants")
    # Create the configs
    conf = Configs()
    # Load configurations
    experiment.configs(conf,
                       # A dictionary of configurations to override
                       {'tokenizer': 'character',
                        'prompt_separator': '',
                        'prompt': 'It is ',
                        'text': 'tiny_shakespeare',

                        'optimizer.optimizer': 'Noam',
                        'optimizer.learning_rate': 1.,
                        'optimizer.d_model': 256,

                        'seq_len': 1024,
                        'epochs': 128,
                        'batch_size': 6,
                        'inner_iterations': 10,

                        # GLU variant, one of GLU, Bilinear, ReGLU, GEGLU, SwiGLU.
                        # These are defined in the configurable FFN implementation
                        # (a sketch of these variants appears at the end of this file).
                        'transformer.ffn.glu_variant': 'Bilinear',

                        # Transformer configurations
                        'transformer.d_model': 256,
                        'transformer.ffn.d_ff': 1024,
                        'transformer.n_heads': 8,
                        'transformer.n_layers': 6})
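
    # A note on the optimizer settings above: 'Noam' scales the learning rate as
    #   lr = learning_rate * d_model^(-0.5) * min(step^(-0.5), step * warmup^(-1.5)),
    # the warmup-then-decay schedule from "Attention Is All You Need" (Vaswani et al., 2017);
    # the warmup length is left to labml_nn's default here.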
    # This is needed to initialize the models
    conf.n_tokens = conf.text.n_tokens
    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))
    # Start the experiment
    with experiment.start():
        # Run the training loop (`TrainValidConfigs.run`)
        conf.run()


if __name__ == '__main__':
    main()
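
The GLU variants selected by `transformer.ffn.glu_variant` gate the hidden layer of the
position-wise FFN, following Shazeer (2020): FFN(x) = (activation(x W1) * (x V)) W2.
A minimal sketch, assuming no bias terms (the configurable FFN in labml_nn may differ in
details such as dropout and biases):

import torch
import torch.nn as nn


class GLUVariantFFN(nn.Module):
    # The activation selects the variant: sigmoid -> GLU, identity -> Bilinear,
    # ReLU -> ReGLU, GELU -> GEGLU, SiLU (Swish) -> SwiGLU
    def __init__(self, d_model: int, d_ff: int, activation: nn.Module):
        super().__init__()
        self.w1 = nn.Linear(d_model, d_ff, bias=False)  # gate projection
        self.v = nn.Linear(d_model, d_ff, bias=False)   # linear projection
        self.w2 = nn.Linear(d_ff, d_model, bias=False)  # output projection
        self.activation = activation

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # FFN_variant(x) = (activation(x W1) * (x V)) W2
        return self.w2(self.activation(self.w1(x)) * self.v(x))


# e.g. the 'Bilinear' variant used above: GLUVariantFFN(256, 1024, nn.Identity())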