This experiment trains a simple transformer, introduced in Attention Is All You Need, on an autoregressive language modeling task (the Tiny Shakespeare dataset) with the Sophia-G optimizer.
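For context, Sophia-G maintains an exponential moving average $m_t$ of the gradients and an EMA $h_t$ of a diagonal Hessian estimate that is refreshed only every $k$ steps (hess_interval below), using the Gauss-Newton-Bartlett (GNB) estimator, where targets are sampled from the model's own output distribution. Roughly, following the Sophia paper and up to the exact parameterization of rho used by the labml implementation:

$$
\begin{aligned}
m_t &= \beta_1 m_{t-1} + (1 - \beta_1)\, g_t \\
\hat{y}_b &\sim \operatorname{softmax}\big(f_\theta(x_b)\big), \qquad
\hat{g}_t = \nabla_\theta \frac{1}{B} \sum_{b=1}^{B} \ell\big(f_\theta(x_b), \hat{y}_b\big), \qquad
\hat{h}_t = B \, \hat{g}_t \odot \hat{g}_t \\
h_t &= \beta_2\, h_{t-k} + (1 - \beta_2)\, \hat{h}_t \quad \text{(refreshed every } k \text{ steps)} \\
\theta_{t+1} &= \theta_t - \eta \, \operatorname{clip}\!\left(\frac{m_t}{\max(\rho\, h_t,\, \epsilon)},\, -1,\, 1\right)
\end{aligned}
$$

The step function below performs the GNB estimation on every hess_interval-th training step and an ordinary training step otherwise.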
import torch

from labml import experiment, tracker
from labml_nn.helpers.trainer import BatchIndex
from labml_nn.optimizers.sophia import Sophia
from labml_nn.transformers.basic.autoregressive_experiment import Configs as TransformerAutoRegressionConfigs
class Configs(TransformerAutoRegressionConfigs):
Number of training steps between Hessian diagonal estimates
    hess_interval: int = 10

    optimizer: Sophia

    def step(self, batch: any, batch_idx: BatchIndex):
Set training/eval mode
        self.model.train(self.mode.is_train)
Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)
Estimate the Hessian diagonal every hess_interval steps when training with the Sophia optimizer
        if isinstance(self.optimizer, Sophia) and self.mode.is_train and batch_idx.idx % self.hess_interval == 0:
Get model outputs
            output, *_ = self.model(data)
Create a categorical distribution from logits
            samp_dist = torch.distributions.Categorical(logits=output)
Sample targets from the model's output distribution
            y_sample = samp_dist.sample()
Calculate the loss with the sampled targets and log it
            loss = self.loss_func(output, y_sample)
            tracker.add("loss.hess.", loss)
Calculate gradients
            loss.backward()
Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
Update the Hessian diagonal estimate; data.numel() is the number of tokens in the batch
            self.optimizer.update_hessian(data.numel())
Clear the gradients
            self.optimizer.zero_grad()
        else:
Move data to the device
            data, target = batch[0].to(self.device), batch[1].to(self.device)
Update global step (number of tokens processed) when in training mode
            if self.mode.is_train:
                tracker.add_global_step(data.shape[0] * data.shape[1])
Get model outputs. The model also returns a tuple of states when using RNNs; this is not implemented yet. 😜
            output, *_ = self.model(data)
Calculate and log loss
            loss = self.loss_func(output, target)
            tracker.add("loss.", loss)
Calculate and log accuracy
            self.accuracy(output, target)
            self.accuracy.track()

Calculate other metrics
            self.other_metrics(output, target)
Train the model
            if self.mode.is_train:
Calculate gradients
                loss.backward()
Clip gradients
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
Take optimizer step
                self.optimizer.step()
Log the model parameters and gradients on the last batch of every epoch
                if batch_idx.is_last and self.is_log_model_params_grads:
                    tracker.add('model', self.model)
Clear the gradients
                self.optimizer.zero_grad()
Save the tracked metrics
            tracker.save()
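The Hessian branch above is the Gauss-Newton-Bartlett estimator: targets are sampled from the model's own output distribution, gradients of that sampled-label loss are computed, and update_hessian is passed the number of tokens in the batch. Purely as an illustration of the per-parameter arithmetic this implies (a minimal sketch; the helper name and state layout here are hypothetical and the actual labml implementation may differ):

import torch

def gnb_hessian_ema(params, hess_state: dict, n_tokens: int, beta2: float = 0.99):
    # Hypothetical helper (not the labml API): assumes backward() has already
    # been called on the sampled-label loss, so p.grad holds the GNB gradient.
    for p in params:
        if p.grad is None:
            continue
        # Gauss-Newton-Bartlett estimate of the Hessian diagonal: B * g ⊙ g
        hess_hat = n_tokens * p.grad.detach() ** 2
        # Exponential moving average of the estimate, as in the Sophia paper
        state = hess_state.setdefault(p, torch.zeros_like(p))
        state.mul_(beta2).add_(hess_hat, alpha=1 - beta2)

In the experiment, this bookkeeping is presumably what self.optimizer.update_hessian(data.numel()) performs, and the resulting estimate preconditions the clipped Sophia update taken by self.optimizer.step() on regular training steps.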
def main():
Create experiment
    experiment.create(name="transformer")
Create configs
    conf = Configs()
Override configurations
    experiment.configs(conf, {
Use a character-level tokenizer
        'tokenizer': 'character',
Prompt separator is blank
        'prompt_separator': '',
Starting prompt for sampling
        'prompt': 'It is ',
Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
Use a context size of 512
        'seq_len': 512,
Train for 32 epochs
        'epochs': 32,
Batch size
        'batch_size': 16,
Switch between training and validation 10 times per epoch
        'inner_iterations': 10,
Model size
        'd_model': 256,
        'transformer.n_heads': 16,
        'transformer.ffn.d_ff': 1024,
Use Sophia optimizer
        'optimizer.optimizer': 'Sophia',
        'optimizer.learning_rate': 3e-4,
        'optimizer.rho': 0.03,
    })
Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
Start the experiment
    with experiment.start():
Run training
        conf.run()


if __name__ == '__main__':
    main()