from typing import Tuple

import torch
from labml import tracker

from labml.configs import BaseConfigs, option, meta_config


class OptimizerConfigs(BaseConfigs):
    """
    This creates a configurable optimizer.

    Arguments:
        learning_rate (float): Learning rate of the optimizer. Defaults to ``0.01``.
        momentum (float): Momentum of the optimizer. Defaults to ``0.5``.
        parameters: Model parameters to optimize.
        d_model (int): Embedding size of the model (for the Noam optimizer).
        betas (Tuple[float, float]): Betas for the Adam optimizer. Defaults to ``(0.9, 0.999)``.
        eps (float): Epsilon for the Adam/RMSProp optimizers. Defaults to ``1e-8``.
        step_factor (int): Step factor for the Noam optimizer. Defaults to ``1024``.

    There is also a more complete implementation, with more options, in ``labml_nn``;
    `we recommend using that <https://nn.labml.ai/optimizers/configs.html>`_.

    A minimal usage sketch appears after the class definition below.
    """
    optimizer: torch.optim.Adam
    learning_rate: float = 0.01
    momentum: float = 0.5
    parameters: any
    d_model: int
    betas: Tuple[float, float] = (0.9, 0.999)
    eps: float = 1e-8
    step_factor: int = 1024

    def __init__(self):
        super().__init__(_primary='optimizer')


meta_config(OptimizerConfigs.parameters)
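

# A minimal usage sketch (an illustration, not part of the original module), following the
# pattern used by the configs in ``labml_nn``: create the config object, attach the model
# parameters, and select one of the registered optimizer options by name. The model and the
# hyper-parameter values below are placeholders.
def _example_optimizer_configs():
    import torch.nn as nn

    model = nn.Linear(16, 16)
    conf = OptimizerConfigs()
    conf.parameters = model.parameters()  # parameters to optimize
    conf.optimizer = 'Adam'  # one of the registered options: 'SGD', 'Adam' or 'Noam'
    conf.learning_rate = 3e-4
    return conf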


@option(OptimizerConfigs.optimizer, 'SGD')
def sgd_optimizer(c: OptimizerConfigs):
    return torch.optim.SGD(c.parameters, c.learning_rate, c.momentum)


@option(OptimizerConfigs.optimizer, 'Adam')
def adam_optimizer(c: OptimizerConfigs):
    return torch.optim.Adam(c.parameters, lr=c.learning_rate,
                            betas=c.betas, eps=c.eps)


class NoamOpt:
    # Wraps an optimizer and sets its learning rate following the Noam schedule
    # from "Attention Is All You Need".
    def __init__(self, model_size: int, learning_rate: float, warmup: int, step_factor: int, optimizer):
        self.step_factor = step_factor
        self.optimizer = optimizer
        self.warmup = warmup
        self.learning_rate = learning_rate
        self.model_size = model_size
        self._rate = 0

    def step(self):
        # Compute the schedule step from labml's global step, scaled down by `step_factor`,
        # and set the learning rate of every parameter group before stepping the optimizer.
        rate = self.rate(tracker.get_global_step() / self.step_factor)
        for p in self.optimizer.param_groups:
            p['lr'] = rate
        self._rate = rate
        self.optimizer.step()

    def rate(self, step):
        # learning_rate * model_size^(-1/2) * min(step^(-1/2), step * warmup^(-3/2))
        factor = self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5))
        return self.learning_rate * factor

    def zero_grad(self):
        self.optimizer.zero_grad()


@option(OptimizerConfigs.optimizer, 'Noam')
def noam_optimizer(c: OptimizerConfigs):
    # Wrap Adam (created with lr=0.0, since NoamOpt overwrites the rate on every step)
    # in the Noam schedule with a fixed warmup of 2,000 schedule steps.
    optimizer = torch.optim.Adam(c.parameters, lr=0.0, betas=c.betas, eps=c.eps)
    return NoamOpt(c.d_model, 1, 2000, c.step_factor, optimizer)
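

# A small sketch (an illustration, not part of the original module) of driving NoamOpt by
# hand. The wrapped Adam optimizer is created with lr=0.0 because NoamOpt sets the learning
# rate itself, and labml's global step counter is advanced explicitly since `NoamOpt.step`
# reads it through `tracker.get_global_step()`.
def _example_noam_step():
    import torch.nn as nn

    model = nn.Linear(16, 16)
    adam = torch.optim.Adam(model.parameters(), lr=0.0)
    noam = NoamOpt(model_size=512, learning_rate=1., warmup=2000, step_factor=1024, optimizer=adam)

    loss = model(torch.randn(4, 16)).sum()
    loss.backward()
    tracker.add_global_step()  # the schedule depends on the global step, not on calls to `step`
    noam.step()
    noam.zero_grad()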


def _test_noam_optimizer():
    import matplotlib.pyplot as plt
    import numpy as np

    # Plot the Noam schedule for a few model-size/warmup settings. The step factor and the
    # wrapped optimizer do not affect `rate`, so dummy values are passed here.
    opts = [NoamOpt(512, 1, 4000, 1, None),
            NoamOpt(512, 1, 8000, 1, None),
            NoamOpt(256, 1, 4000, 1, None)]
    plt.plot(np.arange(1, 20000), [[opt.rate(i) for opt in opts] for i in range(1, 20000)])
    plt.legend(["512:4000", "512:8000", "256:4000"])
    plt.title("Optimizer")
    plt.show()


if __name__ == '__main__':
    _test_noam_optimizer()