Adam Optimizer with Warmup and Cosine Decay

This extends the AMSGrad optimizer and adds a linear warmup stage followed by cosine learning-rate decay.
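As a quick orientation before the code, here is a minimal usage sketch. It assumes the module path labml_nn.optimizers.adam_warmup_cosine_decay; the toy model, data and hyper-parameter values are placeholders. The optimizer follows the standard PyTorch Optimizer interface inherited through AMSGrad.

import torch
from torch import nn
from labml_nn.optimizers.adam_warmup_cosine_decay import AdamWarmupCosineDecay

# Toy model and data, purely for illustration
model = nn.Linear(16, 1)
optimizer = AdamWarmupCosineDecay(model.parameters(), lr=1e-4, warmup=5_000, total_steps=1e6)

x, y = torch.randn(8, 16), torch.randn(8, 1)
loss = nn.functional.mse_loss(model(x), y)

# Standard PyTorch training step; the warmup + cosine-decay learning rate is applied inside `step()`
optimizer.zero_grad()
loss.backward()
optimizer.step()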

import math
from typing import Dict

from labml_nn.optimizers import WeightDecay
from labml_nn.optimizers.amsgrad import AMSGrad

Adam Optimizer with Warmup and Cosine Decay

This class extends the AMSGrad optimizer defined in amsgrad.py; it adds the warmup and cosine-decay schedule by overriding get_lr.

class AdamWarmupCosineDecay(AMSGrad):

Initialize the optimizer

  • params is the list of parameters
  • lr is the learning rate
  • betas is a tuple of $(\beta_1, \beta_2)$
  • eps is $\hat{\epsilon}$ or $\epsilon$ based on optimized_update
  • weight_decay is an instance of class WeightDecay defined in __init__.py
  • optimized_update is a flag whether to optimize the bias correction of the second moment by doing it after adding $\epsilon$
  • amsgrad is a flag indicating whether to use AMSGrad or fall back to plain Adam
  • warmup is the number of warmup steps
  • total_steps is the total number of steps; the cosine decay reaches its minimum at this point, but the learning rate never falls below 10% of lr because we take the maximum of the decay factor and $0.1$
  • defaults is a dictionary of default values for parameter groups. This is useful when you want to extend the class AdamWarmupCosineDecay (see the sketch after the constructor below).
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16,
                 weight_decay: WeightDecay = WeightDecay(),
                 optimized_update: bool = True,
                 amsgrad=False, warmup=0, total_steps=1e10, defaults=None):
        defaults = {} if defaults is None else defaults
        defaults.update(dict(warmup=warmup, total_steps=total_steps))
        super().__init__(params, lr, betas, eps, weight_decay, optimized_update, amsgrad, defaults)
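This defaults mechanism is what makes the class easy to extend. Below is a minimal sketch of how a subclass could register its own per-group value; the subclass name AdamWarmupCosineDecayWithFloor and its min_lr_ratio parameter are hypothetical illustrations, not part of labml_nn, and such a subclass would also override get_lr (defined next) to read group['min_lr_ratio'].

class AdamWarmupCosineDecayWithFloor(AdamWarmupCosineDecay):
    # Hypothetical subclass: makes the 10% decay floor configurable as a parameter-group value
    def __init__(self, params, min_lr_ratio: float = 0.1, defaults=None, **kwargs):
        defaults = {} if defaults is None else defaults
        defaults.update(dict(min_lr_ratio=min_lr_ratio))
        # `min_lr_ratio` now travels with the other group values (lr, warmup, total_steps, ...)
        super().__init__(params, defaults=defaults, **kwargs)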

Get learning-rate

$$\alpha \frac{t}{w} \quad \text{if } t < w, \qquad \alpha \, \max\left(0.1,\ \tfrac{1}{2}\Big(1 + \cos\Big(\pi \frac{t - w}{T - w}\Big)\Big)\right) \quad \text{otherwise}$$

where $t$ is the current step, $w$ is the number of warmup steps, $T$ is the total number of steps, and $\alpha$ is the base learning rate lr (the implementation adds a small $10^{-8}$ offset during warmup).

    def get_lr(self, state: Dict[str, any], group: Dict[str, any]):

If we are in the warmup stage

        if group['warmup'] > state['step']:

A linearly increasing learning rate from $0$ to $\alpha$

            return 1e-8 + state['step'] * group['lr'] / group['warmup']
        else:

Otherwise, decay with a cosine schedule, clamped so the learning rate never falls below 10% of $\alpha$

            progress = (state['step'] - group['warmup']) / max(1, group['total_steps'] - group['warmup'])
            return group['lr'] * max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
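To make the schedule concrete, here is a small standalone check that mirrors the logic of get_lr above with assumed values lr=1e-4, warmup=5000 and total_steps=4e6; it is an illustration only and does not touch the optimizer.

import math

lr, warmup, total_steps = 1e-4, 5_000, 4e6

def schedule(step):
    # Mirrors `get_lr` above, without optimizer state or parameter groups
    if warmup > step:
        return 1e-8 + step * lr / warmup
    progress = (step - warmup) / max(1, total_steps - warmup)
    return lr * max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))

print(schedule(2_500))      # ~5e-5: halfway through warmup
print(schedule(5_000))      # 1e-4: warmup complete, decay factor is 1
print(schedule(2_002_500))  # ~5e-5: halfway through the cosine decay
print(schedule(4_000_000))  # 1e-5: clamped at 10% of lr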

Plot the learning rate for different numbers of total steps

(Plot of the learning rate schedule)

def _test_lr():
    import matplotlib.pyplot as plt
    import numpy as np
    from torch import nn

    model = nn.Linear(10, 10)
    # One optimizer per total-step count, matching the legend entries below
    opts = [AdamWarmupCosineDecay(model.parameters(), warmup=5000, lr=1e-4, total_steps=t)
            for t in (4e6, 2e6, 1e6)]
    steps = 20_000
    for opt in opts:
        plt.plot(np.arange(1, steps), [opt.get_lr({'step': i}, opt.defaults) for i in range(1, steps)])
    plt.legend(["5000:4e6", "5000:2e6", "5000:1e6"])
    plt.title("Learning Rate")
    plt.show()

    steps = int(6e6)
    step_size = 1000
    for opt in opts:
        plt.plot(np.arange(1, steps, step_size),
                 [opt.get_lr({'step': i}, opt.defaults) for i in range(1, steps, step_size)])
    plt.legend(["5000:4e6", "5000:2e6", "5000:1e6"])
    plt.title("Learning Rate")
    plt.show()


if __name__ == '__main__':
    _test_lr()