CIFAR10 Experiment for Batch Normalization

import torch.nn as nn

from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_norm import BatchNorm

VGG model for CIFAR-10 classification

This derives from the generic VGG-style architecture in CIFAR10VGGModel, overriding conv_block so that every convolution is followed by batch normalization.

class Model(CIFAR10VGGModel):
    def conv_block(self, in_channels, out_channels) -> nn.Module:
        # A 3x3 convolution followed by batch normalization and ReLU
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            BatchNorm(out_channels),
            nn.ReLU(inplace=True),
        )

    def __init__(self):
        # Channel sizes for the five convolutional blocks
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
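BatchNorm(out_channels) normalizes each channel using the mean and variance computed over the batch and spatial dimensions. A minimal sketch of that computation in plain PyTorch (the tensor shape here is illustrative, not taken from the model):

import torch

# A dummy activation map: (batch, channels, height, width)
x = torch.randn(64, 128, 16, 16)

# Per-channel statistics over the batch and spatial dimensions
mean = x.mean(dim=(0, 2, 3), keepdim=True)
var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)

# Normalize; the BatchNorm module additionally applies a learned scale and shift
x_hat = (x - mean) / torch.sqrt(var + 1e-5)

PyTorch's built-in nn.BatchNorm2d implements the same normalization (with running statistics for evaluation) and would be a drop-in replacement for the BatchNorm used here.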

Create model

@option(CIFAR10Configs.model)
def model(c: CIFAR10Configs):
    # Construct the model and move it to the configured device
    return Model().to(c.device)
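As a quick sanity check (a hypothetical snippet, not part of the original experiment), the model maps a batch of CIFAR-10 sized images to ten class logits:

import torch

m = Model()
x = torch.randn(4, 3, 32, 32)   # four 32x32 RGB images
logits = m(x)
assert logits.shape == (4, 10)  # one logit per CIFAR-10 class
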
def main():

Create experiment

    experiment.create(name='cifar10', comment='batch norm')

Create configurations

    conf = CIFAR10Configs()

Load configurations, overriding the optimizer, learning rate, and batch size

    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'train_batch_size': 64,
    })
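Since batch normalization computes its statistics per mini-batch, results are sensitive to train_batch_size; varying it is a natural experiment with this setup.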

Start the experiment and run the training loop

    with experiment.start():
        conf.run()

if __name__ == '__main__':
    main()
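Assuming labml and labml_nn are installed (pip install labml-nn), running this script starts training and logs the run with the labml experiment tracker.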