CIFAR10 Experiment to try Weight Standardization and Batch-Channel Normalization

import torch.nn as nn

from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_channel_norm import BatchChannelNorm
from labml_nn.normalization.weight_standardization.conv2d import Conv2d

VGG model for CIFAR-10 classification

It derives from the generic VGG-style architecture, CIFAR10VGGModel, and overrides the convolution block to use weight-standardized convolutions followed by batch-channel normalization.

class Model(CIFAR10VGGModel):
    def conv_block(self, in_channels, out_channels) -> nn.Module:
        return nn.Sequential(
            # Weight-standardized convolution (sketched below)
            Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # Batch-channel normalization with 32 groups for the channel norm
            BatchChannelNorm(out_channels, 32),
            nn.ReLU(inplace=True),
        )
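The Conv2d used above is the weight-standardized convolution. A minimal sketch of what it computes, assuming the per-filter standardization from the Weight Standardization paper; ws_conv2d and eps here are illustrative names, not the module's API.

import torch
import torch.nn.functional as F

def ws_conv2d(x, weight, bias=None, stride=1, padding=0, eps=1e-5):
    # Standardize each output filter to zero mean and unit variance
    # over its fan-in dimensions (in_channels, kernel_h, kernel_w)
    mean = weight.mean(dim=(1, 2, 3), keepdim=True)
    var = weight.var(dim=(1, 2, 3), keepdim=True)
    weight = (weight - mean) / torch.sqrt(var + eps)
    # Convolve with the standardized weights
    return F.conv2d(x, weight, bias, stride=stride, padding=padding)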
    def __init__(self):
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
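BatchChannelNorm(out_channels, 32) in the block above combines batch normalization with a group-norm style normalization across 32 channel groups. A minimal sketch of that combination, assuming a plain BatchNorm2d for the batch statistics (the actual module uses estimated statistics; BatchChannelNormSketch is an illustrative name):

class BatchChannelNormSketch(nn.Module):
    def __init__(self, channels: int, groups: int, eps: float = 1e-5):
        super().__init__()
        # Normalize over the batch dimension first ...
        self.batch_norm = nn.BatchNorm2d(channels, eps=eps)
        # ... then over groups of channels within each sample
        self.channel_norm = nn.GroupNorm(groups, channels, eps=eps)

    def forward(self, x):
        return self.channel_norm(self.batch_norm(x))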

Create model

@option(CIFAR10Configs.model)
def _model(c: CIFAR10Configs):
    return Model().to(c.device)
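As a quick sanity check, the model can be run on a dummy batch. This assumes CIFAR10VGGModel follows the usual VGG pattern for CIFAR-10: five pooling stages reduce the 32×32 input to 1×1, followed by a 10-way classifier head.

import torch

model = Model()
x = torch.randn(4, 3, 32, 32)   # a dummy batch of four CIFAR-10 images
logits = model(x)
assert logits.shape == (4, 10)  # one logit per class (assumed classifier head)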
def main():

Create experiment

    experiment.create(name='cifar10', comment='weight standardization')

Create configurations

    conf = CIFAR10Configs()

Load configurations

    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'train_batch_size': 64,
    })
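Other configuration options can be overridden through the same dictionary. For example, assuming CIFAR10Configs exposes an epochs key (as labml training configurations typically do), the run length could be set alongside the optimizer settings; the value here is illustrative.

experiment.configs(conf, {
    'optimizer.optimizer': 'Adam',
    'optimizer.learning_rate': 2.5e-4,
    'train_batch_size': 64,
    'epochs': 10,  # assumed config key; illustrative value
})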

Start the experiment and run the training loop

    with experiment.start():
        conf.run()

if __name__ == '__main__':
    main()