import torch.nn as nn

from labml import experiment
from labml.configs import option
from labml_helpers.module import Module
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.normalization.group_norm import GroupNorm
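
# A usage note (an assumption, not stated in this file): this labml
# `GroupNorm(groups, channels)` plays the same role as PyTorch's built-in
# `nn.GroupNorm(num_groups=groups, num_channels=channels)`: it splits the
# channels into `groups` groups and normalizes each group over its channels
# and spatial positions, so the statistics are independent of the batch size.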


class Model(Module):
    """
    ### VGG model for CIFAR-10 classification
    """

    def __init__(self, groups: int = 32):
        super().__init__()
        layers = []
        # RGB channels
        in_channels = 3
        # Number of channels in each layer in each block
        for block in [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]:
            # Convolution, Normalization and Activation layers
            for channels in block:
                layers += [nn.Conv2d(in_channels, channels, kernel_size=3, padding=1),
                           GroupNorm(groups, channels),
                           nn.ReLU(inplace=True)]
                in_channels = channels
            # Max pooling at end of each block
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        # Create a sequential model with the layers
        self.layers = nn.Sequential(*layers)
        # Final logits layer
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        # The VGG layers
        x = self.layers(x)
        # Reshape for classification layer
        x = x.view(x.shape[0], -1)
        # Final linear layer
        return self.fc(x)
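
    # Shape sanity check (a sketch; the 32x32 input size is the standard
    # CIFAR-10 resolution, not something fixed in this file): the five
    # MaxPool2d(kernel_size=2, stride=2) stages halve the resolution
    # 32 -> 16 -> 8 -> 4 -> 2 -> 1, so `self.layers` outputs
    # (batch_size, 512, 1, 1) and the reshape yields exactly the 512
    # features that `self.fc` expects. For example (needs `import torch`):
    #
    #     model = Model(groups=32)
    #     logits = model(torch.randn(4, 3, 32, 32))
    #     assert logits.shape == (4, 10)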


class Configs(CIFAR10Configs):
    # Number of groups
    groups: int = 16


@option(Configs.model)
def model(c: Configs):
    # Create the model and move it to the configured device
    return Model(c.groups).to(c.device)
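
# Note on `@option` (as I understand labml's config system): this registers
# the function above as the default way to compute `Configs.model`, and it is
# only called lazily when the training loop first needs the model, after
# `c.device` and `c.groups` have been resolved.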


def main():
    # Create experiment
    experiment.create(name='cifar10', comment='group norm')
    # Create configurations
    conf = Configs()
    # Load configurations
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
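    # Other configuration fields can be overridden the same way; for instance,
    # adding 'groups': 32 to the dict above should (an assumption about how
    # labml resolves plain attribute names) train with 32 groups instead of
    # the default 16.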
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()