12import torch.nn as nn
13
14from labml import experiment
15from labml.configs import option
16from labml_helpers.module import Module
17from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
18from labml_nn.normalization.group_norm import GroupNorm
class Model(CIFAR10VGGModel):
    """
    VGG-style model for CIFAR-10 that uses Group Normalization
    after every convolution layer.
    """

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """
        Build one convolution block: `Conv2d -> GroupNorm -> ReLU`.

        Uses the `GroupNorm` implementation imported from
        `labml_nn.normalization.group_norm` with `self.groups` groups.
        """
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # Fix: was `fnorm.GroupNorm(...)` — `fnorm` is undefined anywhere
            # in this file; the intended normalization layer is the imported
            # `GroupNorm` (see the import at the top of the file).
            GroupNorm(self.groups, out_channels),
            nn.ReLU(inplace=True),
        )

    def __init__(self, groups: int = 32):
        # `self.groups` must be assigned *before* calling the base
        # constructor, because `CIFAR10VGGModel.__init__` invokes
        # `conv_block`, which reads it.
        self.groups = groups
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
class Configs(CIFAR10Configs):
    """
    Configurations for the CIFAR-10 group-normalization experiment,
    extending the standard CIFAR-10 experiment configs.
    """
    # Number of groups for group normalization (passed to the model).
    # Restored as a comment: this line had lost its `#` marker and was a
    # bare expression/syntax error.
    groups: int = 16
@option(Configs.model)
def model(c: Configs):
    """Construct the group-norm VGG model and place it on the configured device."""
    net = Model(c.groups)
    return net.to(c.device)
def main():
    """
    Run the CIFAR-10 experiment with group normalization.

    Creates the experiment, loads configurations (Adam, lr=2.5e-4),
    and runs the training loop. The bare prose lines in the original
    were comments that had lost their `#` markers; restored here.
    """
    # Create experiment
    experiment.create(name='cifar10', comment='group norm')
    # Create configurations
    conf = Configs()
    # Load configurations
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
# Script entry point: run the experiment when executed directly.
if __name__ == '__main__':
    main()