import torch.nn as nn

from labml import experiment, logger
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_norm import BatchNorm


class Configs(CIFAR10Configs):
    """
    We use `CIFAR10Configs`, which defines all the dataset related
    configurations, optimizer, and a training loop.
    """
    pass


class LargeModel(CIFAR10VGGModel):
    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """
        Create a convolution layer and the activations
        """
        return nn.Sequential(
            # Dropout
            nn.Dropout(0.1),
            # Convolution layer
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # Batch normalization
            BatchNorm(out_channels, track_running_stats=False),
            # ReLU activation
            nn.ReLU(inplace=True),
        )

    def __init__(self):
        # Create a model with given convolution sizes (channels)
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
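

# A minimal shape sanity check (a sketch; `_check_conv_block` is not part of
# the experiment). With `kernel_size=3` and `padding=1` each convolution
# preserves spatial size, so only the pooling layers in `CIFAR10VGGModel`
# downsample the 32x32 CIFAR-10 images.
def _check_conv_block():
    import torch

    # Build one block mapping 3 input channels to 64 output channels
    block = LargeModel().conv_block(3, 64)
    out = block(torch.rand(4, 3, 32, 32))
    # Channels change, spatial dimensions do not
    assert out.shape == (4, 64, 32, 32)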


@option(Configs.model)
def _large_model(c: Configs):
    return LargeModel().to(c.device)
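

# `@option(Configs.model)` registers `_large_model` as the function that
# computes the `model` configuration. labml evaluates such options lazily,
# so `_large_model` runs when `conf.model` is first accessed (below, in
# `experiment.add_pytorch_models`).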


def main():
    # Create experiment
    experiment.create(name='cifar10', comment='large model')
    # Create configurations
    conf = Configs()
    # Load configurations
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'is_save_models': True,
        'epochs': 20,
    })
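    # The dotted keys above address nested configurations: 'optimizer.optimizer'
    # selects the optimizer by name and 'optimizer.learning_rate' sets its
    # learning rate; anything not listed keeps the `CIFAR10Configs` defaults.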
    # Set model for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Print number of parameters in the model
    logger.inspect(params=(sum(p.numel() for p in conf.model.parameters() if p.requires_grad)))
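    # (Rough hand count, assuming 3x3 kernels throughout: the convolutions
    # alone hold about 14.7M weights, so the printed total should be of that
    # order once batch norm parameters and the classifier head are included.)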
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()
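

# Usage note (a sketch of the expected behaviour, not from this file): running
# the module trains for 20 epochs and, since `is_save_models` is set, saves
# checkpoints for the `cifar10` experiment run; the `'model'` key registered
# with `add_pytorch_models` is what a later run would use to reload the weights.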