This trains a small model on CIFAR-10 so that we can measure how much the model benefits from distillation.
import torch.nn as nn

from labml import experiment, logger
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_norm import BatchNorm

We use CIFAR10Configs, which defines all the dataset-related configurations, the optimizer, and the training loop.
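To make that concrete, here is a rough plain-PyTorch sketch of the pieces CIFAR10Configs bundles for us. This is an illustrative stand-in, not the labml implementation; the real configs class also handles validation, logging, and other details.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms


def train_one_epoch(model: nn.Module, device: str = 'cpu', lr: float = 2.5e-4):
    # Dataset-related configuration: CIFAR-10 training set and a data loader
    transform = transforms.ToTensor()
    train_set = datasets.CIFAR10('./data', train=True, download=True, transform=transform)
    train_loader = DataLoader(train_set, batch_size=64, shuffle=True)

    # Optimizer and loss
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()

    # Training loop
    model.to(device).train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = loss_fn(model(images), labels)
        loss.backward()
        optimizer.step()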
class Configs(CIFAR10Configs):
    pass


class SmallModel(CIFAR10VGGModel):
    """Small VGG-style model for CIFAR-10"""

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """Create a convolution layer and the activations"""
        return nn.Sequential(
            # Convolution layer
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # Batch normalization
            BatchNorm(out_channels, track_running_stats=False),
            # ReLU activation
            nn.ReLU(inplace=True),
        )

    def __init__(self):
        # Create a model with the given convolution sizes (channels)
        super().__init__([[32, 32], [64, 64], [128], [128], [128]])
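CIFAR10VGGModel assembles these conv blocks from the channel specification. As a rough illustration of what [[32, 32], [64, 64], [128], [128], [128]] expands to, here is a plain-PyTorch sketch. The 2x2 max-pooling after each group and the single linear classification head are assumptions based on typical VGG-style CIFAR models, not read from CIFAR10VGGModel, and nn.BatchNorm2d stands in for the BatchNorm used above.

import torch
import torch.nn as nn


def make_small_vgg(groups=((32, 32), (64, 64), (128,), (128,), (128,))):
    layers = []
    in_channels = 3  # CIFAR-10 images are 3x32x32
    for group in groups:
        # A conv block (conv -> batch norm -> ReLU) for each channel size in the group
        for out_channels in group:
            layers += [
                nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]
            in_channels = out_channels
        # Assumed: 2x2 max-pooling after each group, halving the feature map
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    # Assumed: flatten and a single linear head for the 10 CIFAR-10 classes
    return nn.Sequential(*layers, nn.Flatten(), nn.Linear(in_channels, 10))


# After five poolings a 32x32 image is reduced to 1x1, so the head sees 128 features
print(make_small_vgg()(torch.zeros(1, 3, 32, 32)).shape)  # torch.Size([1, 10])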
@option(Configs.model)
def _small_model(c: Configs):
    return SmallModel().to(c.device)


def main():
    # Create experiment
    experiment.create(name='cifar10', comment='small model')
    # Create configurations
    conf = Configs()
    # Load configurations
    experiment.configs(conf, {
        'device.cuda_device': 1,
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
    # Set model for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Print the number of parameters in the model
    logger.inspect(params=(sum(p.numel() for p in conf.model.parameters() if p.requires_grad)))
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()
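As a quick sanity check outside the experiment framework, the model can also be instantiated directly. This assumes a CPU run is acceptable and that the model returns one logit per CIFAR-10 class.

import torch

model = SmallModel()
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(n_params)
print(model(torch.zeros(1, 3, 32, 32)).shape)  # expected: torch.Size([1, 10])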