3import numpy as np
4import torch
5import torch.nn as nn
6
# Module-level default compute device: first CUDA GPU when available.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")

# Uses the formula [(W - K + 2P) / S] + 1, where W is the input size for each
# spatial dimension, K is the kernel size, P is the padding, and S is the stride.

def CalcConvFormula(W, K, P, S):
    """Return the conv output size for one dimension: floor((W - K + 2P)/S) + 1."""
    # Floor division is exact for integer inputs and matches np.floor for floats.
    return int((W - K + 2 * P) // S + 1)

# Calculate the output shape after applying a convolution; see
# https://stackoverflow.com/questions/53580088/calculate-the-output-size-in-convolution-layer

def CalcConvOutShape(in_shape, kernel_size, padding, stride, out_filters):
    """Compute the (H, W, C) output shape of a 2D conv or pool layer.

    Args:
        in_shape: input shape whose first two entries are the spatial (H, W).
        kernel_size: int for a square kernel, or a per-dimension sequence.
        padding: padding applied to both spatial dimensions.
        stride: stride applied to both spatial dimensions.
        out_filters: channel count to report in the result.

    Returns:
        Tuple (H_out, W_out, out_filters); the batch dimension is
        intentionally omitted.
    """
    # Normalize a square (int) kernel so one code path handles both shapes.
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    out_shape = [CalcConvFormula(in_shape[i], kernel_size[i], padding, stride)
                 for i in range(2)]
    return (out_shape[0], out_shape[1], out_filters)  # , batch_size... but not necessary.
class CNN(nn.Module):
    """Configurable 2D convolutional network with a 3-layer MLP head.

    ``build_`` appends conv -> (batch-norm) -> (dropout) -> activation ->
    (max-pool) layers to ``self.module_list``, one group per entry of
    ``conv_filters``, followed by three fully-connected layers
    (l1 -> l2 -> out_features).  ``forward`` walks ``module_list`` in order
    and flattens the tensor immediately before the first ``nn.Linear``.
    """
    def __init__(self
                 , in_features
                 , out_features
                 , conv_filters
                 , conv_kernel_size
                 , conv_strides
                 , conv_pad
                 , actv_func
                 , max_pool_kernels
                 , max_pool_strides
                 , l1=120
                 , l2=84
                 , MLP=None
                 , pre_module_list=None
                 , use_dropout=False
                 , use_batch_norm=False
                 , device="cpu"
                 ):
        """Store configuration, build the layer stack, and move it to *device*.

        Args:
            in_features: input shape; (H, W) means single-channel input,
                otherwise the channel count is read from index 2.
            out_features: output size of the final linear layer.
            conv_filters: out_channels per conv layer; its length determines
                the number of conv layers.
            conv_kernel_size: per-layer kernel size (int or 2-tuple).
            conv_strides: per-layer stride.
            conv_pad: per-layer padding.
            actv_func: per-layer activation name, resolved via GetActivation;
                a falsy entry skips the activation for that layer.
            max_pool_kernels: per-layer pool kernel, or None to skip pooling.
            max_pool_strides: per-layer pool stride (read only when the
                matching kernel entry is set).
            l1, l2: widths of the two hidden linear layers.
            MLP: not used by the visible code — kept for interface
                compatibility.
            pre_module_list: optional pre-built nn.ModuleList to append to.
            use_dropout: insert nn.Dropout(p=0.15) after each conv.
            use_batch_norm: insert nn.BatchNorm2d after each conv.
            device: torch device (or device string) the model is moved to.
        """
        super(CNN, self).__init__()

        # General model properties
        self.in_features = in_features
        self.out_features = out_features

        # Convolution operations
        self.conv_filters = conv_filters
        self.conv_kernel_size = conv_kernel_size
        self.conv_strides = conv_strides
        self.conv_pad = conv_pad

        # Convolution activations
        self.actv_func = actv_func

        # Max pools
        self.max_pool_kernels = max_pool_kernels
        self.max_pool_strides = max_pool_strides

        # Regularization
        self.use_dropout = use_dropout
        self.use_batch_norm = use_batch_norm

        # Tunable parameters (MLP hidden-layer widths)
        self.l1 = l1
        self.l2 = l2

        # Number of conv/pool/act/batch_norm/dropout layer groups we add
        self.n_conv_layers = len(self.conv_filters)

        # Create the module list, optionally extending a pre-built one
        if pre_module_list:
            self.module_list = pre_module_list
        else:
            self.module_list = nn.ModuleList()

        # shape_list records the (H, W, C) shape after each conv/pool layer,
        # seeded with the raw input shape.
        self.shape_list = []
        self.shape_list.append(self.in_features)

        self.build_()

        # Send to the requested device (gpu when available)
        self.device = device
        self.to(self.device)

    def build_(self):
        """Append the conv/regularization/activation/pool stack, then the MLP."""
        # Track the running output shape as layers are added.
        cur_shape = self.GetCurShape()

        for i in range(self.n_conv_layers):
            # First conv reads the input image: 1 channel for a 2D
            # in_features shape, otherwise in_features[2] channels.
            # Later convs read the previous layer's filter count.
            if i == 0:
                if len(self.in_features) == 2:
                    in_channels = 1
                else:
                    in_channels = self.in_features[2]
            else:
                in_channels = self.conv_filters[i - 1]

            cur_shape = CalcConvOutShape(cur_shape, self.conv_kernel_size[i], self.conv_pad[i], self.conv_strides[i],
                                         self.conv_filters[i])
            self.shape_list.append(cur_shape)

            conv = nn.Conv2d(in_channels=in_channels,
                             out_channels=self.conv_filters[i],
                             kernel_size=self.conv_kernel_size[i],
                             padding=self.conv_pad[i],
                             stride=self.conv_strides[i]
                             )
            self.module_list.append(conv)

            # cur_shape[2] is this layer's channel count, so BatchNorm2d
            # receives the matching num_features.
            if self.use_batch_norm:
                self.module_list.append(nn.BatchNorm2d(cur_shape[2]))

            # NOTE(review): plain nn.Dropout (not Dropout2d) after a conv
            # drops individual elements rather than whole channels — confirm
            # this is intended.
            if self.use_dropout:
                self.module_list.append(nn.Dropout(p=0.15))

            # Add the activation function (skipped when the entry is falsy)
            if self.actv_func[i]:
                self.module_list.append(GetActivation(name=self.actv_func[i]))

            # Optional max-pool; its output shape reuses the conv formula
            # with zero padding, keeping the channel count unchanged.
            if self.max_pool_kernels:
                if self.max_pool_kernels[i]:
                    self.module_list.append(nn.MaxPool2d(self.max_pool_kernels[i], stride=self.max_pool_strides[i]))
                    cur_shape = CalcConvOutShape(cur_shape, self.max_pool_kernels[i], 0, self.max_pool_strides[i],
                                                 cur_shape[2])
                    self.shape_list.append(cur_shape)

        # Adding MLP: flatten the final H*W*C features into three
        # fully-connected layers ending at out_features.
        s = self.GetCurShape()
        in_features = s[0] * s[1] * s[2]
        self.module_list.append(nn.Linear(in_features, self.l1))
        self.module_list.append(nn.ReLU())
        self.module_list.append(nn.Linear(self.l1, self.l2))
        self.module_list.append(nn.ReLU())
        self.module_list.append(nn.Linear(self.l2, self.out_features))

    def forward(self, x):
        """Run x through module_list, flattening before the first Linear."""
        j = 0  # set to 1 once the tensor has been flattened
        for i, module in enumerate(self.module_list):
            if isinstance(module, nn.Linear) and j == 0:
                # Cast to float and collapse all non-batch dimensions.
                x = torch.flatten(x.float(), start_dim=1)
                j = 1
            x = module(x)
        return x

    def GetCurShape(self):
        """Return the most recently recorded output shape."""
        return self.shape_list[-1]
def GetCNN(l1=120, l2=84):
    """Build the default CIFAR-10-sized CNN (32x32x3 input, 10 classes).

    Args:
        l1: width of the first fully-connected hidden layer.
        l2: width of the second fully-connected hidden layer.

    Returns:
        A CNN instance moved to CUDA when available, else CPU.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cnn = CNN(in_features=(32, 32, 3),
              out_features=10,
              conv_filters=[32, 32, 64, 64],  # , 128, 256, 512
              conv_kernel_size=[3, 3, 3, 3],  # ,3,3,1
              conv_strides=[1, 1, 1, 1],  # ,1,1,1
              # One entry per conv layer; the original list carried three
              # unused trailing zeros.
              conv_pad=[0, 0, 0, 0],
              actv_func=["relu", "relu", "relu", "relu"],  # , "relu", "relu", "relu"
              max_pool_kernels=[None, (2, 2), None, (2, 2)],  # , None, None, None
              max_pool_strides=[None, 2, None, 2],  # , None,None, None
              l1=l1,
              l2=l2,
              use_dropout=False,
              use_batch_norm=True,  # False
              device=device
              )

    return cnn
def GetActivation(name="relu"):
    """Map an activation name to a fresh nn.Module; unknown names get ReLU."""
    factories = {
        "relu": nn.ReLU,
        "leakyrelu": nn.LeakyReLU,
        "Sigmoid": nn.Sigmoid,
        "Tanh": nn.Tanh,
        "Identity": nn.Identity,
    }
    return factories.get(name, nn.ReLU)()