# opengait/modeling/backbones/plain.py
"""The plain backbone.
2
3
    The plain backbone only contains the BasicConv2d, FocalConv2d and MaxPool2d and LeakyReLU layers.
4
"""
5
6
import torch.nn as nn
7
from ..modules import BasicConv2d, FocalConv2d
8
9
10
class Plain(nn.Module):
    """
    The Plain backbone class.

    An implicit LeakyReLU is appended after every layer except max pooling.
    The kernel size, stride and padding of the first convolution layer are 5, 1, 2; those of all other conv layers are 3, 1, 1.

    Typical layers_cfg entries:
    - BC-64: BasicConv2d with 64 output channels. The input channel count is the output channel count of the previous layer.

    - M: nn.MaxPool2d(kernel_size=2, stride=2).

    - FC-128-1: FocalConv2d with 128 output channels and halving 1 (the feature map is split into 2^1 = 2 horizontal parts).

    Specify the layer configuration in your configuration file; a usage sketch is given at the end of this file.
    """

    def __init__(self, layers_cfg, in_channels=1):
        super(Plain, self).__init__()
        self.layers_cfg = layers_cfg
        self.in_channels = in_channels

        self.feature = self.make_layers()

    def forward(self, seqs):
        out = self.feature(seqs)
        return out

    def make_layers(self):
        """
        Reference: torchvision/models/vgg.py
        """
        def get_layer(cfg, in_c, kernel_size, stride, padding):
            cfg = cfg.split('-')
            typ = cfg[0]
            if typ not in ['BC', 'FC']:
                raise ValueError('Only support BC or FC, but got {}'.format(typ))
            out_c = int(cfg[1])

            if typ == 'BC':
                return BasicConv2d(in_c, out_c, kernel_size=kernel_size, stride=stride, padding=padding)
            return FocalConv2d(in_c, out_c, kernel_size=kernel_size, stride=stride, padding=padding, halving=int(cfg[2]))

        # The first layer always uses kernel size 5, stride 1, padding 2,
        # followed by an implicit LeakyReLU.
        Layers = [get_layer(self.layers_cfg[0], self.in_channels,
                            5, 1, 2), nn.LeakyReLU(inplace=True)]
        in_c = int(self.layers_cfg[0].split('-')[1])
        for cfg in self.layers_cfg[1:]:
            if cfg == 'M':
                # 'M' inserts a 2x2 max-pooling layer with no activation.
                Layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                # All remaining conv layers use kernel size 3, stride 1, padding 1,
                # each followed by an implicit LeakyReLU.
                conv2d = get_layer(cfg, in_c, 3, 1, 1)
                Layers += [conv2d, nn.LeakyReLU(inplace=True)]
                in_c = int(cfg.split('-')[1])
        return nn.Sequential(*Layers)
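

# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the backbone itself). It assumes the
# relative import above resolves, e.g. when this module is run inside the
# OpenGait package via `python -m opengait.modeling.backbones.plain`. The
# layer configuration and the 64x44 silhouette size below are illustrative
# assumptions, not the project's official settings; in practice layers_cfg
# comes from the backbone section of a model configuration file.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    # Hypothetical configuration: two basic convs, a 2x2 max-pool, then a
    # focal conv whose feature map is split into 2^1 = 2 horizontal parts.
    layers_cfg = ['BC-64', 'BC-64', 'M', 'FC-128-1']
    backbone = Plain(layers_cfg, in_channels=1)

    # A dummy batch of 8 single-channel frames (assumed 64x44 silhouettes).
    seqs = torch.randn(8, 1, 64, 44)
    feats = backbone(seqs)
    print(feats.shape)  # e.g. torch.Size([8, 128, 32, 22]) under the assumptions above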