[a18f15]: / algorithms / classifiers.py

Download this file

64 lines (47 with data), 2.0 kB

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import os, sys
import torch
import torchvision
from torch import nn
sys.path.append(os.getcwd())
from algorithms.arch.resnet import loadResnetBackbone
import utilities.runUtils as rutl
##================= Classifier Wrapper =========================================
class ClassifierNet(nn.Module):
    """Image classifier: a ResNet-style backbone, an optional feature-processing
    stage (BatchNorm1d / Dropout), and an MLP classification head.

    Args:
        arch: backbone architecture name forwarded to ``loadResnetBackbone``.
        fc_layer_sizes: layer sizes of the MLP head; the last entry is the
            number of output classes. Default is a tuple to avoid the
            shared-mutable-default pitfall; lists are still accepted.
        feature_dropout: dropout probability applied to backbone features.
        classifier_dropout: dropout probability between hidden head layers.
        feature_freeze: forwarded to the backbone loader as ``freeze``
            (presumably freezes backbone weights — defined in the loader).
        feature_bnorm: if True, apply a non-affine BatchNorm1d to the
            backbone features before dropout.
        torch_pretrain: pretrained-weight spec forwarded to the loader.
    """

    def __init__(self, arch, fc_layer_sizes=(512, 1000),
                 feature_dropout=0, classifier_dropout=0,
                 feature_freeze=False, feature_bnorm=False,
                 torch_pretrain=None):
        super().__init__()
        rutl.START_SEED(7)  # fixed seed so layer initialisation is reproducible

        self.fc_layer_sizes = fc_layer_sizes

        # Feature extractor: loader returns (backbone module, feature dim).
        self.backbone, self.feat_outsize = loadResnetBackbone(
            arch=arch,
            torch_pretrain=torch_pretrain,
            freeze=feature_freeze)

        fx_layers = []
        if feature_bnorm:
            # affine=False: normalise only, no learnable scale/shift
            fx_layers.append(nn.BatchNorm1d(self.feat_outsize, affine=False))
        fx_layers.append(nn.Dropout(p=feature_dropout))
        self.featx_proc = nn.Sequential(*fx_layers)

        # Classifier head: Linear -> LayerNorm -> ReLU -> Dropout for each
        # hidden layer, then a final bias-free Linear to the output size.
        sizes = [self.feat_outsize] + list(self.fc_layer_sizes)
        layers = []
        for i in range(len(sizes) - 2):
            layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
            layers.append(nn.LayerNorm(sizes[i + 1]))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.Dropout(p=classifier_dropout))
        layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
        self.classifier = nn.Sequential(*layers)

    def forward(self, x):
        """Run backbone -> feature processing -> classifier head; returns logits."""
        x = self.backbone(x)
        x = self.featx_proc(x)
        return self.classifier(x)
if __name__ == "__main__":
    # Quick smoke test: build a small classifier and inspect its structure.
    from torchinfo import summary

    # NOTE(review): the arch name suggests EfficientNet while the loader is
    # called loadResnetBackbone — confirm the loader supports this arch.
    net = ClassifierNet(arch='efficientnet_b0', fc_layer_sizes=[64, 8],
                        feature_dropout=0, classifier_dropout=0,
                        torch_pretrain=None)
    summary(net, (1, 3, 200, 200))
    print(net)