[ad8447]: / (1) PyTorch_HistoNet / modelGeno / vgg16_bn_Geno.py

import torch
import torch.nn as nn


class vgg16_bn_Geno(nn.Module):
    def __init__(self, numClasses):
        super(vgg16_bn_Geno, self).__init__()
        in_channels = 3
        self.features = nn.Sequential(
            # Block 1: 3 -> 64 channels (BatchNorm2d is correct here, since the
            # convolution outputs are 4-D feature maps)
            nn.Conv2d(in_channels, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(64),
            nn.Dropout(0.3, inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
            # Block 2: 64 -> 128 channels
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(128),
            nn.Dropout(0.4, inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(128),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
            # Block 3: 128 -> 256 channels
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(256),
            nn.Dropout(0.4, inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(256),
            nn.Dropout(0.4, inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(256),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
            # Block 4: 256 -> 512 channels
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(512),
            nn.Dropout(0.4, inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(512),
            nn.Dropout(0.4, inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(512),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
            # Block 5: 512 -> 512 channels; the final 14x14 max pool collapses the
            # 14x14 feature maps of a 224x224 input down to 1x1
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(512),
            nn.Dropout(0.4, inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(512),
            nn.Dropout(0.4, inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(512),
            nn.MaxPool2d(kernel_size=14, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.Dropout(0.5, inplace=True)
        )
        self.classifierG = nn.Sequential(
            nn.Linear(512, 512),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(512),
            nn.Dropout(0.5, inplace=True),
            nn.Linear(512, numClasses),
        )

    # Defining the forward pass
    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, 1)  # (N, 512, 1, 1) -> (N, 512)
        x = self.classifierG(x)
        return x
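
For context, here is a minimal smoke-test sketch; it is not part of the repository file. It assumes 3x224x224 inputs, which is what the final 14x14 max pool implies, and a hypothetical class count of 5. The model is put in eval mode so the dummy batch does not update the BatchNorm running statistics.

import torch
from vgg16_bn_Geno import vgg16_bn_Geno  # assumes the script runs from modelGeno/

model = vgg16_bn_Geno(numClasses=5)  # 5 is a hypothetical class count
model.eval()  # use running BatchNorm statistics; Dropout becomes a no-op

with torch.no_grad():
    dummy = torch.randn(4, 3, 224, 224)   # batch of 4 RGB images at 224x224
    feats = model.features(dummy)          # -> torch.Size([4, 512, 1, 1])
    logits = model(dummy)                  # -> torch.Size([4, 5])

print(feats.shape, logits.shape)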