# networks/att_unet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class conv_block(nn.Module):
    """
    Convolution block: two 3x3x3 convolutions, each followed by
    batch normalization and a ReLU activation.
    """
    def __init__(self, in_ch, out_ch):
        super(conv_block, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv3d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm3d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv3d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm3d(out_ch),
            nn.ReLU(inplace=True))

    def forward(self, x):
        x = self.conv(x)
        return x


class up_conv(nn.Module):
    """
    Up-convolution block: trilinear upsampling (x2) followed by a 3x3x3
    convolution, batch normalization and ReLU. Not used by AttentionUNet
    below, which upsamples with F.interpolate instead.
    """

    def __init__(self, in_ch, out_ch):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False),
            nn.Conv3d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm3d(out_ch),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x = self.up(x)
        return x


class AttentionBlock(nn.Module):
    """
    Attention gate: builds a spatial attention map from the decoder features
    (x) and the encoder skip features (x_skip), then uses it to gate the
    projected skip features.
    """
    def __init__(self, in_channels, skip_channels, mid_channels):
        super(AttentionBlock, self).__init__()
        self.W_skip = nn.Sequential(nn.Conv3d(skip_channels, mid_channels, kernel_size=1),
                                    nn.BatchNorm3d(mid_channels))
        self.W_x = nn.Sequential(nn.Conv3d(in_channels, mid_channels, kernel_size=1),
                                 nn.BatchNorm3d(mid_channels))
        self.psi = nn.Sequential(nn.Conv3d(mid_channels, 1, kernel_size=1),
                                 nn.BatchNorm3d(1),
                                 nn.Sigmoid())

    def forward(self, x_skip, x):
        # Project both inputs to mid_channels, add them, and squash the result
        # into a single-channel attention map in [0, 1].
        x_skip = self.W_skip(x_skip)
        x = self.W_x(x)
        out = self.psi(F.relu(x_skip + x, inplace=True))
        # Gate the projected skip features with the attention map.
        return out * x_skip


class AttentionUp(nn.Module):
    """
    Decoder block: upsample the decoder features to the skip resolution,
    gate the skip features with an attention block, then fuse both paths
    with a conv_block.
    """
    def __init__(self, in_ch, out_ch):
        super(AttentionUp, self).__init__()
        self.attention = AttentionBlock(in_ch, out_ch, out_ch)
        self.conv1 = conv_block(in_ch + out_ch, out_ch)

    def forward(self, x, x_skip):
        # x is the output of the previous decoder block; x_skip is the skip connection.
        x = F.interpolate(x, x_skip.shape[2:], mode='trilinear', align_corners=False)
        x_attention = self.attention(x_skip, x)
        # Concatenate the upsampled features with the gated skip features
        # along the channel dimension before the fusing convolution.
        x = torch.cat((x, x_attention), dim=1)
        x = self.conv1(x)
        return x


class AttentionUNet(nn.Module):
    """
    3D Attention U-Net: a U-Net encoder/decoder with attention-gated skip
    connections and a 1x1x1 output convolution.
    """
    def __init__(self, in_ch=3, out_ch=1):
        super(AttentionUNet, self).__init__()

        n1 = 16
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        # The deepest pooling does not downsample the last spatial axis.
        self.Maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 1), stride=(2, 2, 1))

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        self.Up_conv5 = AttentionUp(filters[4], filters[3])
        self.Up_conv4 = AttentionUp(filters[3], filters[2])
        self.Up_conv3 = AttentionUp(filters[2], filters[1])
        self.Up_conv2 = AttentionUp(filters[1], filters[0])

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Encoder
        e1 = self.Conv1(x)

        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)

        e3 = self.Maxpool2(e2)
        e3 = self.Conv3(e3)

        e4 = self.Maxpool3(e3)
        e4 = self.Conv4(e4)

        e5 = self.Maxpool4(e4)
        e5 = self.Conv5(e5)

        # Decoder with attention-gated skip connections
        d4 = self.Up_conv5(e5, e4)
        d3 = self.Up_conv4(d4, e3)
        d2 = self.Up_conv3(d3, e2)
        d1 = self.Up_conv2(d2, e1)

        out = self.Conv(d1)

        return out


if __name__ == '__main__':
    # Smoke test: a batch of two single-channel 96x96x48 volumes, two output classes.
    x = torch.randn(2, 1, 96, 96, 48)
    model = AttentionUNet(in_ch=1, out_ch=2)
    y = model(x)
    print(y.shape)  # expected: torch.Size([2, 2, 96, 96, 48])
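
    # Illustrative standalone check of the attention gate (channel counts here
    # assume the deepest level of the model above: filters[4] = 256 decoder
    # channels gating filters[3] = 128 skip channels).
    att = AttentionBlock(in_channels=256, skip_channels=128, mid_channels=128)
    skip = torch.randn(2, 128, 12, 12, 6)
    dec = torch.randn(2, 256, 12, 12, 6)
    print(att(skip, dec).shape)  # expected: torch.Size([2, 128, 12, 12, 6])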