Diff of /make_3D_DenseSeg.py [000000] .. [168bda]

from __future__ import print_function

caffe_root = '/home/toanhoi/caffe/'
import sys
sys.path.insert(0, caffe_root + 'python')

import caffe
import math
from caffe import layers as L
from caffe import params as P  # only needed if the commented-out Pooling layers are re-enabled
from caffe.proto import caffe_pb2


def bn_relu_conv_bn_relu(bottom, nout, dropout, split):
    # DenseNet-BC composite function: BN-ReLU-Conv(1x1x1) bottleneck (4*nout
    # maps) followed by BN-ReLU-Conv(3x3x3) producing nout new feature maps.
    if split == 'train':
        use_global_stats = False
    else:
        use_global_stats = True

    batch_norm1 = L.BatchNorm(bottom, batch_norm_param=dict(use_global_stats=use_global_stats), in_place=False,
                              param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                     dict(lr_mult=0, decay_mult=0)])
    scale1 = L.Scale(batch_norm1, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
    relu1 = L.ReLU(scale1, in_place=True)
    conv1 = L.Convolution(relu1, kernel_size=[1, 1, 1], pad=[0, 0, 0], stride=[1, 1, 1],
                          param=[dict(lr_mult=1, decay_mult=1)], bias_term=False,
                          num_output=nout * 4, axis=1, weight_filler=dict(type='msra'),
                          bias_filler=dict(type='constant'))

    batch_norm2 = L.BatchNorm(conv1, batch_norm_param=dict(use_global_stats=use_global_stats), in_place=False,
                              param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                     dict(lr_mult=0, decay_mult=0)])
    scale2 = L.Scale(batch_norm2, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
    relu2 = L.ReLU(scale2, in_place=True)
    conv2 = L.Convolution(relu2, param=[dict(lr_mult=1, decay_mult=1)], bias_term=False,
                          axis=1, num_output=nout, pad=[1, 1, 1], kernel_size=[3, 3, 3], stride=[1, 1, 1],
                          weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))

    if dropout > 0:
        conv2 = L.Dropout(conv2, dropout_ratio=dropout)
    return conv2


def add_layer(bottom, num_filter, dropout, split):
    # One dense unit: compute num_filter new feature maps and concatenate them
    # with the input along the channel axis (dense connectivity).
    conv = bn_relu_conv_bn_relu(bottom, nout=num_filter, dropout=dropout, split=split)
    concate = L.Concat(bottom, conv, axis=1)
    return concate


def transition(bottom, num_filter, split):
    # Transition between dense blocks: BN-ReLU-Conv(1x1x1) compresses the
    # channel count, then a strided 2x2x2 convolution halves the resolution.
    if split == 'train':
        use_global_stats = False
    else:
        use_global_stats = True

    batch_norm1 = L.BatchNorm(bottom, batch_norm_param=dict(use_global_stats=use_global_stats), in_place=False,
                              param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                     dict(lr_mult=0, decay_mult=0)])
    scale1 = L.Scale(batch_norm1, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
    relu1 = L.ReLU(scale1, in_place=True)
    conv1 = L.Convolution(relu1, param=[dict(lr_mult=1, decay_mult=1)], bias_term=False,
                          axis=1, num_output=num_filter, pad=[0, 0, 0], kernel_size=[1, 1, 1], stride=[1, 1, 1],
                          weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))

    batch_norm2 = L.BatchNorm(conv1, batch_norm_param=dict(use_global_stats=use_global_stats), in_place=False,
                              param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                     dict(lr_mult=0, decay_mult=0)])
    scale2 = L.Scale(batch_norm2, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
    relu2 = L.ReLU(scale2, in_place=True)

    conv_down = L.Convolution(relu2, param=[dict(lr_mult=1, decay_mult=1)], bias_term=False,
                              axis=1, num_output=num_filter, pad=[0, 0, 0], kernel_size=[2, 2, 2], stride=2,
                              weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))

    # pooling = L.Pooling(conv1, type="Pooling", pool=P.Pooling.MAX, kernel_size=2, stride=2, engine=1)
    return conv_down
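

# Illustrative helper (not used by the network builder): traces how the
# channel count evolves through the dense blocks and transitions, mirroring
# the nchannels arithmetic inside densenet() below.
def _trace_channels(first_output=32, growth_rate=16, reduction=0.5, blocks=(4, 4, 4, 4)):
    nchannels = first_output
    trace = [nchannels]
    for i, num_layers in enumerate(blocks):
        nchannels += num_layers * growth_rate                   # each dense layer adds growth_rate maps
        trace.append(nchannels)
        if i < len(blocks) - 1:                                 # a transition follows all but the last block
            nchannels = int(math.floor(nchannels * reduction))  # compression by `reduction`
    return trace  # [32, 96, 112, 120, 124] with the defaults, matching the print() calls below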


# first_output -- number of channels before entering the first dense block; set it comparable to growth_rate
# growth_rate  -- growth rate of the dense blocks
# dropout      -- 0 disables dropout; a non-zero value sets the dropout rate
def densenet(split, batch_size=4, first_output=32, growth_rate=16, dropout=0.2):
    source_train_path = './train_list.txt'
    source_test_path = './test_list.txt'
    patch_size = [64, 64, 64]
    n = caffe.NetSpec()
    num_classes = 4
    reduction = 0.5
    N = [4, 4, 4, 4]
    if split == 'train':
        n.data, n.label = L.HDF5Data(name="data", batch_size=batch_size, source=source_train_path, ntop=2, shuffle=True,
                                     transform_param=dict(crop_size_l=patch_size[0], crop_size_h=patch_size[1],
                                                          crop_size_w=patch_size[2]), include={'phase': caffe.TRAIN})
    elif split == 'val':
        n.data, n.label = L.HDF5Data(name="data", batch_size=batch_size, source=source_test_path, ntop=2, shuffle=True,
                                     transform_param=dict(crop_size_l=patch_size[0], crop_size_h=patch_size[1],
                                                          crop_size_w=patch_size[2]),
                                     include={'phase': caffe.TEST})
    else:
        n.data = L.Input(name="data", ntop=1,
                         input_param={'shape': {'dim': [1, 2, patch_size[0], patch_size[1], patch_size[2]]}})
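
    # Note: crop_size_l / crop_size_h / crop_size_w are not fields of stock
    # Caffe's TransformationParameter; they appear to come from the author's
    # 3D Caffe fork and crop random 64x64x64 sub-volumes from the HDF5 data.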

    nchannels = first_output

    # First layers
    n.conv1a = L.Convolution(n.data, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                             axis=1, num_output=nchannels, pad=[1, 1, 1], kernel_size=[3, 3, 3], stride=[1, 1, 1],
                             weight_filler=dict(type='msra'), bias_filler=dict(type='constant', value=-0.1))

    if split == 'train':
        use_global_stats = False
    else:
        use_global_stats = True

    n.bnorm1a = L.BatchNorm(n.conv1a, batch_norm_param=dict(use_global_stats=use_global_stats),
                            param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                   dict(lr_mult=0, decay_mult=0)], in_place=False)
    n.scale1a = L.Scale(n.bnorm1a, in_place=True, bias_term=True, filler=dict(value=1), bias_filler=dict(value=0))
    n.relu1a = L.ReLU(n.scale1a, in_place=True)

    # conv1b: bias_term=False because a BatchNorm/Scale pair follows
    n.conv1b = L.Convolution(n.relu1a, param=[dict(lr_mult=1, decay_mult=1)], bias_term=False,
                             axis=1, num_output=nchannels, pad=[1, 1, 1], kernel_size=[3, 3, 3], stride=[1, 1, 1],
                             weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))

    n.bnorm1b = L.BatchNorm(n.conv1b, batch_norm_param=dict(use_global_stats=use_global_stats),
                            param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                   dict(lr_mult=0, decay_mult=0)], in_place=False)
    n.scale1b = L.Scale(n.bnorm1b, in_place=True, bias_term=True, filler=dict(value=1), bias_filler=dict(value=0))
    n.relu1b = L.ReLU(n.scale1b, in_place=True)

    n.conv1c = L.Convolution(n.relu1b, param=[dict(lr_mult=1, decay_mult=1)], bias_term=False,
                             axis=1, num_output=nchannels, pad=[1, 1, 1], kernel_size=[3, 3, 3], stride=[1, 1, 1],
                             weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))
    print(nchannels)

    # model = L.Pooling(n.conv1c, type="Pooling", pool=P.Pooling.MAX, kernel_size=2, stride=2, engine=1)

    n.bnorm1c = L.BatchNorm(n.conv1c, batch_norm_param=dict(use_global_stats=use_global_stats),
                            param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                   dict(lr_mult=0, decay_mult=0)], in_place=False)
    n.scale1c = L.Scale(n.bnorm1c, in_place=True, bias_term=True, filler=dict(value=1), bias_filler=dict(value=0))
    n.relu1c = L.ReLU(n.scale1c, in_place=True)

    # Strided 2x2x2 convolution used for downsampling instead of max pooling
    model = L.Convolution(n.relu1c, param=[dict(lr_mult=1, decay_mult=1)], bias_term=False,
                          axis=1, num_output=nchannels, pad=[0, 0, 0], kernel_size=[2, 2, 2], stride=2,
                          weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))
    n.__setattr__("Conv_down_1", model)

    # ===============Dense block 2=====================
    for i in range(N[0]):
        if i == 0:
            concat = add_layer(model, growth_rate, dropout, split)
            n.__setattr__("Concat_%d" % (i + 1), concat)
            nchannels += growth_rate
            continue
        concat = add_layer(concat, growth_rate, dropout, split)
        n.__setattr__("Concat_%d" % (i + 1), concat)
        nchannels += growth_rate
    # ===============End dense block 2=================
    print(nchannels)
    # ===============Deconvolution layer 2==============
    model_deconv_x2 = L.Deconvolution(concat, param=[dict(lr_mult=0.1, decay_mult=1)],
                                      convolution_param=dict(kernel_size=[4, 4, 4], stride=[2, 2, 2], num_output=num_classes,
                                                             pad=[1, 1, 1], group=num_classes,
                                                             weight_filler=dict(type='bilinear_3D'),
                                                             bias_term=False))
    n.__setattr__("Deconvolution_%d" % (N[0] + 1), model_deconv_x2)
    # ===============End Deconvolution layer 2==============
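
    # Each upsampling branch (here and below) uses kernel_size = stride + 2
    # with pad = 1, so Caffe's deconvolution output size,
    #   out = stride * (in - 1) + kernel_size - 2 * pad,
    # reduces to exactly stride * in -- e.g. 2*(32-1) + 4 - 2 = 64 for this
    # x2 branch on a 32-voxel map, and 16*(4-1) + 18 - 2 = 64 for the x16
    # branch, so every branch recovers the full 64x64x64 patch resolution.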

    # ===============Transition layer 2=================
    model = transition(concat, int(math.floor(nchannels * reduction)), split)
    n.__setattr__("Conv_down_%d" % (N[0] + 1), model)
    nchannels = int(math.floor(nchannels * reduction))
    # ===============End Transition layer 2==============

    # ===============Dense block 3=====================
    for i in range(N[1]):
        if i == 0:
            concat = add_layer(model, growth_rate, dropout, split)
            n.__setattr__("Concat_%d" % (N[1] + i + 2), concat)
            nchannels += growth_rate
            continue
        concat = add_layer(concat, growth_rate, dropout, split)
        n.__setattr__("Concat_%d" % (N[1] + i + 2), concat)
        nchannels += growth_rate
    # ===============End dense block 3=================
    print(nchannels)
    # ===============Deconvolution layer 3==============
    model_deconv_x4 = L.Deconvolution(concat, param=[dict(lr_mult=0.1, decay_mult=1)],
                                      convolution_param=dict(kernel_size=[6, 6, 6], stride=[4, 4, 4], num_output=num_classes,
                                                             pad=[1, 1, 1], group=num_classes,
                                                             weight_filler=dict(type='bilinear_3D'),
                                                             bias_term=False))
    n.__setattr__("Deconvolution_%d" % (N[0] + N[1] + 2), model_deconv_x4)
    # ===============End Deconvolution layer 3==============
    # ===============Transition layer 3=================
    model = transition(concat, int(math.floor(nchannels * reduction)), split)
    n.__setattr__("Conv_down_%d" % (N[0] + N[1] + 2), model)
    nchannels = int(math.floor(nchannels * reduction))
    # ===============End Transition layer 3==============

    # ===============Dense block 4=====================
    for i in range(N[2]):
        if i == 0:
            concat = add_layer(model, growth_rate, dropout, split)
            n.__setattr__("Concat_%d" % (N[0] + N[1] + i + 3), concat)
            nchannels += growth_rate
            continue
        concat = add_layer(concat, growth_rate, dropout, split)
        n.__setattr__("Concat_%d" % (N[0] + N[1] + i + 3), concat)
        nchannels += growth_rate
    # ===============End dense block 4=================
    print(nchannels)

    # ===============Deconvolution layer 4==============
    model_deconv_x8 = L.Deconvolution(concat, param=[dict(lr_mult=0.1, decay_mult=1)],
                                      convolution_param=dict(kernel_size=[10, 10, 10], stride=[8, 8, 8], num_output=num_classes,
                                                             pad=[1, 1, 1], group=num_classes,
                                                             weight_filler=dict(type='bilinear_3D'),
                                                             bias_term=False))
    n.__setattr__("Deconvolution_%d" % (N[0] + N[1] + N[2] + 3), model_deconv_x8)
    # ===============End Deconvolution layer 4==============

    # ===============Transition layer 4=================
    model = transition(concat, int(math.floor(nchannels * reduction)), split)
    n.__setattr__("Conv_down_%d" % (N[0] + N[1] + N[2] + 3), model)
    nchannels = int(math.floor(nchannels * reduction))
    # ===============End Transition layer 4==============

    # ===============Dense block 5=====================
    for i in range(N[3]):
        if i == 0:
            concat = add_layer(model, growth_rate, dropout, split)
            n.__setattr__("Concat_%d" % (N[0] + N[1] + N[2] + N[3] + i + 3), concat)
            nchannels += growth_rate
            continue
        concat = add_layer(concat, growth_rate, dropout, split)
        n.__setattr__("Concat_%d" % (N[0] + N[1] + N[2] + N[3] + i + 3), concat)
        nchannels += growth_rate
    # ===============End dense block 5=================
    print(nchannels)

    # ===============Deconvolution layer 5==============
    model_deconv_x16 = L.Deconvolution(concat, param=[dict(lr_mult=0.1, decay_mult=1)],
                                       convolution_param=dict(kernel_size=[18, 18, 18], stride=[16, 16, 16],
                                                              num_output=num_classes,
                                                              pad=[1, 1, 1], group=num_classes,
                                                              weight_filler=dict(type='bilinear_3D'),
                                                              bias_term=False))
    n.__setattr__("Deconvolution_%d" % (N[0] + N[1] + N[2] + N[3] + 4), model_deconv_x16)
    model = L.Concat(n.conv1c, model_deconv_x2, model_deconv_x4, model_deconv_x8, model_deconv_x16,
                     axis=1)

    n.bnorm_concat = L.BatchNorm(model, batch_norm_param=dict(use_global_stats=use_global_stats),
                                 param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                        dict(lr_mult=0, decay_mult=0)], in_place=False)
    n.scale_concat = L.Scale(n.bnorm_concat, in_place=True, bias_term=True, filler=dict(value=1), bias_filler=dict(value=0))
    n.relu_concat = L.ReLU(n.scale_concat, in_place=True)
    model_conv_concate = L.Convolution(n.relu_concat,
                                       param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                       axis=1, num_output=num_classes, pad=[0, 0, 0], kernel_size=[1, 1, 1],
                                       weight_filler=dict(type='msra'))
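
    # Fusion arithmetic: conv1c contributes first_output = 32 channels at full
    # resolution, and each of the four upsampled branches contributes
    # num_classes = 4, so the concatenated tensor has 32 + 4*4 = 48 channels
    # before the 1x1x1 convolution maps it to num_classes class scores.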

    if split in ('train', 'val'):
        n.loss = L.SoftmaxWithLoss(model_conv_concate, n.label)
    else:
        n.softmax = L.Softmax(model_conv_concate, ntop=1, in_place=False)
    return n.to_proto()


def make_net():
    with open('train_3d_denseseg.prototxt', 'w') as f:
        print(str(densenet('train', batch_size=4)), file=f)

    with open('test_3d_denseseg.prototxt', 'w') as f:
        print(str(densenet('val', batch_size=4)), file=f)

    with open('deploy_3d_denseseg.prototxt', 'w') as f:
        print(str(densenet('deploy', batch_size=0)), file=f)
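

# Usage sketch for the generated deploy net (assumes the author's 3D Caffe
# fork is built and a trained .caffemodel exists; file names illustrative):
#   net = caffe.Net('deploy_3d_denseseg.prototxt', 'weights.caffemodel', caffe.TEST)
#   net.blobs['data'].data[...] = patch   # (1, 2, 64, 64, 64): two input modalities
#   prob = net.forward()['softmax']       # (1, 4, 64, 64, 64) voxel-wise class probabilities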


def make_solver():
    s = caffe_pb2.SolverParameter()
    s.random_seed = 0xCAFFE

    s.train_net = 'train_3d_denseseg.prototxt'

    s.max_iter = 200000
    s.type = 'Adam'
    s.display = 20

    s.base_lr = 0.0002
    # s.power = 0.9

    s.momentum = 0.97
    s.weight_decay = 0.0005
    s.average_loss = 20
    s.iter_size = 1
    s.lr_policy = 'step'
    s.stepsize = 50000
    s.gamma = 0.1
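    # With base_lr=2e-4, stepsize=50000, gamma=0.1, the 'step' policy gives
    # lr = 2e-4 * 0.1^floor(iter/50000): 2e-4, then 2e-5, 2e-6, and finally
    # 2e-7 over the 200000-iteration run.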
    s.snapshot_prefix = './snapshot/3d_denseseg_iseg'
    s.snapshot = 2000
    s.solver_mode = caffe_pb2.SolverParameter.GPU

    solver_path = 'solver.prototxt'
    with open(solver_path, 'w') as f:
        f.write(str(s))


if __name__ == '__main__':
    make_net()
    make_solver()