# train_3d_denseseg.prototxt
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    crop_size_w: 64
    crop_size_h: 64
    crop_size_l: 64
  }
  hdf5_data_param {
    source: "./train_list.txt"
    batch_size: 4
    shuffle: true
  }
}
layer {
  name: "conv1a"
  type: "Convolution"
  bottom: "data"
  top: "conv1a"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 32
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: -0.10000000149
    }
    axis: 1
  }
}
layer {
  name: "bnorm1a"
  type: "BatchNorm"
  bottom: "conv1a"
  top: "bnorm1a"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "scale1a"
  type: "Scale"
  bottom: "bnorm1a"
  top: "bnorm1a"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "relu1a"
  type: "ReLU"
  bottom: "bnorm1a"
  top: "bnorm1a"
}
layer {
  name: "conv1b"
  type: "Convolution"
  bottom: "bnorm1a"
  top: "conv1b"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "bnorm1b"
  type: "BatchNorm"
  bottom: "conv1b"
  top: "bnorm1b"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "scale1b"
  type: "Scale"
  bottom: "bnorm1b"
  top: "bnorm1b"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "relu1b"
  type: "ReLU"
  bottom: "bnorm1b"
  top: "bnorm1b"
}
layer {
  name: "conv1c"
  type: "Convolution"
  bottom: "bnorm1b"
  top: "conv1c"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "bnorm1c"
  type: "BatchNorm"
  bottom: "conv1c"
  top: "bnorm1c"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "scale1c"
  type: "Scale"
  bottom: "bnorm1c"
  top: "bnorm1c"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "relu1c"
  type: "ReLU"
  bottom: "bnorm1c"
  top: "bnorm1c"
}
layer {
  name: "Conv_down_1"
  type: "Convolution"
  bottom: "bnorm1c"
  top: "Conv_down_1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 2
    kernel_size: 2
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm1"
  type: "BatchNorm"
  bottom: "Conv_down_1"
  top: "BatchNorm1"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale1"
  type: "Scale"
  bottom: "BatchNorm1"
  top: "BatchNorm1"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU1"
  type: "ReLU"
  bottom: "BatchNorm1"
  top: "BatchNorm1"
}
layer {
  name: "Convolution1"
  type: "Convolution"
  bottom: "BatchNorm1"
  top: "Convolution1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm2"
  type: "BatchNorm"
  bottom: "Convolution1"
  top: "BatchNorm2"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale2"
  type: "Scale"
  bottom: "BatchNorm2"
  top: "BatchNorm2"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU2"
  type: "ReLU"
  bottom: "BatchNorm2"
  top: "BatchNorm2"
}
layer {
  name: "Convolution2"
  type: "Convolution"
  bottom: "BatchNorm2"
  top: "Convolution2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout1"
  type: "Dropout"
  bottom: "Convolution2"
  top: "Dropout1"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_1"
  type: "Concat"
  bottom: "Conv_down_1"
  bottom: "Dropout1"
  top: "Concat_1"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm3"
  type: "BatchNorm"
  bottom: "Concat_1"
  top: "BatchNorm3"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale3"
  type: "Scale"
  bottom: "BatchNorm3"
  top: "BatchNorm3"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU3"
  type: "ReLU"
  bottom: "BatchNorm3"
  top: "BatchNorm3"
}
layer {
  name: "Convolution3"
  type: "Convolution"
  bottom: "BatchNorm3"
  top: "Convolution3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm4"
  type: "BatchNorm"
  bottom: "Convolution3"
  top: "BatchNorm4"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale4"
  type: "Scale"
  bottom: "BatchNorm4"
  top: "BatchNorm4"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU4"
  type: "ReLU"
  bottom: "BatchNorm4"
  top: "BatchNorm4"
}
layer {
  name: "Convolution4"
  type: "Convolution"
  bottom: "BatchNorm4"
  top: "Convolution4"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout2"
  type: "Dropout"
  bottom: "Convolution4"
  top: "Dropout2"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_2"
  type: "Concat"
  bottom: "Concat_1"
  bottom: "Dropout2"
  top: "Concat_2"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm5"
  type: "BatchNorm"
  bottom: "Concat_2"
  top: "BatchNorm5"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale5"
  type: "Scale"
  bottom: "BatchNorm5"
  top: "BatchNorm5"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU5"
  type: "ReLU"
  bottom: "BatchNorm5"
  top: "BatchNorm5"
}
layer {
  name: "Convolution5"
  type: "Convolution"
  bottom: "BatchNorm5"
  top: "Convolution5"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm6"
  type: "BatchNorm"
  bottom: "Convolution5"
  top: "BatchNorm6"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale6"
  type: "Scale"
  bottom: "BatchNorm6"
  top: "BatchNorm6"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU6"
  type: "ReLU"
  bottom: "BatchNorm6"
  top: "BatchNorm6"
}
layer {
  name: "Convolution6"
  type: "Convolution"
  bottom: "BatchNorm6"
  top: "Convolution6"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout3"
  type: "Dropout"
  bottom: "Convolution6"
  top: "Dropout3"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_3"
  type: "Concat"
  bottom: "Concat_2"
  bottom: "Dropout3"
  top: "Concat_3"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm7"
  type: "BatchNorm"
  bottom: "Concat_3"
  top: "BatchNorm7"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale7"
  type: "Scale"
  bottom: "BatchNorm7"
  top: "BatchNorm7"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU7"
  type: "ReLU"
  bottom: "BatchNorm7"
  top: "BatchNorm7"
}
layer {
  name: "Convolution7"
  type: "Convolution"
  bottom: "BatchNorm7"
  top: "Convolution7"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm8"
  type: "BatchNorm"
  bottom: "Convolution7"
  top: "BatchNorm8"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale8"
  type: "Scale"
  bottom: "BatchNorm8"
  top: "BatchNorm8"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU8"
  type: "ReLU"
  bottom: "BatchNorm8"
  top: "BatchNorm8"
}
layer {
  name: "Convolution8"
  type: "Convolution"
  bottom: "BatchNorm8"
  top: "Convolution8"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout4"
  type: "Dropout"
  bottom: "Convolution8"
  top: "Dropout4"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_4"
  type: "Concat"
  bottom: "Concat_3"
  bottom: "Dropout4"
  top: "Concat_4"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Deconvolution_5"
  type: "Deconvolution"
  bottom: "Concat_4"
  top: "Deconvolution_5"
  param {
    lr_mult: 0.10000000149
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 4
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 4
    kernel_size: 4
    kernel_size: 4
    group: 4
    stride: 2
    stride: 2
    stride: 2
    weight_filler {
      type: "bilinear_3D"
    }
  }
}
layer {
  name: "BatchNorm9"
  type: "BatchNorm"
  bottom: "Concat_4"
  top: "BatchNorm9"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale9"
  type: "Scale"
  bottom: "BatchNorm9"
  top: "BatchNorm9"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU9"
  type: "ReLU"
  bottom: "BatchNorm9"
  top: "BatchNorm9"
}
layer {
  name: "Convolution9"
  type: "Convolution"
  bottom: "BatchNorm9"
  top: "Convolution9"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 48
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm10"
  type: "BatchNorm"
  bottom: "Convolution9"
  top: "BatchNorm10"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale10"
  type: "Scale"
  bottom: "BatchNorm10"
  top: "BatchNorm10"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU10"
  type: "ReLU"
  bottom: "BatchNorm10"
  top: "BatchNorm10"
}
layer {
  name: "Conv_down_5"
  type: "Convolution"
  bottom: "BatchNorm10"
  top: "Conv_down_5"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 48
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 2
    kernel_size: 2
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
1090
  name: "BatchNorm11"
1091
  type: "BatchNorm"
1092
  bottom: "Conv_down_5"
1093
  top: "BatchNorm11"
1094
  param {
1095
    lr_mult: 0.0
1096
    decay_mult: 0.0
1097
  }
1098
  param {
1099
    lr_mult: 0.0
1100
    decay_mult: 0.0
1101
  }
1102
  param {
1103
    lr_mult: 0.0
1104
    decay_mult: 0.0
1105
  }
1106
  batch_norm_param {
1107
    use_global_stats: false
1108
  }
1109
}
1110
layer {
1111
  name: "Scale11"
1112
  type: "Scale"
1113
  bottom: "BatchNorm11"
1114
  top: "BatchNorm11"
1115
  scale_param {
1116
    filler {
1117
      value: 1.0
1118
    }
1119
    bias_term: true
1120
    bias_filler {
1121
      value: 0.0
1122
    }
1123
  }
1124
}
1125
layer {
1126
  name: "ReLU11"
1127
  type: "ReLU"
1128
  bottom: "BatchNorm11"
1129
  top: "BatchNorm11"
1130
}
1131
layer {
1132
  name: "Convolution10"
1133
  type: "Convolution"
1134
  bottom: "BatchNorm11"
1135
  top: "Convolution10"
1136
  param {
1137
    lr_mult: 1.0
1138
    decay_mult: 1.0
1139
  }
1140
  convolution_param {
1141
    num_output: 64
1142
    bias_term: false
1143
    pad: 0
1144
    pad: 0
1145
    pad: 0
1146
    kernel_size: 1
1147
    kernel_size: 1
1148
    kernel_size: 1
1149
    stride: 1
1150
    stride: 1
1151
    stride: 1
1152
    weight_filler {
1153
      type: "msra"
1154
    }
1155
    bias_filler {
1156
      type: "constant"
1157
    }
1158
    axis: 1
1159
  }
1160
}
1161
layer {
1162
  name: "BatchNorm12"
1163
  type: "BatchNorm"
1164
  bottom: "Convolution10"
1165
  top: "BatchNorm12"
1166
  param {
1167
    lr_mult: 0.0
1168
    decay_mult: 0.0
1169
  }
1170
  param {
1171
    lr_mult: 0.0
1172
    decay_mult: 0.0
1173
  }
1174
  param {
1175
    lr_mult: 0.0
1176
    decay_mult: 0.0
1177
  }
1178
  batch_norm_param {
1179
    use_global_stats: false
1180
  }
1181
}
1182
layer {
1183
  name: "Scale12"
1184
  type: "Scale"
1185
  bottom: "BatchNorm12"
1186
  top: "BatchNorm12"
1187
  scale_param {
1188
    filler {
1189
      value: 1.0
1190
    }
1191
    bias_term: true
1192
    bias_filler {
1193
      value: 0.0
1194
    }
1195
  }
1196
}
1197
layer {
1198
  name: "ReLU12"
1199
  type: "ReLU"
1200
  bottom: "BatchNorm12"
1201
  top: "BatchNorm12"
1202
}
1203
layer {
1204
  name: "Convolution11"
1205
  type: "Convolution"
1206
  bottom: "BatchNorm12"
1207
  top: "Convolution11"
1208
  param {
1209
    lr_mult: 1.0
1210
    decay_mult: 1.0
1211
  }
1212
  convolution_param {
1213
    num_output: 16
1214
    bias_term: false
1215
    pad: 1
1216
    pad: 1
1217
    pad: 1
1218
    kernel_size: 3
1219
    kernel_size: 3
1220
    kernel_size: 3
1221
    stride: 1
1222
    stride: 1
1223
    stride: 1
1224
    weight_filler {
1225
      type: "msra"
1226
    }
1227
    bias_filler {
1228
      type: "constant"
1229
    }
1230
    axis: 1
1231
  }
1232
}
1233
layer {
1234
  name: "Dropout5"
1235
  type: "Dropout"
1236
  bottom: "Convolution11"
1237
  top: "Dropout5"
1238
  dropout_param {
1239
    dropout_ratio: 0.20000000298
1240
  }
1241
}
1242
layer {
1243
  name: "Concat_6"
1244
  type: "Concat"
1245
  bottom: "Conv_down_5"
1246
  bottom: "Dropout5"
1247
  top: "Concat_6"
1248
  concat_param {
1249
    axis: 1
1250
  }
1251
}
1252
layer {
1253
  name: "BatchNorm13"
1254
  type: "BatchNorm"
1255
  bottom: "Concat_6"
1256
  top: "BatchNorm13"
1257
  param {
1258
    lr_mult: 0.0
1259
    decay_mult: 0.0
1260
  }
1261
  param {
1262
    lr_mult: 0.0
1263
    decay_mult: 0.0
1264
  }
1265
  param {
1266
    lr_mult: 0.0
1267
    decay_mult: 0.0
1268
  }
1269
  batch_norm_param {
1270
    use_global_stats: false
1271
  }
1272
}
1273
layer {
1274
  name: "Scale13"
1275
  type: "Scale"
1276
  bottom: "BatchNorm13"
1277
  top: "BatchNorm13"
1278
  scale_param {
1279
    filler {
1280
      value: 1.0
1281
    }
1282
    bias_term: true
1283
    bias_filler {
1284
      value: 0.0
1285
    }
1286
  }
1287
}
1288
layer {
1289
  name: "ReLU13"
1290
  type: "ReLU"
1291
  bottom: "BatchNorm13"
1292
  top: "BatchNorm13"
1293
}
1294
layer {
1295
  name: "Convolution12"
1296
  type: "Convolution"
1297
  bottom: "BatchNorm13"
1298
  top: "Convolution12"
1299
  param {
1300
    lr_mult: 1.0
1301
    decay_mult: 1.0
1302
  }
1303
  convolution_param {
1304
    num_output: 64
1305
    bias_term: false
1306
    pad: 0
1307
    pad: 0
1308
    pad: 0
1309
    kernel_size: 1
1310
    kernel_size: 1
1311
    kernel_size: 1
1312
    stride: 1
1313
    stride: 1
1314
    stride: 1
1315
    weight_filler {
1316
      type: "msra"
1317
    }
1318
    bias_filler {
1319
      type: "constant"
1320
    }
1321
    axis: 1
1322
  }
1323
}
1324
layer {
1325
  name: "BatchNorm14"
1326
  type: "BatchNorm"
1327
  bottom: "Convolution12"
1328
  top: "BatchNorm14"
1329
  param {
1330
    lr_mult: 0.0
1331
    decay_mult: 0.0
1332
  }
1333
  param {
1334
    lr_mult: 0.0
1335
    decay_mult: 0.0
1336
  }
1337
  param {
1338
    lr_mult: 0.0
1339
    decay_mult: 0.0
1340
  }
1341
  batch_norm_param {
1342
    use_global_stats: false
1343
  }
1344
}
1345
layer {
1346
  name: "Scale14"
1347
  type: "Scale"
1348
  bottom: "BatchNorm14"
1349
  top: "BatchNorm14"
1350
  scale_param {
1351
    filler {
1352
      value: 1.0
1353
    }
1354
    bias_term: true
1355
    bias_filler {
1356
      value: 0.0
1357
    }
1358
  }
1359
}
1360
layer {
1361
  name: "ReLU14"
1362
  type: "ReLU"
1363
  bottom: "BatchNorm14"
1364
  top: "BatchNorm14"
1365
}
1366
layer {
1367
  name: "Convolution13"
1368
  type: "Convolution"
1369
  bottom: "BatchNorm14"
1370
  top: "Convolution13"
1371
  param {
1372
    lr_mult: 1.0
1373
    decay_mult: 1.0
1374
  }
1375
  convolution_param {
1376
    num_output: 16
1377
    bias_term: false
1378
    pad: 1
1379
    pad: 1
1380
    pad: 1
1381
    kernel_size: 3
1382
    kernel_size: 3
1383
    kernel_size: 3
1384
    stride: 1
1385
    stride: 1
1386
    stride: 1
1387
    weight_filler {
1388
      type: "msra"
1389
    }
1390
    bias_filler {
1391
      type: "constant"
1392
    }
1393
    axis: 1
1394
  }
1395
}
1396
layer {
1397
  name: "Dropout6"
1398
  type: "Dropout"
1399
  bottom: "Convolution13"
1400
  top: "Dropout6"
1401
  dropout_param {
1402
    dropout_ratio: 0.20000000298
1403
  }
1404
}
1405
layer {
1406
  name: "Concat_7"
1407
  type: "Concat"
1408
  bottom: "Concat_6"
1409
  bottom: "Dropout6"
1410
  top: "Concat_7"
1411
  concat_param {
1412
    axis: 1
1413
  }
1414
}
1415
layer {
1416
  name: "BatchNorm15"
1417
  type: "BatchNorm"
1418
  bottom: "Concat_7"
1419
  top: "BatchNorm15"
1420
  param {
1421
    lr_mult: 0.0
1422
    decay_mult: 0.0
1423
  }
1424
  param {
1425
    lr_mult: 0.0
1426
    decay_mult: 0.0
1427
  }
1428
  param {
1429
    lr_mult: 0.0
1430
    decay_mult: 0.0
1431
  }
1432
  batch_norm_param {
1433
    use_global_stats: false
1434
  }
1435
}
1436
layer {
1437
  name: "Scale15"
1438
  type: "Scale"
1439
  bottom: "BatchNorm15"
1440
  top: "BatchNorm15"
1441
  scale_param {
1442
    filler {
1443
      value: 1.0
1444
    }
1445
    bias_term: true
1446
    bias_filler {
1447
      value: 0.0
1448
    }
1449
  }
1450
}
1451
layer {
1452
  name: "ReLU15"
1453
  type: "ReLU"
1454
  bottom: "BatchNorm15"
1455
  top: "BatchNorm15"
1456
}
1457
layer {
1458
  name: "Convolution14"
1459
  type: "Convolution"
1460
  bottom: "BatchNorm15"
1461
  top: "Convolution14"
1462
  param {
1463
    lr_mult: 1.0
1464
    decay_mult: 1.0
1465
  }
1466
  convolution_param {
1467
    num_output: 64
1468
    bias_term: false
1469
    pad: 0
1470
    pad: 0
1471
    pad: 0
1472
    kernel_size: 1
1473
    kernel_size: 1
1474
    kernel_size: 1
1475
    stride: 1
1476
    stride: 1
1477
    stride: 1
1478
    weight_filler {
1479
      type: "msra"
1480
    }
1481
    bias_filler {
1482
      type: "constant"
1483
    }
1484
    axis: 1
1485
  }
1486
}
1487
layer {
1488
  name: "BatchNorm16"
1489
  type: "BatchNorm"
1490
  bottom: "Convolution14"
1491
  top: "BatchNorm16"
1492
  param {
1493
    lr_mult: 0.0
1494
    decay_mult: 0.0
1495
  }
1496
  param {
1497
    lr_mult: 0.0
1498
    decay_mult: 0.0
1499
  }
1500
  param {
1501
    lr_mult: 0.0
1502
    decay_mult: 0.0
1503
  }
1504
  batch_norm_param {
1505
    use_global_stats: false
1506
  }
1507
}
1508
layer {
1509
  name: "Scale16"
1510
  type: "Scale"
1511
  bottom: "BatchNorm16"
1512
  top: "BatchNorm16"
1513
  scale_param {
1514
    filler {
1515
      value: 1.0
1516
    }
1517
    bias_term: true
1518
    bias_filler {
1519
      value: 0.0
1520
    }
1521
  }
1522
}
1523
layer {
1524
  name: "ReLU16"
1525
  type: "ReLU"
1526
  bottom: "BatchNorm16"
1527
  top: "BatchNorm16"
1528
}
1529
layer {
1530
  name: "Convolution15"
1531
  type: "Convolution"
1532
  bottom: "BatchNorm16"
1533
  top: "Convolution15"
1534
  param {
1535
    lr_mult: 1.0
1536
    decay_mult: 1.0
1537
  }
1538
  convolution_param {
1539
    num_output: 16
1540
    bias_term: false
1541
    pad: 1
1542
    pad: 1
1543
    pad: 1
1544
    kernel_size: 3
1545
    kernel_size: 3
1546
    kernel_size: 3
1547
    stride: 1
1548
    stride: 1
1549
    stride: 1
1550
    weight_filler {
1551
      type: "msra"
1552
    }
1553
    bias_filler {
1554
      type: "constant"
1555
    }
1556
    axis: 1
1557
  }
1558
}
1559
layer {
1560
  name: "Dropout7"
1561
  type: "Dropout"
1562
  bottom: "Convolution15"
1563
  top: "Dropout7"
1564
  dropout_param {
1565
    dropout_ratio: 0.20000000298
1566
  }
1567
}
1568
layer {
1569
  name: "Concat_8"
1570
  type: "Concat"
1571
  bottom: "Concat_7"
1572
  bottom: "Dropout7"
1573
  top: "Concat_8"
1574
  concat_param {
1575
    axis: 1
1576
  }
1577
}
1578
layer {
1579
  name: "BatchNorm17"
1580
  type: "BatchNorm"
1581
  bottom: "Concat_8"
1582
  top: "BatchNorm17"
1583
  param {
1584
    lr_mult: 0.0
1585
    decay_mult: 0.0
1586
  }
1587
  param {
1588
    lr_mult: 0.0
1589
    decay_mult: 0.0
1590
  }
1591
  param {
1592
    lr_mult: 0.0
1593
    decay_mult: 0.0
1594
  }
1595
  batch_norm_param {
1596
    use_global_stats: false
1597
  }
1598
}
1599
# Dense-unit tail: Scale/ReLU on BatchNorm17, 1x1x1 bottleneck conv (64 ch),
# BN/Scale/ReLU, 3x3x3 conv (16 ch) and dropout. (Stray diff line numbers removed.)
layer {
  name: "Scale17"
  type: "Scale"
  bottom: "BatchNorm17"
  top: "BatchNorm17"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU17"
  type: "ReLU"
  bottom: "BatchNorm17"
  top: "BatchNorm17"
}
layer {
  name: "Convolution16"
  type: "Convolution"
  bottom: "BatchNorm17"
  top: "Convolution16"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm18"
  type: "BatchNorm"
  bottom: "Convolution16"
  top: "BatchNorm18"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale18"
  type: "Scale"
  bottom: "BatchNorm18"
  top: "BatchNorm18"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU18"
  type: "ReLU"
  bottom: "BatchNorm18"
  top: "BatchNorm18"
}
layer {
  name: "Convolution17"
  type: "Convolution"
  bottom: "BatchNorm18"
  top: "Convolution17"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout8"
  type: "Dropout"
  bottom: "Convolution17"
  top: "Dropout8"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
# Dense concat plus a fixed bilinear 3D upsampling side branch (stride 4) for
# deep supervision / score fusion. (Stray diff line numbers removed.)
layer {
  name: "Concat_9"
  type: "Concat"
  bottom: "Concat_8"
  bottom: "Dropout8"
  top: "Concat_9"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Deconvolution_10"
  type: "Deconvolution"
  bottom: "Concat_9"
  top: "Deconvolution_10"
  param {
    lr_mult: 0.10000000149
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 4
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 6
    kernel_size: 6
    kernel_size: 6
    group: 4
    stride: 4
    stride: 4
    stride: 4
    weight_filler {
      type: "bilinear_3D"
    }
  }
}
# Transition: BN/Scale/ReLU, 1x1x1 compression conv (56 ch), BN/Scale/ReLU,
# then 2x2x2 stride-2 strided-conv downsampling. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm19"
  type: "BatchNorm"
  bottom: "Concat_9"
  top: "BatchNorm19"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale19"
  type: "Scale"
  bottom: "BatchNorm19"
  top: "BatchNorm19"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU19"
  type: "ReLU"
  bottom: "BatchNorm19"
  top: "BatchNorm19"
}
layer {
  name: "Convolution18"
  type: "Convolution"
  bottom: "BatchNorm19"
  top: "Convolution18"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 56
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm20"
  type: "BatchNorm"
  bottom: "Convolution18"
  top: "BatchNorm20"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale20"
  type: "Scale"
  bottom: "BatchNorm20"
  top: "BatchNorm20"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU20"
  type: "ReLU"
  bottom: "BatchNorm20"
  top: "BatchNorm20"
}
layer {
  name: "Conv_down_10"
  type: "Convolution"
  bottom: "BatchNorm20"
  top: "Conv_down_10"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 56
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 2
    kernel_size: 2
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
# Dense unit: BN/Scale/ReLU, 1x1x1 bottleneck (64 ch), BN/Scale/ReLU,
# 3x3x3 conv (16 ch), dropout, concat with the unit input. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm21"
  type: "BatchNorm"
  bottom: "Conv_down_10"
  top: "BatchNorm21"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale21"
  type: "Scale"
  bottom: "BatchNorm21"
  top: "BatchNorm21"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU21"
  type: "ReLU"
  bottom: "BatchNorm21"
  top: "BatchNorm21"
}
layer {
  name: "Convolution19"
  type: "Convolution"
  bottom: "BatchNorm21"
  top: "Convolution19"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm22"
  type: "BatchNorm"
  bottom: "Convolution19"
  top: "BatchNorm22"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale22"
  type: "Scale"
  bottom: "BatchNorm22"
  top: "BatchNorm22"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU22"
  type: "ReLU"
  bottom: "BatchNorm22"
  top: "BatchNorm22"
}
layer {
  name: "Convolution20"
  type: "Convolution"
  bottom: "BatchNorm22"
  top: "Convolution20"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout9"
  type: "Dropout"
  bottom: "Convolution20"
  top: "Dropout9"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_11"
  type: "Concat"
  bottom: "Conv_down_10"
  bottom: "Dropout9"
  top: "Concat_11"
  concat_param {
    axis: 1
  }
}
# Dense unit: BN/Scale/ReLU, 1x1x1 bottleneck (64 ch), BN/Scale/ReLU,
# 3x3x3 conv (16 ch), dropout, concat with the unit input. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm23"
  type: "BatchNorm"
  bottom: "Concat_11"
  top: "BatchNorm23"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale23"
  type: "Scale"
  bottom: "BatchNorm23"
  top: "BatchNorm23"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU23"
  type: "ReLU"
  bottom: "BatchNorm23"
  top: "BatchNorm23"
}
layer {
  name: "Convolution21"
  type: "Convolution"
  bottom: "BatchNorm23"
  top: "Convolution21"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm24"
  type: "BatchNorm"
  bottom: "Convolution21"
  top: "BatchNorm24"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale24"
  type: "Scale"
  bottom: "BatchNorm24"
  top: "BatchNorm24"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU24"
  type: "ReLU"
  bottom: "BatchNorm24"
  top: "BatchNorm24"
}
layer {
  name: "Convolution22"
  type: "Convolution"
  bottom: "BatchNorm24"
  top: "Convolution22"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout10"
  type: "Dropout"
  bottom: "Convolution22"
  top: "Dropout10"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_12"
  type: "Concat"
  bottom: "Concat_11"
  bottom: "Dropout10"
  top: "Concat_12"
  concat_param {
    axis: 1
  }
}
# Dense unit: BN/Scale/ReLU, 1x1x1 bottleneck (64 ch), BN/Scale/ReLU,
# 3x3x3 conv (16 ch), dropout, concat with the unit input. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm25"
  type: "BatchNorm"
  bottom: "Concat_12"
  top: "BatchNorm25"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale25"
  type: "Scale"
  bottom: "BatchNorm25"
  top: "BatchNorm25"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU25"
  type: "ReLU"
  bottom: "BatchNorm25"
  top: "BatchNorm25"
}
layer {
  name: "Convolution23"
  type: "Convolution"
  bottom: "BatchNorm25"
  top: "Convolution23"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm26"
  type: "BatchNorm"
  bottom: "Convolution23"
  top: "BatchNorm26"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale26"
  type: "Scale"
  bottom: "BatchNorm26"
  top: "BatchNorm26"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU26"
  type: "ReLU"
  bottom: "BatchNorm26"
  top: "BatchNorm26"
}
layer {
  name: "Convolution24"
  type: "Convolution"
  bottom: "BatchNorm26"
  top: "Convolution24"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout11"
  type: "Dropout"
  bottom: "Convolution24"
  top: "Dropout11"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_13"
  type: "Concat"
  bottom: "Concat_12"
  bottom: "Dropout11"
  top: "Concat_13"
  concat_param {
    axis: 1
  }
}
# Dense unit: BN/Scale/ReLU, 1x1x1 bottleneck (64 ch), BN/Scale/ReLU,
# 3x3x3 conv (16 ch), dropout, concat with the unit input. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm27"
  type: "BatchNorm"
  bottom: "Concat_13"
  top: "BatchNorm27"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale27"
  type: "Scale"
  bottom: "BatchNorm27"
  top: "BatchNorm27"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU27"
  type: "ReLU"
  bottom: "BatchNorm27"
  top: "BatchNorm27"
}
layer {
  name: "Convolution25"
  type: "Convolution"
  bottom: "BatchNorm27"
  top: "Convolution25"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm28"
  type: "BatchNorm"
  bottom: "Convolution25"
  top: "BatchNorm28"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale28"
  type: "Scale"
  bottom: "BatchNorm28"
  top: "BatchNorm28"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU28"
  type: "ReLU"
  bottom: "BatchNorm28"
  top: "BatchNorm28"
}
layer {
  name: "Convolution26"
  type: "Convolution"
  bottom: "BatchNorm28"
  top: "Convolution26"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout12"
  type: "Dropout"
  bottom: "Convolution26"
  top: "Dropout12"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_14"
  type: "Concat"
  bottom: "Concat_13"
  bottom: "Dropout12"
  top: "Concat_14"
  concat_param {
    axis: 1
  }
}
# Fixed bilinear 3D upsampling side branch (stride 8) for score fusion.
# (Stray diff line numbers removed.)
layer {
  name: "Deconvolution_15"
  type: "Deconvolution"
  bottom: "Concat_14"
  top: "Deconvolution_15"
  param {
    lr_mult: 0.10000000149
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 4
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 10
    kernel_size: 10
    kernel_size: 10
    group: 4
    stride: 8
    stride: 8
    stride: 8
    weight_filler {
      type: "bilinear_3D"
    }
  }
}
# Transition: BN/Scale/ReLU, 1x1x1 compression conv (60 ch), BN/Scale/ReLU,
# then 2x2x2 stride-2 strided-conv downsampling. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm29"
  type: "BatchNorm"
  bottom: "Concat_14"
  top: "BatchNorm29"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale29"
  type: "Scale"
  bottom: "BatchNorm29"
  top: "BatchNorm29"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU29"
  type: "ReLU"
  bottom: "BatchNorm29"
  top: "BatchNorm29"
}
layer {
  name: "Convolution27"
  type: "Convolution"
  bottom: "BatchNorm29"
  top: "Convolution27"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 60
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm30"
  type: "BatchNorm"
  bottom: "Convolution27"
  top: "BatchNorm30"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale30"
  type: "Scale"
  bottom: "BatchNorm30"
  top: "BatchNorm30"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU30"
  type: "ReLU"
  bottom: "BatchNorm30"
  top: "BatchNorm30"
}
layer {
  name: "Conv_down_15"
  type: "Convolution"
  bottom: "BatchNorm30"
  top: "Conv_down_15"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 60
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 2
    kernel_size: 2
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
# Dense unit: BN/Scale/ReLU, 1x1x1 bottleneck (64 ch), BN/Scale/ReLU,
# 3x3x3 conv (16 ch), dropout, concat with the unit input. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm31"
  type: "BatchNorm"
  bottom: "Conv_down_15"
  top: "BatchNorm31"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale31"
  type: "Scale"
  bottom: "BatchNorm31"
  top: "BatchNorm31"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU31"
  type: "ReLU"
  bottom: "BatchNorm31"
  top: "BatchNorm31"
}
layer {
  name: "Convolution28"
  type: "Convolution"
  bottom: "BatchNorm31"
  top: "Convolution28"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm32"
  type: "BatchNorm"
  bottom: "Convolution28"
  top: "BatchNorm32"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale32"
  type: "Scale"
  bottom: "BatchNorm32"
  top: "BatchNorm32"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU32"
  type: "ReLU"
  bottom: "BatchNorm32"
  top: "BatchNorm32"
}
layer {
  name: "Convolution29"
  type: "Convolution"
  bottom: "BatchNorm32"
  top: "Convolution29"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout13"
  type: "Dropout"
  bottom: "Convolution29"
  top: "Dropout13"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_19"
  type: "Concat"
  bottom: "Conv_down_15"
  bottom: "Dropout13"
  top: "Concat_19"
  concat_param {
    axis: 1
  }
}
# Dense unit: BN/Scale/ReLU, 1x1x1 bottleneck (64 ch), BN/Scale/ReLU,
# 3x3x3 conv (16 ch), dropout, concat with the unit input. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm33"
  type: "BatchNorm"
  bottom: "Concat_19"
  top: "BatchNorm33"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale33"
  type: "Scale"
  bottom: "BatchNorm33"
  top: "BatchNorm33"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU33"
  type: "ReLU"
  bottom: "BatchNorm33"
  top: "BatchNorm33"
}
layer {
  name: "Convolution30"
  type: "Convolution"
  bottom: "BatchNorm33"
  top: "Convolution30"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm34"
  type: "BatchNorm"
  bottom: "Convolution30"
  top: "BatchNorm34"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale34"
  type: "Scale"
  bottom: "BatchNorm34"
  top: "BatchNorm34"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU34"
  type: "ReLU"
  bottom: "BatchNorm34"
  top: "BatchNorm34"
}
layer {
  name: "Convolution31"
  type: "Convolution"
  bottom: "BatchNorm34"
  top: "Convolution31"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout14"
  type: "Dropout"
  bottom: "Convolution31"
  top: "Dropout14"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_20"
  type: "Concat"
  bottom: "Concat_19"
  bottom: "Dropout14"
  top: "Concat_20"
  concat_param {
    axis: 1
  }
}
# Dense unit: BN/Scale/ReLU, 1x1x1 bottleneck (64 ch), BN/Scale/ReLU,
# 3x3x3 conv (16 ch), dropout, concat with the unit input. (Stray diff line numbers removed.)
layer {
  name: "BatchNorm35"
  type: "BatchNorm"
  bottom: "Concat_20"
  top: "BatchNorm35"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale35"
  type: "Scale"
  bottom: "BatchNorm35"
  top: "BatchNorm35"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU35"
  type: "ReLU"
  bottom: "BatchNorm35"
  top: "BatchNorm35"
}
layer {
  name: "Convolution32"
  type: "Convolution"
  bottom: "BatchNorm35"
  top: "Convolution32"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm36"
  type: "BatchNorm"
  bottom: "Convolution32"
  top: "BatchNorm36"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
layer {
  name: "Scale36"
  type: "Scale"
  bottom: "BatchNorm36"
  top: "BatchNorm36"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU36"
  type: "ReLU"
  bottom: "BatchNorm36"
  top: "BatchNorm36"
}
layer {
  name: "Convolution33"
  type: "Convolution"
  bottom: "BatchNorm36"
  top: "Convolution33"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout15"
  type: "Dropout"
  bottom: "Convolution33"
  top: "Dropout15"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_21"
  type: "Concat"
  bottom: "Concat_20"
  bottom: "Dropout15"
  top: "Concat_21"
  concat_param {
    axis: 1
  }
}
layer {
3221
  name: "BatchNorm37"
3222
  type: "BatchNorm"
3223
  bottom: "Concat_21"
3224
  top: "BatchNorm37"
3225
  param {
3226
    lr_mult: 0.0
3227
    decay_mult: 0.0
3228
  }
3229
  param {
3230
    lr_mult: 0.0
3231
    decay_mult: 0.0
3232
  }
3233
  param {
3234
    lr_mult: 0.0
3235
    decay_mult: 0.0
3236
  }
3237
  batch_norm_param {
3238
    use_global_stats: false
3239
  }
3240
}
3241
layer {
3242
  name: "Scale37"
3243
  type: "Scale"
3244
  bottom: "BatchNorm37"
3245
  top: "BatchNorm37"
3246
  scale_param {
3247
    filler {
3248
      value: 1.0
3249
    }
3250
    bias_term: true
3251
    bias_filler {
3252
      value: 0.0
3253
    }
3254
  }
3255
}
3256
layer {
3257
  name: "ReLU37"
3258
  type: "ReLU"
3259
  bottom: "BatchNorm37"
3260
  top: "BatchNorm37"
3261
}
3262
layer {
3263
  name: "Convolution34"
3264
  type: "Convolution"
3265
  bottom: "BatchNorm37"
3266
  top: "Convolution34"
3267
  param {
3268
    lr_mult: 1.0
3269
    decay_mult: 1.0
3270
  }
3271
  convolution_param {
3272
    num_output: 64
3273
    bias_term: false
3274
    pad: 0
3275
    pad: 0
3276
    pad: 0
3277
    kernel_size: 1
3278
    kernel_size: 1
3279
    kernel_size: 1
3280
    stride: 1
3281
    stride: 1
3282
    stride: 1
3283
    weight_filler {
3284
      type: "msra"
3285
    }
3286
    bias_filler {
3287
      type: "constant"
3288
    }
3289
    axis: 1
3290
  }
3291
}
3292
layer {
3293
  name: "BatchNorm38"
3294
  type: "BatchNorm"
3295
  bottom: "Convolution34"
3296
  top: "BatchNorm38"
3297
  param {
3298
    lr_mult: 0.0
3299
    decay_mult: 0.0
3300
  }
3301
  param {
3302
    lr_mult: 0.0
3303
    decay_mult: 0.0
3304
  }
3305
  param {
3306
    lr_mult: 0.0
3307
    decay_mult: 0.0
3308
  }
3309
  batch_norm_param {
3310
    use_global_stats: false
3311
  }
3312
}
3313
# Learned per-channel scale/bias after BatchNorm38, applied in-place.
layer {
  name: "Scale38"
  type: "Scale"
  bottom: "BatchNorm38"
  top: "BatchNorm38"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
# In-place ReLU activation after BatchNorm38/Scale38.
layer {
  name: "ReLU38"
  type: "ReLU"
  bottom: "BatchNorm38"
  top: "BatchNorm38"
}
# 3x3x3 convolution producing the dense-block growth of 16 feature maps
# (pad 1, stride 1 -> spatial size preserved). bias_term: false, so the
# bias_filler below is inert.
layer {
  name: "Convolution35"
  type: "Convolution"
  bottom: "BatchNorm38"
  top: "Convolution35"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
# Dropout on the new 16 feature maps (ratio ~0.2; value is a float32
# serialization artifact of 0.2, kept verbatim).
layer {
  name: "Dropout16"
  type: "Dropout"
  bottom: "Convolution35"
  top: "Dropout16"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
# DenseNet-style skip: concatenate the block input (Concat_21) with the new
# features (Dropout16) along the channel axis.
layer {
  name: "Concat_22"
  type: "Concat"
  bottom: "Concat_21"
  bottom: "Dropout16"
  top: "Concat_22"
  concat_param {
    axis: 1
  }
}
# Learned upsampling head: 3D deconvolution with kernel 18, stride 16, pad 1
# (16x upsampling), grouped per class (group == num_output == 4) and
# initialized with the custom "bilinear_3D" filler. Reduced lr_mult (0.1)
# slows training of the upsampling weights relative to the conv layers.
layer {
  name: "Deconvolution_20"
  type: "Deconvolution"
  bottom: "Concat_22"
  top: "Deconvolution_20"
  param {
    lr_mult: 0.10000000149
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 4
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 18
    kernel_size: 18
    kernel_size: 18
    group: 4
    stride: 16
    stride: 16
    stride: 16
    weight_filler {
      type: "bilinear_3D"
    }
  }
}
# Fuse multi-scale outputs: the full-resolution early features (conv1c) plus
# the upsampled predictions from each dense-block stage
# (Deconvolution_5/10/15/20), concatenated along channels.
layer {
  name: "Concat1"
  type: "Concat"
  bottom: "conv1c"
  bottom: "Deconvolution_5"
  bottom: "Deconvolution_10"
  bottom: "Deconvolution_15"
  bottom: "Deconvolution_20"
  top: "Concat1"
  concat_param {
    axis: 1
  }
}
# Batch normalization over the fused multi-scale features (BN internal params
# frozen via lr_mult 0; mini-batch stats used since use_global_stats: false).
layer {
  name: "bnorm_concat"
  type: "BatchNorm"
  bottom: "Concat1"
  top: "bnorm_concat"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: false
  }
}
# Learned per-channel scale/bias after bnorm_concat, applied in-place.
layer {
  name: "scale_concat"
  type: "Scale"
  bottom: "bnorm_concat"
  top: "bnorm_concat"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
# In-place ReLU activation on the fused features.
layer {
  name: "relu_concat"
  type: "ReLU"
  bottom: "bnorm_concat"
  top: "bnorm_concat"
}
# Final 1x1x1 classifier: maps fused features to 4 class score maps.
# Bias is enabled here (second param block: lr_mult 2, decay_mult 0 — the
# conventional Caffe setting for conv biases, matching conv1a in HEAD).
layer {
  name: "Convolution36"
  type: "Convolution"
  bottom: "bnorm_concat"
  top: "Convolution36"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 4
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    weight_filler {
      type: "msra"
    }
    axis: 1
  }
}
# Per-voxel softmax cross-entropy between the 4-class score maps and the
# HDF5-provided "label" blob.
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "Convolution36"
  bottom: "label"
  top: "loss"
}