# deploy_3d_denseseg.prototxt
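#
# 3D DenseSeg deployment network (description inferred from the layer
# definitions below). Input: a 1 x 2 x 64 x 64 x 64 blob (batch, channels,
# depth, height, width). An initial 3x3x3 convolution stack is followed by
# four densely connected blocks separated by strided 2x2x2 downsampling
# convolutions; each block's output is upsampled back to 64^3 by a grouped
# deconvolution with 3D bilinear weights, the upsampled maps are concatenated
# with the first-stage features, and a 1x1x1 convolution plus softmax
# produces per-voxel scores for 4 classes.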

layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 2
      dim: 64
      dim: 64
      dim: 64
    }
  }
}
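# Initial feature extraction: three 3x3x3 convolutions (32 channels each),
# each followed by BatchNorm + Scale + ReLU, at the full 64^3 resolution.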
layer {
  name: "conv1a"
  type: "Convolution"
  bottom: "data"
  top: "conv1a"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 32
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: -0.10000000149
    }
    axis: 1
  }
}
layer {
  name: "bnorm1a"
  type: "BatchNorm"
  bottom: "conv1a"
  top: "bnorm1a"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale1a"
  type: "Scale"
  bottom: "bnorm1a"
  top: "bnorm1a"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "relu1a"
  type: "ReLU"
  bottom: "bnorm1a"
  top: "bnorm1a"
}
layer {
  name: "conv1b"
  type: "Convolution"
  bottom: "bnorm1a"
  top: "conv1b"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "bnorm1b"
  type: "BatchNorm"
  bottom: "conv1b"
  top: "bnorm1b"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale1b"
  type: "Scale"
  bottom: "bnorm1b"
  top: "bnorm1b"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "relu1b"
  type: "ReLU"
  bottom: "bnorm1b"
  top: "bnorm1b"
}
layer {
  name: "conv1c"
  type: "Convolution"
  bottom: "bnorm1b"
  top: "conv1c"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "bnorm1c"
  type: "BatchNorm"
  bottom: "conv1c"
  top: "bnorm1c"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale1c"
  type: "Scale"
  bottom: "bnorm1c"
  top: "bnorm1c"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "relu1c"
  type: "ReLU"
  bottom: "bnorm1c"
  top: "bnorm1c"
}
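# Transition 1: 2x2x2 convolution with stride 2 halves the spatial
# resolution (64^3 -> 32^3) while keeping 32 channels.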
layer {
  name: "Conv_down_1"
  type: "Convolution"
  bottom: "bnorm1c"
  top: "Conv_down_1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 2
    kernel_size: 2
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
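# Dense block 1 (4 units). Each unit is BN-Scale-ReLU -> 1x1x1 conv (64) ->
# BN-Scale-ReLU -> 3x3x3 conv (16) -> dropout (0.2), and its 16 new feature
# maps are concatenated onto the running stack (Concat_1 ... Concat_4).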
layer {
  name: "BatchNorm1"
  type: "BatchNorm"
  bottom: "Conv_down_1"
  top: "BatchNorm1"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale1"
  type: "Scale"
  bottom: "BatchNorm1"
  top: "BatchNorm1"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU1"
  type: "ReLU"
  bottom: "BatchNorm1"
  top: "BatchNorm1"
}
layer {
  name: "Convolution1"
  type: "Convolution"
  bottom: "BatchNorm1"
  top: "Convolution1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm2"
  type: "BatchNorm"
  bottom: "Convolution1"
  top: "BatchNorm2"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale2"
  type: "Scale"
  bottom: "BatchNorm2"
  top: "BatchNorm2"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU2"
  type: "ReLU"
  bottom: "BatchNorm2"
  top: "BatchNorm2"
}
layer {
  name: "Convolution2"
  type: "Convolution"
  bottom: "BatchNorm2"
  top: "Convolution2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout1"
  type: "Dropout"
  bottom: "Convolution2"
  top: "Dropout1"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_1"
  type: "Concat"
  bottom: "Conv_down_1"
  bottom: "Dropout1"
  top: "Concat_1"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm3"
  type: "BatchNorm"
  bottom: "Concat_1"
  top: "BatchNorm3"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale3"
  type: "Scale"
  bottom: "BatchNorm3"
  top: "BatchNorm3"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU3"
  type: "ReLU"
  bottom: "BatchNorm3"
  top: "BatchNorm3"
}
layer {
  name: "Convolution3"
  type: "Convolution"
  bottom: "BatchNorm3"
  top: "Convolution3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm4"
  type: "BatchNorm"
  bottom: "Convolution3"
  top: "BatchNorm4"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale4"
  type: "Scale"
  bottom: "BatchNorm4"
  top: "BatchNorm4"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU4"
  type: "ReLU"
  bottom: "BatchNorm4"
  top: "BatchNorm4"
}
layer {
  name: "Convolution4"
  type: "Convolution"
  bottom: "BatchNorm4"
  top: "Convolution4"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout2"
  type: "Dropout"
  bottom: "Convolution4"
  top: "Dropout2"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_2"
  type: "Concat"
  bottom: "Concat_1"
  bottom: "Dropout2"
  top: "Concat_2"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm5"
  type: "BatchNorm"
  bottom: "Concat_2"
  top: "BatchNorm5"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale5"
  type: "Scale"
  bottom: "BatchNorm5"
  top: "BatchNorm5"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU5"
  type: "ReLU"
  bottom: "BatchNorm5"
  top: "BatchNorm5"
}
layer {
  name: "Convolution5"
  type: "Convolution"
  bottom: "BatchNorm5"
  top: "Convolution5"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm6"
  type: "BatchNorm"
  bottom: "Convolution5"
  top: "BatchNorm6"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale6"
  type: "Scale"
  bottom: "BatchNorm6"
  top: "BatchNorm6"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU6"
  type: "ReLU"
  bottom: "BatchNorm6"
  top: "BatchNorm6"
}
layer {
  name: "Convolution6"
  type: "Convolution"
  bottom: "BatchNorm6"
  top: "Convolution6"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout3"
  type: "Dropout"
  bottom: "Convolution6"
  top: "Dropout3"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_3"
  type: "Concat"
  bottom: "Concat_2"
  bottom: "Dropout3"
  top: "Concat_3"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm7"
  type: "BatchNorm"
  bottom: "Concat_3"
  top: "BatchNorm7"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale7"
  type: "Scale"
  bottom: "BatchNorm7"
  top: "BatchNorm7"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU7"
  type: "ReLU"
  bottom: "BatchNorm7"
  top: "BatchNorm7"
}
layer {
  name: "Convolution7"
  type: "Convolution"
  bottom: "BatchNorm7"
  top: "Convolution7"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm8"
  type: "BatchNorm"
  bottom: "Convolution7"
  top: "BatchNorm8"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale8"
  type: "Scale"
  bottom: "BatchNorm8"
  top: "BatchNorm8"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU8"
  type: "ReLU"
  bottom: "BatchNorm8"
  top: "BatchNorm8"
}
layer {
  name: "Convolution8"
  type: "Convolution"
  bottom: "BatchNorm8"
  top: "Convolution8"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout4"
  type: "Dropout"
  bottom: "Convolution8"
  top: "Dropout4"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_4"
  type: "Concat"
  bottom: "Concat_3"
  bottom: "Dropout4"
  top: "Concat_4"
  concat_param {
    axis: 1
  }
}
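# Upsampling branch 1: grouped 4x4x4 deconvolution (stride 2, pad 1) with
# 3D bilinear weights brings the 96-channel Concat_4 stack back to 64^3 as
# a 4-channel map.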
layer {
  name: "Deconvolution_5"
  type: "Deconvolution"
  bottom: "Concat_4"
  top: "Deconvolution_5"
  param {
    lr_mult: 0.10000000149
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 4
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 4
    kernel_size: 4
    kernel_size: 4
    group: 4
    stride: 2
    stride: 2
    stride: 2
    weight_filler {
      type: "bilinear_3D"
    }
  }
}
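# Transition 2: BN-Scale-ReLU, a 1x1x1 convolution compresses 96 -> 48
# channels, then a 2x2x2 stride-2 convolution downsamples 32^3 -> 16^3.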
layer {
  name: "BatchNorm9"
  type: "BatchNorm"
  bottom: "Concat_4"
  top: "BatchNorm9"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale9"
  type: "Scale"
  bottom: "BatchNorm9"
  top: "BatchNorm9"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU9"
  type: "ReLU"
  bottom: "BatchNorm9"
  top: "BatchNorm9"
}
layer {
  name: "Convolution9"
  type: "Convolution"
  bottom: "BatchNorm9"
  top: "Convolution9"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 48
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm10"
  type: "BatchNorm"
  bottom: "Convolution9"
  top: "BatchNorm10"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale10"
  type: "Scale"
  bottom: "BatchNorm10"
  top: "BatchNorm10"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU10"
  type: "ReLU"
  bottom: "BatchNorm10"
  top: "BatchNorm10"
}
layer {
  name: "Conv_down_5"
  type: "Convolution"
  bottom: "BatchNorm10"
  top: "Conv_down_5"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 48
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 2
    kernel_size: 2
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
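# Dense block 2 (4 units, Concat_6 ... Concat_9): same
# 1x1x1(64) / 3x3x3(16) / dropout / concat pattern at 16^3 resolution.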
layer {
  name: "BatchNorm11"
  type: "BatchNorm"
  bottom: "Conv_down_5"
  top: "BatchNorm11"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale11"
  type: "Scale"
  bottom: "BatchNorm11"
  top: "BatchNorm11"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU11"
  type: "ReLU"
  bottom: "BatchNorm11"
  top: "BatchNorm11"
}
layer {
  name: "Convolution10"
  type: "Convolution"
  bottom: "BatchNorm11"
  top: "Convolution10"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm12"
  type: "BatchNorm"
  bottom: "Convolution10"
  top: "BatchNorm12"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale12"
  type: "Scale"
  bottom: "BatchNorm12"
  top: "BatchNorm12"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU12"
  type: "ReLU"
  bottom: "BatchNorm12"
  top: "BatchNorm12"
}
layer {
  name: "Convolution11"
  type: "Convolution"
  bottom: "BatchNorm12"
  top: "Convolution11"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout5"
  type: "Dropout"
  bottom: "Convolution11"
  top: "Dropout5"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_6"
  type: "Concat"
  bottom: "Conv_down_5"
  bottom: "Dropout5"
  top: "Concat_6"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm13"
  type: "BatchNorm"
  bottom: "Concat_6"
  top: "BatchNorm13"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale13"
  type: "Scale"
  bottom: "BatchNorm13"
  top: "BatchNorm13"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU13"
  type: "ReLU"
  bottom: "BatchNorm13"
  top: "BatchNorm13"
}
layer {
  name: "Convolution12"
  type: "Convolution"
  bottom: "BatchNorm13"
  top: "Convolution12"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm14"
  type: "BatchNorm"
  bottom: "Convolution12"
  top: "BatchNorm14"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale14"
  type: "Scale"
  bottom: "BatchNorm14"
  top: "BatchNorm14"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU14"
  type: "ReLU"
  bottom: "BatchNorm14"
  top: "BatchNorm14"
}
layer {
  name: "Convolution13"
  type: "Convolution"
  bottom: "BatchNorm14"
  top: "Convolution13"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout6"
  type: "Dropout"
  bottom: "Convolution13"
  top: "Dropout6"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_7"
  type: "Concat"
  bottom: "Concat_6"
  bottom: "Dropout6"
  top: "Concat_7"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm15"
  type: "BatchNorm"
  bottom: "Concat_7"
  top: "BatchNorm15"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale15"
  type: "Scale"
  bottom: "BatchNorm15"
  top: "BatchNorm15"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU15"
  type: "ReLU"
  bottom: "BatchNorm15"
  top: "BatchNorm15"
}
layer {
  name: "Convolution14"
  type: "Convolution"
  bottom: "BatchNorm15"
  top: "Convolution14"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm16"
  type: "BatchNorm"
  bottom: "Convolution14"
  top: "BatchNorm16"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale16"
  type: "Scale"
  bottom: "BatchNorm16"
  top: "BatchNorm16"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU16"
  type: "ReLU"
  bottom: "BatchNorm16"
  top: "BatchNorm16"
}
layer {
  name: "Convolution15"
  type: "Convolution"
  bottom: "BatchNorm16"
  top: "Convolution15"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout7"
  type: "Dropout"
  bottom: "Convolution15"
  top: "Dropout7"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_8"
  type: "Concat"
  bottom: "Concat_7"
  bottom: "Dropout7"
  top: "Concat_8"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm17"
  type: "BatchNorm"
  bottom: "Concat_8"
  top: "BatchNorm17"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale17"
  type: "Scale"
  bottom: "BatchNorm17"
  top: "BatchNorm17"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU17"
  type: "ReLU"
  bottom: "BatchNorm17"
  top: "BatchNorm17"
}
layer {
  name: "Convolution16"
  type: "Convolution"
  bottom: "BatchNorm17"
  top: "Convolution16"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm18"
  type: "BatchNorm"
  bottom: "Convolution16"
  top: "BatchNorm18"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale18"
  type: "Scale"
  bottom: "BatchNorm18"
  top: "BatchNorm18"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU18"
  type: "ReLU"
  bottom: "BatchNorm18"
  top: "BatchNorm18"
}
layer {
  name: "Convolution17"
  type: "Convolution"
  bottom: "BatchNorm18"
  top: "Convolution17"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout8"
  type: "Dropout"
  bottom: "Convolution17"
  top: "Dropout8"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_9"
  type: "Concat"
  bottom: "Concat_8"
  bottom: "Dropout8"
  top: "Concat_9"
  concat_param {
    axis: 1
  }
}
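# Upsampling branch 2: grouped 6x6x6 deconvolution (stride 4, pad 1)
# upsamples the 112-channel Concat_9 stack from 16^3 back to 64^3 as a
# 4-channel map.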
layer {
  name: "Deconvolution_10"
  type: "Deconvolution"
  bottom: "Concat_9"
  top: "Deconvolution_10"
  param {
    lr_mult: 0.10000000149
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 4
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 6
    kernel_size: 6
    kernel_size: 6
    group: 4
    stride: 4
    stride: 4
    stride: 4
    weight_filler {
      type: "bilinear_3D"
    }
  }
}
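# Transition 3: a 1x1x1 convolution compresses 112 -> 56 channels, then a
# 2x2x2 stride-2 convolution downsamples 16^3 -> 8^3.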
layer {
  name: "BatchNorm19"
  type: "BatchNorm"
  bottom: "Concat_9"
  top: "BatchNorm19"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale19"
  type: "Scale"
  bottom: "BatchNorm19"
  top: "BatchNorm19"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU19"
  type: "ReLU"
  bottom: "BatchNorm19"
  top: "BatchNorm19"
}
layer {
  name: "Convolution18"
  type: "Convolution"
  bottom: "BatchNorm19"
  top: "Convolution18"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 56
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm20"
  type: "BatchNorm"
  bottom: "Convolution18"
  top: "BatchNorm20"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale20"
  type: "Scale"
  bottom: "BatchNorm20"
  top: "BatchNorm20"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU20"
  type: "ReLU"
  bottom: "BatchNorm20"
  top: "BatchNorm20"
}
layer {
  name: "Conv_down_10"
  type: "Convolution"
  bottom: "BatchNorm20"
  top: "Conv_down_10"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 56
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 2
    kernel_size: 2
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
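# Dense block 3 (4 units, Concat_11 ... Concat_14) at 8^3 resolution.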
layer {
  name: "BatchNorm21"
  type: "BatchNorm"
  bottom: "Conv_down_10"
  top: "BatchNorm21"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale21"
  type: "Scale"
  bottom: "BatchNorm21"
  top: "BatchNorm21"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU21"
  type: "ReLU"
  bottom: "BatchNorm21"
  top: "BatchNorm21"
}
layer {
  name: "Convolution19"
  type: "Convolution"
  bottom: "BatchNorm21"
  top: "Convolution19"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm22"
  type: "BatchNorm"
  bottom: "Convolution19"
  top: "BatchNorm22"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale22"
  type: "Scale"
  bottom: "BatchNorm22"
  top: "BatchNorm22"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU22"
  type: "ReLU"
  bottom: "BatchNorm22"
  top: "BatchNorm22"
}
layer {
  name: "Convolution20"
  type: "Convolution"
  bottom: "BatchNorm22"
  top: "Convolution20"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout9"
  type: "Dropout"
  bottom: "Convolution20"
  top: "Dropout9"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_11"
  type: "Concat"
  bottom: "Conv_down_10"
  bottom: "Dropout9"
  top: "Concat_11"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm23"
  type: "BatchNorm"
  bottom: "Concat_11"
  top: "BatchNorm23"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale23"
  type: "Scale"
  bottom: "BatchNorm23"
  top: "BatchNorm23"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU23"
  type: "ReLU"
  bottom: "BatchNorm23"
  top: "BatchNorm23"
}
layer {
  name: "Convolution21"
  type: "Convolution"
  bottom: "BatchNorm23"
  top: "Convolution21"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm24"
  type: "BatchNorm"
  bottom: "Convolution21"
  top: "BatchNorm24"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale24"
  type: "Scale"
  bottom: "BatchNorm24"
  top: "BatchNorm24"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU24"
  type: "ReLU"
  bottom: "BatchNorm24"
  top: "BatchNorm24"
}
layer {
  name: "Convolution22"
  type: "Convolution"
  bottom: "BatchNorm24"
  top: "Convolution22"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout10"
  type: "Dropout"
  bottom: "Convolution22"
  top: "Dropout10"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_12"
  type: "Concat"
  bottom: "Concat_11"
  bottom: "Dropout10"
  top: "Concat_12"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm25"
  type: "BatchNorm"
  bottom: "Concat_12"
  top: "BatchNorm25"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale25"
  type: "Scale"
  bottom: "BatchNorm25"
  top: "BatchNorm25"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU25"
  type: "ReLU"
  bottom: "BatchNorm25"
  top: "BatchNorm25"
}
layer {
  name: "Convolution23"
  type: "Convolution"
  bottom: "BatchNorm25"
  top: "Convolution23"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm26"
  type: "BatchNorm"
  bottom: "Convolution23"
  top: "BatchNorm26"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale26"
  type: "Scale"
  bottom: "BatchNorm26"
  top: "BatchNorm26"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU26"
  type: "ReLU"
  bottom: "BatchNorm26"
  top: "BatchNorm26"
}
layer {
  name: "Convolution24"
  type: "Convolution"
  bottom: "BatchNorm26"
  top: "Convolution24"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout11"
  type: "Dropout"
  bottom: "Convolution24"
  top: "Dropout11"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_13"
  type: "Concat"
  bottom: "Concat_12"
  bottom: "Dropout11"
  top: "Concat_13"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm27"
  type: "BatchNorm"
  bottom: "Concat_13"
  top: "BatchNorm27"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale27"
  type: "Scale"
  bottom: "BatchNorm27"
  top: "BatchNorm27"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU27"
  type: "ReLU"
  bottom: "BatchNorm27"
  top: "BatchNorm27"
}
layer {
  name: "Convolution25"
  type: "Convolution"
  bottom: "BatchNorm27"
  top: "Convolution25"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm28"
  type: "BatchNorm"
  bottom: "Convolution25"
  top: "BatchNorm28"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale28"
  type: "Scale"
  bottom: "BatchNorm28"
  top: "BatchNorm28"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU28"
  type: "ReLU"
  bottom: "BatchNorm28"
  top: "BatchNorm28"
}
layer {
  name: "Convolution26"
  type: "Convolution"
  bottom: "BatchNorm28"
  top: "Convolution26"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout12"
  type: "Dropout"
  bottom: "Convolution26"
  top: "Dropout12"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_14"
  type: "Concat"
  bottom: "Concat_13"
  bottom: "Dropout12"
  top: "Concat_14"
  concat_param {
    axis: 1
  }
}
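# Upsampling branch 3: grouped 10x10x10 deconvolution (stride 8, pad 1)
# upsamples the 120-channel Concat_14 stack from 8^3 back to 64^3 as a
# 4-channel map.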
layer {
  name: "Deconvolution_15"
  type: "Deconvolution"
  bottom: "Concat_14"
  top: "Deconvolution_15"
  param {
    lr_mult: 0.10000000149
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 4
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 10
    kernel_size: 10
    kernel_size: 10
    group: 4
    stride: 8
    stride: 8
    stride: 8
    weight_filler {
      type: "bilinear_3D"
    }
  }
}
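# Transition 4: a 1x1x1 convolution compresses 120 -> 60 channels, then a
# 2x2x2 stride-2 convolution downsamples 8^3 -> 4^3.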
layer {
  name: "BatchNorm29"
  type: "BatchNorm"
  bottom: "Concat_14"
  top: "BatchNorm29"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale29"
  type: "Scale"
  bottom: "BatchNorm29"
  top: "BatchNorm29"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU29"
  type: "ReLU"
  bottom: "BatchNorm29"
  top: "BatchNorm29"
}
layer {
  name: "Convolution27"
  type: "Convolution"
  bottom: "BatchNorm29"
  top: "Convolution27"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 60
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm30"
  type: "BatchNorm"
  bottom: "Convolution27"
  top: "BatchNorm30"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale30"
  type: "Scale"
  bottom: "BatchNorm30"
  top: "BatchNorm30"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU30"
  type: "ReLU"
  bottom: "BatchNorm30"
  top: "BatchNorm30"
}
layer {
  name: "Conv_down_15"
  type: "Convolution"
  bottom: "BatchNorm30"
  top: "Conv_down_15"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 60
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 2
    kernel_size: 2
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
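# Dense block 4 (4 units, Concat_19 ... Concat_22) at 4^3 resolution.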
layer {
  name: "BatchNorm31"
  type: "BatchNorm"
  bottom: "Conv_down_15"
  top: "BatchNorm31"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale31"
  type: "Scale"
  bottom: "BatchNorm31"
  top: "BatchNorm31"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU31"
  type: "ReLU"
  bottom: "BatchNorm31"
  top: "BatchNorm31"
}
layer {
  name: "Convolution28"
  type: "Convolution"
  bottom: "BatchNorm31"
  top: "Convolution28"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm32"
  type: "BatchNorm"
  bottom: "Convolution28"
  top: "BatchNorm32"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale32"
  type: "Scale"
  bottom: "BatchNorm32"
  top: "BatchNorm32"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU32"
  type: "ReLU"
  bottom: "BatchNorm32"
  top: "BatchNorm32"
}
layer {
  name: "Convolution29"
  type: "Convolution"
  bottom: "BatchNorm32"
  top: "Convolution29"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout13"
  type: "Dropout"
  bottom: "Convolution29"
  top: "Dropout13"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_19"
  type: "Concat"
  bottom: "Conv_down_15"
  bottom: "Dropout13"
  top: "Concat_19"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm33"
  type: "BatchNorm"
  bottom: "Concat_19"
  top: "BatchNorm33"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale33"
  type: "Scale"
  bottom: "BatchNorm33"
  top: "BatchNorm33"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU33"
  type: "ReLU"
  bottom: "BatchNorm33"
  top: "BatchNorm33"
}
layer {
  name: "Convolution30"
  type: "Convolution"
  bottom: "BatchNorm33"
  top: "Convolution30"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm34"
  type: "BatchNorm"
  bottom: "Convolution30"
  top: "BatchNorm34"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale34"
  type: "Scale"
  bottom: "BatchNorm34"
  top: "BatchNorm34"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU34"
  type: "ReLU"
  bottom: "BatchNorm34"
  top: "BatchNorm34"
}
layer {
  name: "Convolution31"
  type: "Convolution"
  bottom: "BatchNorm34"
  top: "Convolution31"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout14"
  type: "Dropout"
  bottom: "Convolution31"
  top: "Dropout14"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_20"
  type: "Concat"
  bottom: "Concat_19"
  bottom: "Dropout14"
  top: "Concat_20"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm35"
  type: "BatchNorm"
  bottom: "Concat_20"
  top: "BatchNorm35"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale35"
  type: "Scale"
  bottom: "BatchNorm35"
  top: "BatchNorm35"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU35"
  type: "ReLU"
  bottom: "BatchNorm35"
  top: "BatchNorm35"
}
layer {
  name: "Convolution32"
  type: "Convolution"
  bottom: "BatchNorm35"
  top: "Convolution32"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm36"
  type: "BatchNorm"
  bottom: "Convolution32"
  top: "BatchNorm36"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale36"
  type: "Scale"
  bottom: "BatchNorm36"
  top: "BatchNorm36"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU36"
  type: "ReLU"
  bottom: "BatchNorm36"
  top: "BatchNorm36"
}
layer {
  name: "Convolution33"
  type: "Convolution"
  bottom: "BatchNorm36"
  top: "Convolution33"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout15"
  type: "Dropout"
  bottom: "Convolution33"
  top: "Dropout15"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_21"
  type: "Concat"
  bottom: "Concat_20"
  bottom: "Dropout15"
  top: "Concat_21"
  concat_param {
    axis: 1
  }
}
layer {
  name: "BatchNorm37"
  type: "BatchNorm"
  bottom: "Concat_21"
  top: "BatchNorm37"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale37"
  type: "Scale"
  bottom: "BatchNorm37"
  top: "BatchNorm37"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU37"
  type: "ReLU"
  bottom: "BatchNorm37"
  top: "BatchNorm37"
}
layer {
  name: "Convolution34"
  type: "Convolution"
  bottom: "BatchNorm37"
  top: "Convolution34"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "BatchNorm38"
  type: "BatchNorm"
  bottom: "Convolution34"
  top: "BatchNorm38"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "Scale38"
  type: "Scale"
  bottom: "BatchNorm38"
  top: "BatchNorm38"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU38"
  type: "ReLU"
  bottom: "BatchNorm38"
  top: "BatchNorm38"
}
layer {
  name: "Convolution35"
  type: "Convolution"
  bottom: "BatchNorm38"
  top: "Convolution35"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    stride: 1
    stride: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "Dropout16"
  type: "Dropout"
  bottom: "Convolution35"
  top: "Dropout16"
  dropout_param {
    dropout_ratio: 0.20000000298
  }
}
layer {
  name: "Concat_22"
  type: "Concat"
  bottom: "Concat_21"
  bottom: "Dropout16"
  top: "Concat_22"
  concat_param {
    axis: 1
  }
}
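# Upsampling branch 4: grouped 18x18x18 deconvolution (stride 16, pad 1)
# upsamples the 124-channel Concat_22 stack from 4^3 back to 64^3 as a
# 4-channel map.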
layer {
  name: "Deconvolution_20"
  type: "Deconvolution"
  bottom: "Concat_22"
  top: "Deconvolution_20"
  param {
    lr_mult: 0.10000000149
    decay_mult: 1.0
  }
  convolution_param {
    num_output: 4
    bias_term: false
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 18
    kernel_size: 18
    kernel_size: 18
    group: 4
    stride: 16
    stride: 16
    stride: 16
    weight_filler {
      type: "bilinear_3D"
    }
  }
}
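# Fuse the full-resolution conv1c features (32 channels) with the four
# upsampled 4-channel maps: 32 + 4*4 = 48 channels.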
layer {
  name: "Concat1"
  type: "Concat"
  bottom: "conv1c"
  bottom: "Deconvolution_5"
  bottom: "Deconvolution_10"
  bottom: "Deconvolution_15"
  bottom: "Deconvolution_20"
  top: "Concat1"
  concat_param {
    axis: 1
  }
}
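# Final classification head: BN-Scale-ReLU on the fused features, a 1x1x1
# convolution to 4 class scores, and a voxel-wise softmax.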
layer {
  name: "bnorm_concat"
  type: "BatchNorm"
  bottom: "Concat1"
  top: "bnorm_concat"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale_concat"
  type: "Scale"
  bottom: "bnorm_concat"
  top: "bnorm_concat"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "relu_concat"
  type: "ReLU"
  bottom: "bnorm_concat"
  top: "bnorm_concat"
}
layer {
  name: "Convolution36"
  type: "Convolution"
  bottom: "bnorm_concat"
  top: "Convolution36"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 4
    pad: 0
    pad: 0
    pad: 0
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    weight_filler {
      type: "msra"
    }
    axis: 1
  }
}
layer {
  name: "softmax"
  type: "Softmax"
  bottom: "Convolution36"
  top: "softmax"
}