|
a |
|
b/src/LiviaNET_Config_NeuroPaper.ini |
|
|
1 |
|
|
|
2 |
############################################################################################################################################ |
|
|
3 |
################################################# CREATION OF THE NETWORK ##################################################### |
|
|
4 |
############################################################################################################################################ |
|
|
5 |
|
|
|
6 |
|
|
|
7 |
############## =================== General Options ================= ################ |
|
|
8 |
[General] |
|
|
9 |
networkName = liviaTest |
|
|
10 |
# Saving Options |
|
|
11 |
folderName = LiviaNet_Test |
|
|
12 |
|
|
|
13 |
|
|
|
14 |
############## =================== CNN_Architecture ================= ################ |
|
|
15 |
[CNN_Architecture] |
|
|
16 |
numkernelsperlayer = [25,25,25,50,50,50,75,75,75,400,200,150] |
|
|
17 |
|
|
|
18 |
# Kernels shapes: (Note, if kernel size is equal to 1 on one layer means that this layer is fully connected) |
|
|
19 |
# In this example there will be 3 conv layers and 1 fully connected layer (+ classification layer) |
|
|
20 |
kernelshapes = [[3, 3, 3], [3, 3, 3], [3, 3, 3],[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [1],[1],[1]] |
|
|
21 |
|
|
|
22 |
# Intermediate layers to connect to the last conv layer (just before the first fully connected layer) |
|
|
23 |
intermediateConnectedLayers = [2,5] |
|
|
24 |
|
|
|
25 |
# In the current implementation it does not support pooling (To be done...) |
|
|
26 |
pooling_scales = [[1,1,1],[1,1,1],[1,1,1]] |
|
|
27 |
|
|
|
28 |
# Array size should be equal to number of fully connected (FC) layers + classification layer |
|
|
29 |
dropout_Rates = [0.25,0.25,0.25,0.5] |
|
|
30 |
|
|
|
31 |
# Non-linear activations |
|
|
32 |
# Type: 0: Linear |
|
|
33 |
# 1: ReLU |
|
|
34 |
# 2: PReLU |
|
|
35 |
# 3: LeakyReLU |
|
|
36 |
activationType = 2 |
|
|
37 |
|
|
|
38 |
# TODO. Include activation type for Softmax layer |
|
|
39 |
# Number of classes: background + classes to segment |
|
|
40 |
n_classes = 9 |
|
|
41 |
|
|
|
42 |
# ------- Weights initialization ----------- # |
|
|
43 |
# There are some ways to initialize the weights. This is defined by the variable "weight_Initialization" |
|
|
44 |
# Here, there is a list of supported methods |
|
|
45 |
# 0, Classic |
|
|
46 |
# 1: Delving (He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level performance on imagenet classification." ICCV'15) |
|
|
47 |
# 2: Load Pre-trained |
|
|
48 |
# ---------- |
|
|
49 |
# There is also the choice of which layers will be initialized with pre-trained weights. This is specified in the variable |
|
|
50 |
# "load weight layers". This can be either empty (i.e. all layers will be initialized with pre-trained weights in case |
|
|
51 |
# "weight_Initialization" is 1) |
|
|
52 |
weight_Initialization_CNN = 1 |
|
|
53 |
weight_Initialization_FCN = 1 |
|
|
54 |
#load weight layers = [] # Next release |
|
|
55 |
# If using pre-trained models, specify the folder that contains the weights and the indexes of those weights to use |
|
|
56 |
# To ease the transfer between different software (e.g. Matlab) and between different architectures,
|
|
57 |
# the weights for each layer should be stored as a single file. |
|
|
58 |
# Right now weights have to be in .npy format |
|
|
59 |
weights folderName = /~yourpath/trainedWeights |
|
|
60 |
# Same length as conv layers |
|
|
61 |
weights trained indexes = [0,1,2] |
|
|
62 |
#weight_Initialization_Sec = 1 |
|
|
63 |
|
|
|
64 |
############## =================== Training Options ================= ################ |
|
|
65 |
[Training Parameters] |
|
|
66 |
#n_epochs=20 |
|
|
67 |
batch_size=10 |
|
|
68 |
number Of Epochs = 30 |
|
|
69 |
number Of SubEpochs = 20 |
|
|
70 |
number of samples at each SubEpoch Train = 1000 |
|
|
71 |
# TODO. To define some changes in the learning rate |
|
|
72 |
learning Rate change Type = 0 |
|
|
73 |
# Subvolumes (i.e. samples) sizes. |
|
|
74 |
# Validation equal to testing samples |
|
|
75 |
sampleSize_Train = [27,27,27] |
|
|
76 |
sampleSize_Test = [35,35,35] |
|
|
77 |
|
|
|
78 |
# Cost function values |
|
|
79 |
# 0: |
|
|
80 |
# 1: |
|
|
81 |
costFunction = 0 |
|
|
82 |
SoftMax temperature = 1.0 |
|
|
83 |
#### ========= Learning rate ========== ##### |
|
|
84 |
L1 Regularization Constant = 1e-6 |
|
|
85 |
L2 Regularization Constant = 1e-4 |
|
|
86 |
|
|
|
87 |
# To check
|
|
88 |
# The array size has to be equal to the total number of layers (i.e. CNNs + FCs + Classification layer) |
|
|
89 |
#Leraning Rate = [0.0001, 0.0001, 0.0001, 0.0001,0.0001, 0.0001, 0.0001, 0.0001,0.0001, 0.0001, 0.0001, 0.0001,0.0001, 0.0001 ] |
|
|
90 |
Leraning Rate = [0.001] |
|
|
91 |
# First epoch to change learning rate |
|
|
92 |
First Epoch Change LR = 1 |
|
|
93 |
# Each how many epochs change learning rate |
|
|
94 |
Frequency Change LR = 2 |
|
|
95 |
# TODO. Add learning rate for each layer |
|
|
96 |
|
|
|
97 |
#### ========= Momentum ========== ##### |
|
|
98 |
# Type of momentum |
|
|
99 |
# 0: Classic |
|
|
100 |
# 1: Nesterov |
|
|
101 |
Momentum Type = 1 |
|
|
102 |
Momentum Value = 0.6 |
|
|
103 |
# Use momentum normalized? |
|
|
104 |
momentumNormalized = 1 |
|
|
105 |
|
|
|
106 |
#### ======== Optimizer ===== ###### |
|
|
107 |
# Type: 0-> SGD |
|
|
108 |
# 1-> RMSProp (TODO. Check why RMSProp complains....) |
|
|
109 |
Optimizer Type = 1 |
|
|
110 |
|
|
|
111 |
# In case we choose RMSProp
|
|
112 |
Rho RMSProp = 0.9 |
|
|
113 |
Epsilon RMSProp = 1e-4 |
|
|
114 |
|
|
|
115 |
# Apply Batch normalization |
|
|
116 |
# 0: False, 1: True |
|
|
117 |
applyBatchNormalization = 1 |
|
|
118 |
BatchNormEpochs = 20 |
|
|
119 |
|
|
|
120 |
# Apply padding to images |
|
|
121 |
# 0: False, 1: True |
|
|
122 |
applyPadding = 1 |
|
|
123 |
|
|
|
124 |
############################################################################################################################################ |
|
|
125 |
################################################# TRAINING VALUES ##################################################### |
|
|
126 |
############################################################################################################################################ |
|
|
127 |
|
|
|
128 |
[Training Images] |
|
|
129 |
imagesFolder = ../Dataset/MR/ |
|
|
130 |
GroundTruthFolder = ../Dataset/Label/ |
|
|
131 |
# ROI folder will contain the ROI where to extract the patches and where to perform the segmentation.
|
|
132 |
# Values of the ROI should be 0 (non interest) and 1 (region of interest) |
|
|
133 |
ROIFolder = ../Dataset/ROI/ |
|
|
134 |
# If you have no ROIs |
|
|
135 |
#ROIFolder = [] |
|
|
136 |
# Type of images in the dataset |
|
|
137 |
# 0: nifti format |
|
|
138 |
# 1: matlab format |
|
|
139 |
# IMPORTANT: All the volumes should have been saved as 'vol' |
|
|
140 |
imageTypes = 1 |
|
|
141 |
|
|
|
142 |
# Indexes for training/validation images. Note that indexes should correspond to the position = index + 1 in the folder,
|
|
143 |
# since python starts indexing at 0 |
|
|
144 |
indexesForTraining = [0,1,2,3,4,5] |
|
|
145 |
indexesForValidation = [6] |
|
|
146 |
|