combinedDeepLearningActiveContour/train_DL_LV_ES.m
|
%% shape prior using stacked autoencoder
clc;
clear all;
close all;
addpath('functions');
|
|
%% STEP 0:
% parameters
patchsize = 40;
visibleSize = patchsize*patchsize; % number of input units
hiddenSizeL1 = 100;                % number of hidden units
hiddenSizeL2 = 100;
sparsityParam1 = 0.01;             % desired average activation of the hidden units
sparsityParam2 = 0.01;
lambda = 3e-3;                     % weight decay parameter
beta = 3;                          % weight of sparsity penalty term
outputSize = visibleSize;          % number of output units
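% With these settings each example is a 40x40 patch flattened to a
% 1600-dimensional vector, encoded twice down to 100 hidden units, and
% mapped back to 1600 outputs (one value per pixel of the predicted LV mask).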
|
|
%%======================================================================
%% STEP 1: load training inputs and labels from mat files
load matFiles/training_dataES;
train_input = sampleIMAGES(t_Iroi, patchsize);
train_labels = sampleIMAGES(t_yLV, patchsize);
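% sampleIMAGES (in functions/) is expected to return one column per example,
% so the ROI images t_Iroi and their LV label masks t_yLV each become a
% visibleSize-by-numExamples matrix of flattened 40x40 patches.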
|
|
%% train sparse autoencoder 1
% Randomly initialize the parameters
saeTheta1 = initializeParameters(hiddenSizeL1, visibleSize);
|
|
% Use minFunc to minimize the function
addpath minFunc/
options.Method = 'lbfgs'; % Here, we use L-BFGS to optimize our cost
                          % function. Generally, for minFunc to work, you
                          % need a function pointer with two outputs: the
                          % function value and the gradient. In our problem,
                          % sparseAutoencoderCost.m satisfies this.
options.maxIter = 400;    % Maximum number of iterations of L-BFGS to run
options.display = 'on';

[sae1OptTheta, cost] = minFunc( @(p) sparseAutoencoderCost(p, ...
                                visibleSize, hiddenSizeL1, ...
                                lambda, sparsityParam1, ...
                                beta, train_input), ...
                                saeTheta1, options);
|
|
%% STEP 5: Visualization of AE1
W1 = reshape(sae1OptTheta(1:hiddenSizeL1*visibleSize), hiddenSizeL1, visibleSize);
display_network(W1', 12);
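% Each row of W1 is the weight vector of one hidden unit; transposed, every
% column is a 1600-long vector that display_network (from the UFLDL starter
% code) tiles as a 40x40 image, giving a quick visual check of the learned
% shape features.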
|
|
%% compute activations from layer 1
[sae1Features] = feedForwardAutoencoder(sae1OptTheta, hiddenSizeL1, ...
                                        visibleSize, train_input);
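% sae1Features is hiddenSizeL1-by-numExamples: the layer-1 encoding of every
% training patch, which becomes the input of the second autoencoder.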
|
|
%% train sparse autoencoder 2
% Randomly initialize the parameters
sae2Theta = initializeParameters(hiddenSizeL2, hiddenSizeL1);

[sae2OptTheta, costL2] = minFunc( @(p) sparseAutoencoderCost(p, ...
                                  hiddenSizeL1, hiddenSizeL2, ...
                                  lambda, sparsityParam2, ...
                                  beta, sae1Features), ...
                                  sae2Theta, options);
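% The same L-BFGS options (400 iterations) are reused here: the second
% autoencoder is trained on the layer-1 features exactly as the first was
% trained on raw patches.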
|
|
%% compute activations from layer 2
[sae2Features] = feedForwardAutoencoder(sae2OptTheta, hiddenSizeL2, ...
                                        hiddenSizeL1, sae1Features);
|
|
%% train multi-output logistic regression
lambda_mr = 1e-4;
options_mr.maxIter = 100;
trainLabels = train_labels;
mrModel = mrTrain(hiddenSizeL2, outputSize, lambda_mr, ...
                  sae2Features, trainLabels, options_mr);

saeMultRegOptTheta = mrModel.optTheta(:);
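% mrTrain (in functions/) is not shown here, but judging from its arguments
% it fits a regularized logistic regression with outputSize outputs on top of
% the 100-dimensional layer-2 features, one output per pixel of the 40x40
% mask; its flattened parameters seed the output layer for fine-tuning.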
|
|
%% fine tuning

% Initialize the stack using the parameters learned
stack = cell(2,1);
inputSize = visibleSize;

stack{1}.w = reshape(sae1OptTheta(1:hiddenSizeL1*inputSize), ...
                     hiddenSizeL1, inputSize);
stack{1}.b = sae1OptTheta(2*hiddenSizeL1*inputSize+1:2*hiddenSizeL1*inputSize+hiddenSizeL1);

stack{2}.w = reshape(sae2OptTheta(1:hiddenSizeL2*hiddenSizeL1), ...
                     hiddenSizeL2, hiddenSizeL1);
stack{2}.b = sae2OptTheta(2*hiddenSizeL2*hiddenSizeL1+1:2*hiddenSizeL2*hiddenSizeL1+hiddenSizeL2);
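% The slicing above relies on each autoencoder's parameters being packed as
% [W1(:); W2(:); b1(:); b2(:)]: skipping hiddenSize*inputSize entries for the
% encoder weights and the same again for the decoder weights lands exactly on
% the encoder bias b1.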
|
|
% Initialize the parameters for the deep model
[stackparams, netconfig] = stack2params(stack);
stackedAETheta = [ saeMultRegOptTheta ; stackparams ];
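% Parameter layout for the deep model: regression (output-layer) parameters
% first, then the flattened encoder stack; netconfig records the layer sizes
% that stackedAECost and stackedAEPredict need to unpack this vector.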
|
|
[stackedAEOptTheta, loss] = minFunc( @(x) stackedAECost(x, ...
                                     inputSize, hiddenSizeL2, outputSize, netconfig, ...
                                     lambda, train_input, train_labels), ...
                                     stackedAETheta, options);
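% Fine-tuning treats the two encoder layers plus the regression output layer
% as a single network and backpropagates the label-prediction error through
% all of it, again running 400 L-BFGS iterations via the shared options struct.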
|
|
%% test
% load test data
load matFiles/validation_dataES;
testI = t_Iroi;
test_input = sampleIMAGES(testI, patchsize);
|
|
[pred_yLV] = stackedAEPredict(stackedAEOptTheta, inputSize, hiddenSizeL2, ...
                              outputSize, netconfig, test_input);
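% pred_yLV holds one outputSize-long column per test patch; below it is
% reshaped to 40x40 slices whose zero level set is drawn as the LV contour.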
|
|
% the final output is a mask of the LV segmentation
yLVhr = reshape(pred_yLV, patchsize, patchsize, []);

% scale to image size
scale = size(t_Iroi,1)/patchsize;
Mroi = 100;
% Keep every resized slice: the original loop overwrote yLV_h on each
% iteration, so only the last test image retained its upsampled mask.
yLV_h = [];
for k = 1:size(yLVhr,3)
    y1 = yLVhr(:,:,k);
    yLV_h(:,:,k) = imresize(y1, scale); %#ok<AGROW>
end
|
|
%% display segmentation
for k = 1:30
    I1 = testI(:,:,k);
    figure(1)
    subplot(5,6,k)
    imagesc(I1);
    colormap(gray); hold on
    % index the k-th slice so each image is overlaid with its own contours
    % (assumes t_yLV stacks one ground-truth mask per validation image)
    contour(yLV_h(:,:,k), [0 0], 'r', 'LineWidth', 2); % predicted LV boundary
    contour(t_yLV(:,:,k), [0 0], 'g', 'LineWidth', 2); % ground-truth boundary
end
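% Note: the 5x6 subplot grid assumes at least 30 validation slices; reduce
% the loop bound and grid dimensions for smaller validation sets.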
|
|
% one title for all subplots
set(gcf,'NextPlot','add');
axes;
h = title(['HiddenSize=',num2str(hiddenSizeL1),' sparsity=',num2str(sparsityParam1)]);
set(gca,'Visible','off');
set(h,'Visible','on');
|
|
%% save results
filename = ['DLconfigure/LV_ES_V',num2str(patchsize),'_H',num2str(hiddenSizeL1), ...
            '_rho_',num2str(sparsityParam1*100),'div100'];
%save (filename);
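% The save call is left commented out; uncommenting it writes the whole
% workspace (including stackedAEOptTheta and netconfig) to the DLconfigure
% folder under a name encoding patch size, hidden size, and sparsity target.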
|
|
|