-- pretrainSimple.lua: greedy layer-wise pre-training of a convolutional
-- autoencoder for the mitosis dataset.
require 'torch';
require 'nn';
require 'optim';

-- Run on GPU when true (requires a CUDA-enabled Torch install).
cudaFlag = true

if cudaFlag then
    require 'cutorch';
    require 'cunn';
end

-- CPU-time and wall-clock baselines for the total-time report at the end.
c = os.clock()
t = os.time()
-- Hyperparameters for SGD pre-training.
local learningRate = 0.05
local learningRateDecay = 0.0005
-- BUG FIX: this was declared as `weightdecay` (lower-case d) while the
-- optimizer configuration later reads `weightDecay`, so the value never
-- reached optim.sgd (it silently read an undefined global). Renamed so the
-- constant is actually used.
local weightDecay = 0.0000
local momentum = 0.9
local maxIteration = 5   -- epochs per layer
local p = 0.25           -- dropout probability applied to the autoencoder input
local batchSize = 200


local folder = '/home/andrew/mitosis/data/mitosis-train-large'
-- Data helpers: getImagePaths / getBatchSizes / shuffleImages / getSample.
dofile("data.lua")
local classes, classList, imagePaths = getImagePaths(folder)

-- Build the convolutional network, then mirror it into an autoencoder.
dofile('/home/andrew/mitosis/models/model2.lua')
net = model2()
if cudaFlag then
    net = net:cuda()
end
dofile('/home/andrew/mitosis/code/autoencoder.lua')
autoencoder = convnet2autoencoder(net)
-- Corrupt the input with dropout so each layer trains as a denoising
-- autoencoder (the reconstruction target stays the clean input).
autoencoder:insert(nn.Dropout(p), 1)
if cudaFlag then
    autoencoder = autoencoder:cuda()
end

-- Reconstruction loss.
criterion = nn.MSECriterion()
if cudaFlag then
    criterion = criterion:cuda()
end

-- compute size of each batch
batchSizes, numBatches = getBatchSizes(classes, classList, batchSize)

-- shuffle the images
classList = shuffleImages(classList, classes)
-- train
print("# StochasticGradient: training")

autoencoder:training()

-- subNet grows by one encoder/decoder layer pair per outer iteration below,
-- seeded here with the autoencoder's first module (the input dropout).
subNet = nn.Sequential()
subNet:insert(autoencoder:get(1), 1)

-- count the number of convolutional layers
numConvLayers = 0
for i = 1, net:size() do
    if torch.typename(net:get(i)) == 'nn.SpatialConvolution' then
        numConvLayers = numConvLayers + 1
    end
end
-- Greedy layer-wise training: for each conv layer j, splice the j-th encoder
-- triple (from the front of the autoencoder) and the matching decoder triple
-- (from the back) into subNet, then train the enlarged subNet end-to-end.
errors = {}
for j = 1, numConvLayers do
    c0 = os.clock()
    t0 = os.time()

    -- Encoder modules go right after the previously added encoder layers;
    -- decoder modules mirror them so subNet stays symmetric about its middle.
    subNet:insert(autoencoder:get(3*(j-1)+2), 3*(j-1)+2)
    subNet:insert(autoencoder:get(3*(j-1)+3), 3*(j-1)+3)
    subNet:insert(autoencoder:get(3*(j-1)+4), 3*(j-1)+4)
    subNet:insert(autoencoder:get(autoencoder:size()-3*(j-1)-2), 3*(j-1)+5)
    subNet:insert(autoencoder:get(autoencoder:size()-3*(j-1)-1), 3*(j-1)+6)
    subNet:insert(autoencoder:get(autoencoder:size()-3*(j-1)),   3*(j-1)+7)

    -- The flattened parameter view must be rebuilt after modules are added.
    params, gradParams = subNet:getParameters()

    -- NOTE(review): the constant at the top of the file was originally
    -- declared `weightdecay` (lower-case d); this read of `weightDecay`
    -- only picks up a value once that declaration is spelled consistently.
    optimState = {
        learningRate      = learningRate,
        learningRateDecay = learningRateDecay,
        weightDecay       = weightDecay,
        momentum          = momentum,
    }

    for epoch = 1, maxIteration do
        c1 = os.clock()
        t1 = os.time()

        -- Per-class cursor into the shuffled classList.
        local sampleSum = {}
        for i = 1, #classes do
            sampleSum[i] = 0
        end

        for i = 1, numBatches do
            c2 = os.clock()
            t2 = os.time()

            -- split classList into batches (k, not j: the original inner loop
            -- shadowed the outer layer index j)
            local sampleList = {}
            for k = 1, #classes do
                sampleList[k] = classList[k][{{sampleSum[k] + 1, sampleSum[k] + batchSizes[k][i]}}]
                sampleSum[k] = sampleSum[k] + batchSizes[k][i]
            end

            -- Autoencoder target is the input itself; the Dropout module
            -- inside subNet provides the corruption.
            local dataset = getSample(classes, sampleList, imagePaths)
            if cudaFlag then
                dataset.data = dataset.data:cuda()
            end
            dataset.label = dataset.data

            local input = dataset.data
            local target = dataset.label

            -- optim.sgd closure: returns the loss and the gradient w.r.t.
            -- the flattened parameters (argument unused; we always evaluate
            -- at the current params).
            local function feval(_)
                gradParams:zero()

                local outputs = subNet:forward(input)
                local loss = criterion:forward(outputs, target)
                local dloss_doutputs = criterion:backward(outputs, target)
                subNet:backward(input, dloss_doutputs)

                return loss, gradParams
            end
            _, fs = optim.sgd(feval, params, optimState)

            print('Layer = ' .. j .. ' of ' .. numConvLayers)
            print('Epoch = ' .. epoch .. ' of ' .. maxIteration)
            print('Batch = ' .. i .. ' of ' .. numBatches)
            for k = 1, #errors do
                print('Final Error for Layer ' .. k .. ' = ' .. errors[k])
            end
            print('Error = ' .. fs[1])
            print('CPU batch time = ' .. os.clock()-c2 .. ' seconds')
            print('Actual batch time (rounded) = ' .. os.time()-t2 .. ' seconds')
            if epochClock then
                print('CPU epoch time = ' .. epochClock .. ' seconds')
                print('Actual epoch time (rounded) = ' .. epochTime .. ' seconds')
            end
            if layerClock then
                print('CPU layer time = ' .. layerClock .. ' seconds')
                print('Actual layer time (rounded) = ' .. layerTime .. ' seconds')
            end
            print('Total CPU time so far = ' .. os.clock()-c .. ' seconds')
            print('Total actual time so far (rounded) = ' .. os.time()-t .. ' seconds')
            print('')
        end

        epochClock = os.clock()-c1
        epochTime = os.time()-t1
    end

    -- Record the final batch loss for this layer (shown while later layers train).
    errors[j] = fs[1]

    layerClock = os.clock()-c0
    layerTime = os.time()-t0
end
--autoencoder = torch.load('/home/andrew/mitosis/data/nets/pretrain.t7')

-- get CNN from autoencoder (copies the trained encoder weights back into net)
net = autoencoder2convnet(autoencoder, net)

torch.save('/home/andrew/mitosis/data/nets/model2-pretrained-greedylayerwise2.t7', net)

-- Final timing report (message now includes '= ' like every other print).
totalClock = os.clock()-c
totalTime = os.time()-t
print('Total CPU time = ' .. totalClock .. ' seconds')
print('Total actual time (rounded) = ' .. totalTime .. ' seconds')