b/nn_lung.py

import lasagne as nn
import theano.tensor as T
import theano.tensor.fft  # explicit import so that T.fft is available (used by Real3DFFTLayer below)
import numpy as np
from lasagne import nonlinearities
from lasagne.layers.dnn import Conv2DDNNLayer


def lb_softplus(lb=1):
    return lambda x: nn.nonlinearities.softplus(x) + lb


class MultLayer(nn.layers.MergeLayer):
    """
    takes elementwise product between 2 layers
    """

    def __init__(self, input1, input2, log=False, **kwargs):
        super(MultLayer, self).__init__([input1, input2], **kwargs)

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]

    def get_output_for(self, inputs, **kwargs):
        return inputs[0] * inputs[1]


class ConstantLayer(nn.layers.Layer):
    """
    Makes a layer of constant value the same shape as the given input layer
    """

    def __init__(self, shape_layer, constant=1, **kwargs):
        super(ConstantLayer, self).__init__(shape_layer, **kwargs)
        self.constant = constant

    def get_output_shape_for(self, input_shape):
        return input_shape

    def get_output_for(self, input, **kwargs):
        return T.ones_like(input) * self.constant


class RepeatLayer(nn.layers.Layer):
    def __init__(self, incoming, repeats, axis=0, **kwargs):
        super(RepeatLayer, self).__init__(incoming, **kwargs)
        self.repeats = repeats
        self.axis = axis

    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)
        output_shape.insert(self.axis, self.repeats)
        return tuple(output_shape)

    def get_output_for(self, input, **kwargs):
        shape_ones = [1] * input.ndim
        shape_ones.insert(self.axis, self.repeats)
        ones = T.ones(tuple(shape_ones), dtype=input.dtype)

        pattern = range(input.ndim)
        pattern.insert(self.axis, "x")
        # print shape_ones, pattern
        return ones * input.dimshuffle(*pattern)

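# Illustrative note (not in the original file): RepeatLayer inserts a new axis of
# length `repeats` at position `axis` and tiles the input along it. Hypothetical
# usage sketch:
#   l_in = nn.layers.InputLayer((None, 128))
#   l_rep = RepeatLayer(l_in, repeats=8, axis=1)   # output shape (None, 8, 128)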
|
|
|
class AttentionLayer(nn.layers.Layer):
    def __init__(self, incoming, u=nn.init.GlorotUniform(), **kwargs):
        super(AttentionLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[-1]
        self.u = self.add_param(u, (num_inputs, 1), name='u')

    def get_output_shape_for(self, input_shape):
        return input_shape[0], input_shape[-1]

    def get_output_for(self, input, **kwargs):
        a = T.nnet.softmax(T.dot(input, self.u)[:, :, 0])
        return T.sum(a[:, :, np.newaxis] * input, axis=1)

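# Illustrative note (not in the original file): AttentionLayer expects a 3D input
# (batch, time, features); it scores every timestep with the learned vector `u`,
# softmax-normalises the scores over the time axis, and returns the attention-weighted
# sum of the timesteps. Hypothetical usage sketch:
#   l_in = nn.layers.InputLayer((None, 10, 64))
#   l_att = AttentionLayer(l_in)   # output shape (None, 64)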
|
|
|
class MaskedMeanPoolLayer(nn.layers.MergeLayer):
    """
    pools globally across all trailing dimensions beyond the given axis.
    give it a mask
    """

    def __init__(self, incoming, mask, axis, **kwargs):
        super(MaskedMeanPoolLayer, self).__init__([incoming, mask], **kwargs)
        self.axis = axis

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0][:self.axis] + (1,)

    def get_output_for(self, inputs, **kwargs):
        input = inputs[0]
        mask = inputs[1]
        masked_input = input * mask.dimshuffle(0, 1, 'x')
        return (T.sum(masked_input.flatten(self.axis + 1), axis=self.axis, keepdims=True)
                / T.sum(mask, axis=-1, keepdims=True))

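# Illustrative note (not in the original file): the mask is expected to be a
# (batch, time) matrix of 0/1 entries and the input a (batch, time, 1) tensor of
# per-timestep values; masked-out entries are zeroed and the sum is divided by the
# number of valid entries, i.e. a masked mean. Hypothetical usage sketch:
#   l_vals = nn.layers.InputLayer((None, 10, 1))
#   l_mask = nn.layers.InputLayer((None, 10))
#   l_mean = MaskedMeanPoolLayer(l_vals, l_mask, axis=1)   # output shape (None, 1)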
|
|
|
class MaskedSTDPoolLayer(nn.layers.MergeLayer):
    """
    pools globally across all trailing dimensions beyond the given axis.
    give it a mask
    """

    def __init__(self, incoming, mask, axis, **kwargs):
        super(MaskedSTDPoolLayer, self).__init__([incoming, mask], **kwargs)
        self.axis = axis

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0][:self.axis] + (1,)

    def get_output_for(self, inputs, **kwargs):
        input = inputs[0]
        mask = inputs[1]
        masked_input = input * mask.dimshuffle(0, 1, 'x')
        mu_x = (T.sum(masked_input.flatten(self.axis + 1), axis=self.axis, keepdims=True)
                / T.sum(mask, axis=-1, keepdims=True))
        mu_x2 = (T.sum(masked_input.flatten(self.axis + 1) ** 2, axis=self.axis, keepdims=True)
                 / T.sum(mask, axis=-1, keepdims=True))
        return T.sqrt(mu_x2 - mu_x ** 2)

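# Illustrative note (not in the original file): this is the masked standard deviation
# computed from the two masked moments, std = sqrt(E[x^2] - E[x]^2); it is the
# companion statistic to MaskedMeanPoolLayer above and takes the same (input, mask) pair.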
|
|
|
class NonlinearityLayer(nn.layers.Layer):
    def __init__(self, incoming, nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(NonlinearityLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

    def get_output_for(self, input, **kwargs):
        return self.nonlinearity(input)


class CumSumLayer(nn.layers.Layer):
    def __init__(self, incoming, axis=1, **kwargs):
        super(CumSumLayer, self).__init__(incoming, **kwargs)
        self.axis = axis

    def get_output_shape_for(self, input_shape):
        return input_shape

    def get_output_for(self, input, **kwargs):
        result = T.extra_ops.cumsum(input, axis=self.axis)
        return result


class NormalisationLayer(nn.layers.Layer):
    def __init__(self, incoming, norm_sum=1.0, allow_negative=False, **kwargs):
        super(NormalisationLayer, self).__init__(incoming, **kwargs)
        self.norm_sum = norm_sum
        self.allow_negative = allow_negative

    def get_output_for(self, input, **kwargs):
        # optionally shift each row so its minimum is zero, then rescale the row to sum to norm_sum
        if self.allow_negative:
            inp_low_zero = input - T.min(input, axis=1).dimshuffle(0, 'x')
        else:
            inp_low_zero = input
        return inp_low_zero / T.sum(inp_low_zero, axis=1).dimshuffle(0, 'x') * self.norm_sum


class HighwayLayer(nn.layers.MergeLayer):
    def __init__(self, gate, input1, input2, **kwargs):
        incomings = [gate, input1, input2]
        super(HighwayLayer, self).__init__(incomings, **kwargs)
        assert gate.output_shape == input1.output_shape == input2.output_shape

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]

    def get_output_for(self, inputs, **kwargs):
        return inputs[0] * inputs[1] + (1 - inputs[0]) * inputs[2]


def highway_conv3(incoming, nonlinearity=nn.nonlinearities.rectify, **kwargs):
    wh = nn.init.Orthogonal('relu')
    bh = nn.init.Constant(0.0)
    wt = nn.init.Orthogonal('relu')
    bt = nn.init.Constant(-2.)
    num_filters = incoming.output_shape[1]

    # H
    l_h = Conv2DDNNLayer(incoming, num_filters=num_filters,
                         filter_size=(3, 3), stride=(1, 1),
                         pad='same', W=wh, b=bh,
                         nonlinearity=nonlinearity)
    # T
    l_t = Conv2DDNNLayer(incoming, num_filters=num_filters,
                         filter_size=(3, 3), stride=(1, 1),
                         pad='same', W=wt, b=bt,
                         nonlinearity=T.nnet.sigmoid)

    return HighwayLayer(gate=l_t, input1=l_h, input2=incoming)

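# Illustrative note (not in the original file): highway_conv3 builds one highway block,
# output = T(x) * H(x) + (1 - T(x)) * x, where H is a 3x3 'same' convolution with the
# given nonlinearity and T is a 3x3 'same' convolution with a sigmoid gate whose bias
# starts at -2, so the block initially stays close to the identity. Hypothetical usage
# sketch, stacking two blocks on a feature map:
#   l = nn.layers.InputLayer((None, 32, 64, 64))
#   l = highway_conv3(l)
#   l = highway_conv3(l)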
|
|
|
class Upscale3DLayer(nn.layers.Layer):
    """
    3D upscaling layer

    Performs 3D upscaling over the three trailing axes of a 5D input tensor.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    scale_factor : integer or iterable
        The scale factor in each dimension. If an integer, it is promoted to
        a cubic scale factor region. If an iterable, it should have three
        elements.
    mode : {'repeat', 'dilate'}
        Upscaling mode: repeat element values or upscale leaving zeroes between
        upscaled elements. Default is 'repeat'.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.
    """

    def __init__(self, incoming, scale_factor, mode='repeat', **kwargs):
        super(Upscale3DLayer, self).__init__(incoming, **kwargs)

        self.scale_factor = nn.utils.as_tuple(scale_factor, 3)

        if self.scale_factor[0] < 1 or self.scale_factor[1] < 1 or \
                self.scale_factor[2] < 1:
            raise ValueError('Scale factor must be >= 1, not {0}'.format(
                self.scale_factor))

        if mode not in {'repeat', 'dilate'}:
            msg = "Mode must be either 'repeat' or 'dilate', not {0}"
            raise ValueError(msg.format(mode))
        self.mode = mode

    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)  # copy / convert to mutable list
        if output_shape[2] is not None:
            output_shape[2] *= self.scale_factor[0]
        if output_shape[3] is not None:
            output_shape[3] *= self.scale_factor[1]
        if output_shape[4] is not None:
            output_shape[4] *= self.scale_factor[2]
        return tuple(output_shape)

    def get_output_for(self, input, **kwargs):
        a, b, c = self.scale_factor
        upscaled = input
        if self.mode == 'repeat':
            if c > 1:
                upscaled = T.extra_ops.repeat(upscaled, c, 4)
            if b > 1:
                upscaled = T.extra_ops.repeat(upscaled, b, 3)
            if a > 1:
                upscaled = T.extra_ops.repeat(upscaled, a, 2)
        elif self.mode == 'dilate':
            if c > 1 or b > 1 or a > 1:
                output_shape = self.get_output_shape_for(input.shape)
                upscaled = T.zeros(shape=output_shape, dtype=input.dtype)
                upscaled = T.set_subtensor(
                    upscaled[:, :, ::a, ::b, ::c], input)
        return upscaled

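# Illustrative note (not in the original file): Upscale3DLayer is the 3D analogue of
# Lasagne's Upscale2DLayer; 'repeat' mode is nearest-neighbour upsampling along the
# three spatial axes, 'dilate' mode writes the input into a zero tensor with the given
# strides. Hypothetical usage sketch:
#   l_in = nn.layers.InputLayer((None, 1, 16, 16, 16))
#   l_up = Upscale3DLayer(l_in, scale_factor=2)   # output shape (None, 1, 32, 32, 32)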
|
|
|
class CastingLayer(nn.layers.Layer):
    def __init__(self, incoming, dtype, **kwargs):
        super(CastingLayer, self).__init__(incoming, **kwargs)
        self.dtype = dtype

    def get_output_for(self, input, **kwargs):
        return T.cast(input, self.dtype)


def heaviside(x, size):
    return T.arange(0, size).dimshuffle('x', 0) - T.repeat(x, size, axis=1) >= 0.

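# Illustrative note (not in the original file): heaviside(x, size) expects x of shape
# (batch, 1) and returns a (batch, size) matrix whose entry [i, t] is 1 when t >= x[i]
# and 0 otherwise, i.e. a step function over the support 0 .. size-1 (compare
# NormalCDFLayer below, which produces the smooth Gaussian counterpart).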
|
|
|
class NormalCDFLayer(nn.layers.MergeLayer):
    def __init__(self, mu, sigma, max_support, **kwargs):
        super(NormalCDFLayer, self).__init__([mu, sigma], **kwargs)
        self.max_support = max_support

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0][0], self.max_support

    def get_output_for(self, inputs, **kwargs):
        mu = inputs[0]
        sigma = inputs[1]

        x_range = T.arange(0, self.max_support).dimshuffle('x', 0)
        mu = T.repeat(mu, self.max_support, axis=1)
        sigma = T.repeat(sigma, self.max_support, axis=1)
        x = (x_range - mu) / (sigma * T.sqrt(2.) + 1e-16)
        cdf = (T.erf(x) + 1.) / 2.
        return cdf

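# Illustrative note (not in the original file): given mu and sigma of shape (batch, 1),
# NormalCDFLayer evaluates the Gaussian CDF
#   Phi((t - mu) / sigma) = (erf((t - mu) / (sigma * sqrt(2))) + 1) / 2
# at every integer t in 0 .. max_support-1, giving a (batch, max_support) matrix of
# cumulative probabilities; the 1e-16 term only guards against division by zero.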
|
|
|
class AggAllBenignExp(nn.layers.Layer):
    """
    Aggregates per-element chances into one probability:
    output = 1 - exp(-sum(softplus(x)))
    """

    def __init__(self, incoming, **kwargs):
        super(AggAllBenignExp, self).__init__(incoming, **kwargs)

    def get_output_shape_for(self, input_shape):
        assert len(input_shape) == 3
        assert input_shape[2] == 1
        return (input_shape[0], 1)

    def get_output_for(self, input, **kwargs):
        rectified = nonlinearities.softplus(input)
        sum_rect = T.sum(rectified, axis=(1, 2))
        output = 1 - T.exp(-sum_rect)
        return output

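# Illustrative note (not in the original file): each element contributes a nonnegative
# evidence term softplus(x_i), and 1 - exp(-sum_i softplus(x_i)) can be read as a
# noisy-OR style pooling: the output approaches 1 as soon as any element provides
# strong evidence, and stays near 0 only when all elements score low.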
|
|
|
class AggAllBenignProd(nn.layers.Layer):
    """
    Aggregates per-element probabilities via their product:
    output = 1 - prod(sigmoid(x))
    """

    def __init__(self, incoming, apply_nl=True, **kwargs):
        super(AggAllBenignProd, self).__init__(incoming, **kwargs)
        self.apply_nl = apply_nl

    def get_output_shape_for(self, input_shape):
        assert len(input_shape) == 3
        assert input_shape[2] == 1
        return (input_shape[0], 1)

    def get_output_for(self, input, **kwargs):
        # optionally squash the inputs to (0, 1) with a sigmoid
        ps = nonlinearities.sigmoid(input) if self.apply_nl else input
        prod = T.prod(ps, axis=(1, 2))
        output = 1 - prod
        return output


class AggSoPP(nn.layers.Layer):
    """
    Aggregates via the mean of element-wise powers
    (sum-of-powers pooling with a learnable exponent)
    """

    def __init__(self, incoming, exp=nn.init.Constant(2.), **kwargs):
        super(AggSoPP, self).__init__(incoming, **kwargs)
        self.exp = self.add_param(exp, (1,), name='exp', regularizable=False)

    def get_output_shape_for(self, input_shape):
        assert len(input_shape) == 3
        assert input_shape[2] == 1
        return (input_shape[0], 1)

    def get_output_for(self, input, **kwargs):
        ps = nonlinearities.sigmoid(input)
        powd = ps ** self.exp
        tmean = T.mean(powd, axis=(1, 2))
        return tmean


class Unbroadcast(nn.layers.Layer):
    """
    Removes the broadcastable flags from all dimensions of the input tensor
    """

    def __init__(self, incoming, **kwargs):
        super(Unbroadcast, self).__init__(incoming, **kwargs)

    def get_output_shape_for(self, input_shape):
        return input_shape

    def get_output_for(self, input, **kwargs):
        all_dims = range(input.ndim)
        return T.unbroadcast(input, *all_dims)


class LogMeanExp(nn.layers.Layer):
    """
    ln(mean(exp( r * x ))) / r
    """

    def __init__(self, incoming, r=1, axis=-1, **kwargs):
        super(LogMeanExp, self).__init__(incoming, **kwargs)
        self.r = np.float32(r)
        self.axis = axis

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], 1)

    def get_output_for(self, input, **kwargs):
        return T.log(T.mean(T.exp(self.r * input), axis=self.axis) + 1e-7) / self.r

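# Illustrative note (not in the original file): log-mean-exp is a smooth pooling
# operator that interpolates between the mean and the maximum: as r -> 0 it tends to
# mean(x), as r -> +inf it tends to max(x), so a larger r makes the pooling sharper.
# The 1e-7 term only protects the logarithm from underflow.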
|
|
|
class AggMILLoss(nn.layers.Layer):
    """
    Outputs, per sample, [sum_i log(1 - p_i), sum_i p_i] with p_i = sigmoid(x_i),
    for use in a multiple-instance-learning style loss
    """

    def __init__(self, incoming, r=1, **kwargs):
        super(AggMILLoss, self).__init__(incoming, **kwargs)
        self.r = np.float32(r)

    def get_output_shape_for(self, input_shape):
        assert len(input_shape) == 3
        assert input_shape[2] == 1
        return (input_shape[0], 2)

    def get_output_for(self, input, **kwargs):
        ps = nonlinearities.sigmoid(input)
        sum_p_r_benign = T.sum(ps, axis=1)
        sum_log = T.sum(T.log(1 - ps + 1.e-12), axis=1)
        # concatenate along the feature axis so the output matches the declared (batch, 2) shape
        return T.concatenate([sum_log, sum_p_r_benign], axis=1)


class Real3DFFTLayer(nn.layers.Layer):
    """
    Real3DFFTLayer
    """

    def __init__(self, incoming, r=1, **kwargs):
        super(Real3DFFTLayer, self).__init__(incoming, **kwargs)

    def get_output_shape_for(self, input_shape):
        assert len(input_shape) == 4
        output_shape = (input_shape[0],)
        for i in range(1, len(input_shape)):
            output_shape = output_shape + (input_shape[i] // 2,)
        return output_shape

    def get_output_for(self, input, **kwargs):
        cfft = T.fft.rfft(input)
        # rfft stacks the real and imaginary parts in the trailing dimension of its output
        magnitude = (cfft[:, :, :, :, 0] ** 2 + cfft[:, :, :, :, 1] ** 2) ** 0.5
        return magnitude