from __future__ import print_function
import numpy as np
import data_transforms
import data_iterators
import pathfinder
import lasagne as nn
from collections import namedtuple
from functools import partial
import lasagne.layers.dnn as dnn
import lasagne
import theano.tensor as T
import utils
import os
restart_from_save = None
rng = np.random.RandomState(42)
# transformations
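# p_transform_data_in resamples the whole scan to 1 mm isotropic spacing in a
# fixed 432^3 volume; p_transform describes the 48^3 patches the network sees.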
p_transform_data_in = {'patch_size': (432, 432, 432),
                       'mm_patch_size': (432, 432, 432),
                       'pixel_spacing': (1., 1., 1.)}
p_transform = {'patch_size': (48, 48, 48),
               'mm_patch_size': (48, 48, 48),
               'pixel_spacing': (1., 1., 1.)}
# data preparation function
def data_prep_function(data, pixel_spacing, p_transform=p_transform):
    # TODO: MAKE SURE THAT DATA IS PREPROCESSED THE SAME WAY
    # lung_mask = lung_segmentation.segment_HU_scan(data)
    # Note: the p_transform argument is kept for interface compatibility, but
    # the transform below deliberately uses the full-scan parameters
    # p_transform_data_in rather than the 48^3 patch parameters.
    x, tf_matrix = data_transforms.transform_scan3d(data=data,
                                                    pixel_spacing=pixel_spacing,
                                                    p_transform=p_transform_data_in,
                                                    lung_mask=None,
                                                    p_transform_augment=None)
    x = data_transforms.pixelnormHU(x)
    print('x.shape', x.shape)
    return x, tf_matrix
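# A minimal usage sketch (assumption: `scan` is a raw HU volume and `spacing`
# its voxel spacing, both produced elsewhere by the data pipeline; the names
# are hypothetical and not defined in this config):
# x, tf_matrix = data_prep_function(scan, pixel_spacing=spacing)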
# data iterators
batch_size = 1
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk
train_valid_ids = utils.load_pkl(pathfinder.VALIDATION_SPLIT_PATH)
train_pids, valid_pids, test_pids = (train_valid_ids['training'],
                                     train_valid_ids['validation'],
                                     train_valid_ids['test'])
print('n train', len(train_pids))
print('n valid', len(valid_pids))
print('n test', len(test_pids))
all_pids = []
all_pids.extend(train_pids)
all_pids.extend(test_pids)
all_pids.extend(valid_pids)
data_iterator = data_iterators.DSBDataGenerator(data_path=pathfinder.DATA_PATH,
batch_size=chunk_size,
transform_params=None,
data_prep_fun=data_prep_function,
rng=rng,
patient_pids=all_pids,
infinite=True
)
nchunks_per_epoch = data_iterator.nsamples // chunk_size
max_nchunks = nchunks_per_epoch * 100
validate_every = int(5. * nchunks_per_epoch)
save_every = int(1. * nchunks_per_epoch)
learning_rate_schedule = {
0: 5e-4,
int(max_nchunks * 0.5): 2e-4,
int(max_nchunks * 0.6): 1e-4,
int(max_nchunks * 0.7): 5e-5,
int(max_nchunks * 0.8): 2e-5,
int(max_nchunks * 0.9): 1e-5
}
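# A minimal sketch of how a schedule dict like the one above is typically
# consumed (assumption: the training harness switches the learning rate when
# the chunk index reaches a key; current_learning_rate is a hypothetical
# helper, not part of this repo).
def current_learning_rate(chunk_idx, schedule=learning_rate_schedule):
    # take the rate attached to the largest key not exceeding chunk_idx
    return schedule[max(k for k in schedule if k <= chunk_idx)]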
# model
conv3d = partial(dnn.Conv3DDNNLayer,
filter_size=3,
pad='same',
W=nn.init.Orthogonal(),
nonlinearity=nn.nonlinearities.very_leaky_rectify)
max_pool3d = partial(dnn.MaxPool3DDNNLayer,
pool_size=2)
drop = lasagne.layers.DropoutLayer
dense = partial(lasagne.layers.DenseLayer,
W=lasagne.init.Orthogonal(),
nonlinearity=lasagne.nonlinearities.very_leaky_rectify)
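# conv3d / max_pool3d / dense fix the hyperparameters shared by every layer
# below: 3x3x3 'same'-padded convolutions with orthogonal init and a very
# leaky ReLU, 2x2x2 max pooling, and orthogonally initialised dense layers.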
def inrn_v2(lin):
    # Inception-ResNet style block: three parallel towers (1x1, 1x1->3x3 and
    # 1x1->3x3->3x3) are concatenated, projected back to the input channel
    # count with a 1x1 convolution, and added to the input (residual sum).
    n_base_filter = 32
    l1 = conv3d(lin, n_base_filter, filter_size=1)
    l2 = conv3d(lin, n_base_filter, filter_size=1)
    l2 = conv3d(l2, n_base_filter, filter_size=3)
    l3 = conv3d(lin, n_base_filter, filter_size=1)
    l3 = conv3d(l3, n_base_filter, filter_size=3)
    l3 = conv3d(l3, n_base_filter, filter_size=3)
    l = lasagne.layers.ConcatLayer([l1, l2, l3])
    l = conv3d(l, lin.output_shape[1], filter_size=1)
    l = lasagne.layers.ElemwiseSumLayer([l, lin])
    l = lasagne.layers.NonlinearityLayer(l, nonlinearity=lasagne.nonlinearities.rectify)
    return l
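# Worked shape example (hypothetical input, assuming inrn_v2 is applied to a
# (batch, 64, 24, 24, 24) tensor): each tower emits 32 maps, the concat gives
# 96, and the 1x1 projection restores 64 channels, so the residual sum is
# well defined and the output shape equals the input shape.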
def inrn_v2_red(lin):
    # We want to reduce the total activation volume by a factor of 4
    # (see the channel bookkeeping note after this function).
    den = 16
    nom2 = 4
    nom3 = 5
    nom4 = 7
ins = lin.output_shape[1]
l1 = max_pool3d(lin)
l2 = conv3d(lin, ins // den * nom2, filter_size=3, stride=2)
l3 = conv3d(lin, ins // den * nom2, filter_size=1)
l3 = conv3d(l3, ins // den * nom3, filter_size=3, stride=2)
l4 = conv3d(lin, ins // den * nom2, filter_size=1)
l4 = conv3d(l4, ins // den * nom3, filter_size=3)
l4 = conv3d(l4, ins // den * nom4, filter_size=3, stride=2)
l = lasagne.layers.ConcatLayer([l1, l2, l3, l4])
return l
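# Channel bookkeeping for inrn_v2_red (assuming ins is divisible by 16): the
# pooled tower keeps ins maps and the conv towers add ins//16 * (4 + 5 + 7)
# = ins maps, so feature maps double to 2*ins while stride 2 halves each
# spatial dimension (1/8 of the voxels); hence the factor-4 volume reduction
# noted above.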
def feat_red(lin):
# We want to reduce the feature maps by a factor of 2
ins = lin.output_shape[1]
l = conv3d(lin, ins // 2, filter_size=1)
return l
def build_model():
l_in = nn.layers.InputLayer((None, ) + p_transform['patch_size'])
    l_dim = nn.layers.DimshuffleLayer(l_in, pattern=[0, 'x', 1, 2, 3])  # add a singleton channel axis
l_target = nn.layers.InputLayer((None, 1))
l = conv3d(l_dim, 64)
l = inrn_v2_red(l)
l = inrn_v2(l)
l = feat_red(l)
l = inrn_v2(l)
l = inrn_v2_red(l)
l = inrn_v2(l)
l = feat_red(l)
l = inrn_v2(l)
l = feat_red(l)
l_out = dense(l, 128)
# l_out = nn.layers.DenseLayer(l, num_units=2,
# W=nn.init.Constant(0.),
# nonlinearity=nn.nonlinearities.softmax)
    metadata = utils.load_pkl(os.path.join("/home/eavsteen/dsb3/storage/metadata/dsb3/models/ikorshun/",
                                           "luna_c3-20170226-174919.pkl"))
    for i in range(-20, 0):
        print(metadata['param_values'][i].shape)
    # drop the last two arrays (W and b of the pretrained softmax head, which
    # is commented out above) and load the remaining pretrained weights
    nn.layers.set_all_param_values(l_out, metadata['param_values'][:-2])
return namedtuple('Model', ['l_in', 'l_out', 'l_target'])(l_in, l_out, l_target)
def build_objective(model, deterministic=False, epsilon=1e-12):
    # negative log-likelihood of the target class, with predictions clipped
    # away from zero for numerical stability (this assumes l_out produces
    # class probabilities, e.g. the softmax head commented out above)
    predictions = nn.layers.get_output(model.l_out, deterministic=deterministic)
    targets = T.cast(T.flatten(nn.layers.get_output(model.l_target)), 'int32')
    p = predictions[T.arange(predictions.shape[0]), targets]
    p = T.clip(p, epsilon, 1.)
    loss = T.mean(T.log(p))
    return -loss
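# Note: this is the mean categorical cross-entropy for integer targets,
# written out by hand; lasagne.objectives.categorical_crossentropy would
# compute the same quantity, up to the epsilon clipping and the final mean.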
def build_updates(train_loss, model, learning_rate):
updates = nn.updates.adam(train_loss, nn.layers.get_all_params(model.l_out, trainable=True), learning_rate)
return updates
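# A minimal end-to-end sketch of how a training script typically wires these
# pieces together (assumption: this mirrors the repo's generic train loop;
# iter_train is a hypothetical name, not defined by this config).
if __name__ == '__main__':
    import theano
    model = build_model()
    train_loss = build_objective(model, deterministic=False)
    learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))
    updates = build_updates(train_loss, model, learning_rate)
    # one compiled step: feed a patch batch and targets, apply one Adam update
    iter_train = theano.function([model.l_in.input_var, model.l_target.input_var],
                                 train_loss, updates=updates)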