--- a
+++ b/fetal_net/model/unet/unet.py
@@ -0,0 +1,141 @@
+import numpy as np
+from keras import backend as K
+from keras.engine import Input, Model
+from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Activation, BatchNormalization, PReLU, Deconvolution2D, \
+    Permute, SpatialDropout2D, Add
+from keras.optimizers import Adam
+
+from ...metrics import dice_coefficient_loss, dice_coefficient, vod_coefficient, \
+    vod_coefficient_loss
+
+# the network permutes its input to channels-first internally (see the Permute
+# layers in unet_model_2d), so convolutions, normalizations and concatenations
+# treat axis 1 as the channel axis
+K.set_image_data_format("channels_first")
+
+try:
+    from keras.engine import merge
+except ImportError:
+    from keras.layers.merge import concatenate
+
+
+def unet_model_2d(input_shape, pool_size=(2, 2), n_labels=1, initial_learning_rate=0.00001, deconvolution=False,
+                  depth=4, n_base_filters=32, include_label_wise_dice_coefficients=False,
+                  batch_normalization=False, activation_name="sigmoid", loss_function=dice_coefficient_loss,
+                  dropout_rate=0, **kwargs):
+    """
+    Builds the 3D UNet Keras model.f
+    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
+    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
+    coefficient for each label as metric.
+    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
+    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
+    to train the model.
+    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
+    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
+    :param input_shape: Shape of the input data (n_chanels, x_size, y_size, z_size). The x, y, and z sizes must be
+    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
+    :param pool_size: Pool size for the max pooling operations.
+    :param n_labels: Number of binary labels that the model is learning.
+    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
+    :param deconvolution: If set to True, will use transpose convolution(deconvolution) instead of up-sampling. This
+    increases the amount memory required during training.
+    :return: Untrained 3D UNet Model
+    """
+    metrics = ['binary_accuracy', vod_coefficient]
+    if loss_function != dice_coefficient_loss:
+        metrics += [dice_coefficient]
+
+    inputs = Input(input_shape)
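+    # move the channel axis to the front: the network operates channels-first
+    # internally and permutes back to channels-last just before the output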
+    inputs_p = Permute((3, 1, 2))(inputs)
+
+    current_layer = inputs_p
+    levels = list()
+
+    # add levels with max pooling
+    for layer_depth in range(depth):
+        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2 ** layer_depth),
+                                          batch_normalization=batch_normalization)
+        if dropout_rate > 0:
+            layer1 = SpatialDropout2D(rate=dropout_rate)(layer1)
+        current_layer = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2 ** layer_depth) * 2,
+                                                 batch_normalization=batch_normalization)
+        levels.append(current_layer)
+
+        if layer_depth < depth - 1:
+            current_layer = MaxPooling2D(pool_size=pool_size)(current_layer)
+
+    # add levels with up-convolution or up-sampling
+    for layer_depth in range(depth - 2, -1, -1):
+        up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,
+                                            n_filters=current_layer._keras_shape[1])(current_layer)
+        concat = concatenate([up_convolution, levels[layer_depth]], axis=1)
+        current_layer = create_convolution_block(n_filters=levels[layer_depth]._keras_shape[1],
+                                                 input_layer=concat, batch_normalization=batch_normalization)
+        if dropout_rate > 0:
+            current_layer = SpatialDropout2D(rate=dropout_rate)(current_layer)
+        current_layer = create_convolution_block(n_filters=levels[layer_depth]._keras_shape[1],
+                                                 input_layer=current_layer,
+                                                 batch_normalization=batch_normalization)
+
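+    # a 1x1 convolution maps the feature maps to n_labels output channels; the final
+    # activation is applied and the channel axis is moved back to the last position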
+    final_convolution = Conv2D(n_labels, (1, 1))(current_layer)
+    act = Activation(activation_name)(final_convolution)
+    act = Permute((2, 3, 1))(act)
+    model = Model(inputs=inputs, outputs=act)
+
+    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=loss_function, metrics=metrics)
+    return model
+
+
+def create_convolution_block(input_layer, n_filters, batch_normalization=False, kernel=(3, 3), activation=None,
+                             padding='same', strides=(1, 1), instance_normalization=False):
+    """
+    :param strides:
+    :param input_layer:
+    :param n_filters:
+    :param batch_normalization:
+    :param kernel:
+    :param activation: Keras activation layer to use. (default is 'relu')
+    :param padding:
+    :return:
+    """
+    layer = Conv2D(n_filters, kernel, padding=padding, strides=strides)(input_layer)
+    if batch_normalization:
+        layer = BatchNormalization(axis=1)(layer)
+    elif instance_normalization:
+        try:
+            from keras_contrib.layers.normalization import InstanceNormalization
+        except ImportError:
+            raise ImportError("Install keras_contrib in order to use instance normalization."
+                              "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")
+        layer = InstanceNormalization(axis=1)(layer)
+    if activation is None:
+        layer = Activation('relu')(layer)
+    else:
+        layer = activation()(layer)
+
+    return layer
+
+
+def compute_level_output_shape(n_filters, depth, pool_size, image_shape):
+    """
+    Each level has a particular output shape based on the number of filters used in that level and the depth or number
+    of max pooling operations that have been done on the data at that point.
+    :param image_shape: shape of the image (spatial dimensions only, without the channel axis).
+    :param pool_size: the pool_size parameter used in the max pooling operation.
+    :param n_filters: Number of filters used by the last node in a given level.
+    :param depth: The number of levels down in the U-shaped model a given node is.
+    :return: Tuple with the output shape of the node at that level: (None, n_filters, *downsampled_image_shape).
+    """
+    output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()
+    return tuple([None, n_filters] + output_image_shape)
+
+
+def get_up_convolution(n_filters, pool_size, kernel_size=(2, 2), strides=(2, 2),
+                       deconvolution=False):
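+    # Deconvolution2D is Keras' legacy alias for Conv2DTranspose; when deconvolution
+    # is False, n_filters is ignored and a parameter-free UpSampling2D is returned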
+    if deconvolution:
+        return Deconvolution2D(filters=n_filters, kernel_size=kernel_size,
+                               strides=strides)
+    else:
+        return UpSampling2D(size=pool_size)
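+
+
+# Minimal usage sketch (illustrative only, not part of the training pipeline):
+# builds the model for an assumed (128, 128, 3) channels-last input and prints a
+# summary; 128 is divisible by pool_size**depth = 2**4 = 16 as required above.
+if __name__ == "__main__":
+    example_model = unet_model_2d(input_shape=(128, 128, 3))
+    example_model.summary()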