Diff of /model.py [000000] .. [4f54f1]

import tensorflow as tf
from functools import reduce


class Convolution3DNetwork(object):
    DEFAULT_LAYER_PADDING = 'VALID'
    DEFAULT_CONV_STRIDE = [1, 1, 1, 1, 1]

    def __init__(self, config):
        self._config = config
        self._strides = self._config.get_strides()
        self._pool_strides = self._config.get_pool_strides()
        self._pool_windows = self._config.get_pool_windows()

        self._init_weights()
        self._init_biases()
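
    # The config object is expected (inferred from its usage in this class) to
    # provide: get_strides(), get_pool_strides(), get_pool_windows(),
    # get_fc_weights(), get_conv_weights(), get_fc_biases(), get_conv_biases(),
    # with_l2_norm(), has_dropout_after_convolutions() and has_fc_dropout(i).
    # The get_*_weights()/get_*_biases() methods yield (name, initializer) pairs.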

    def _init_weights(self):
        self._weights = [
            tf.Variable(init_func, name=name)
            for name, init_func in self._config.get_fc_weights()
        ]
        self._conv_weights = [
            tf.Variable(init_func, name=name)
            for name, init_func in self._config.get_conv_weights()
        ]

    def _init_biases(self):
        self._biases = [
            tf.Variable(init_func, name=name)
            for name, init_func in self._config.get_fc_biases()
        ]
        self._conv_biases = [
            tf.Variable(init_func, name=name)
            for name, init_func in self._config.get_conv_biases()
        ]

    def weights(self):
        return self._conv_weights + self._weights

    def l2_regularizer(self):
        if self._config.with_l2_norm():
            # Sum the L2 penalties of all fully connected weights. A plain
            # reduce with tf.nn.l2_loss applied to both lambda arguments would
            # re-apply l2_loss to the running sum once there are three or more
            # weight tensors, distorting the penalty.
            return tf.add_n([tf.nn.l2_loss(w) for w in self._weights])
        return 0

    def biases(self):
        return self._conv_biases + self._biases

    # Create some wrappers for simplicity
    def conv3d(self, x, W, b, name,
               strides=DEFAULT_CONV_STRIDE,
               padding=DEFAULT_LAYER_PADDING):
        # Conv3D wrapper, with bias and relu activation
        with tf.variable_scope(name) as scope:
            x = tf.nn.conv3d(x, W, strides=strides,
                             padding=padding, name=scope.name)
            x = tf.nn.bias_add(x, b, name='bias')
            return tf.nn.relu(x, name='relu')
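
    # Note: tf.nn.conv3d and tf.nn.max_pool3d operate on 5-D NDHWC tensors,
    # so `strides` (and the pooling window `k` below) are five-element lists
    # of [batch, depth, height, width, channels]; the default [1, 1, 1, 1, 1]
    # moves one step at a time and never strides over batch or channels.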

    def maxpool3d(self, x, name, k,
                  strides=DEFAULT_CONV_STRIDE,
                  padding=DEFAULT_LAYER_PADDING):
        # MaxPool3D wrapper
        return tf.nn.max_pool3d(x, ksize=k, strides=strides,
                                padding=padding, name=name)

    def fc(self, x, weights, bias, name, dropout=None, with_relu=True):
        # Fully connected wrapper: x @ weights + bias with optional relu
        # activation; `dropout`, when given, is the keep probability passed
        # to tf.nn.dropout.
        with tf.variable_scope(name) as scope:
            fc = tf.add(tf.matmul(x, weights), bias, name=scope.name)
            if with_relu:
                fc = tf.nn.relu(fc, name='relu')
            if dropout:
                fc = tf.nn.dropout(fc, dropout)
            return fc

    # Create model
    def conv_net(self, x, dropout):
        last_conv_layer = x

        for i, weight in enumerate(self._conv_weights):
            # Convolution layer
            last_conv_layer = self.conv3d(last_conv_layer, weight,
                                          self._conv_biases[i],
                                          name="conv" + str(i),
                                          strides=self._strides[i])

            # Max pooling (down-sampling); skipped when the pool window
            # for this layer is falsy in the config
            if self._pool_windows[i]:
                last_conv_layer = self.maxpool3d(last_conv_layer,
                                                 name="pool" + str(i),
                                                 k=self._pool_windows[i],
                                                 strides=self._pool_strides[i])

            print("After current layer: ", last_conv_layer.get_shape().as_list())

        if self._config.has_dropout_after_convolutions():
            last_conv_layer = tf.nn.dropout(last_conv_layer, dropout)

        conv_shape = last_conv_layer.get_shape().as_list()
        fully_con_input_size = reduce(lambda x, y: x * y, conv_shape[1:])
        print("SHAPE of the last convolution layer after max pooling: {}, new shape {}".format(
            conv_shape, fully_con_input_size))

        # Reshape the conv output to fit the fully connected layer input;
        # an unknown (None) batch dimension becomes -1 so reshape infers it
        number = conv_shape[0] or -1
        fully_connected = tf.reshape(last_conv_layer, [number, fully_con_input_size])

        # Fully connected layers, all but the last weight matrix
        for i, weight in enumerate(self._weights[:-1]):
            if self._config.has_fc_dropout(i):
                layer_dropout = dropout
            else:
                layer_dropout = None

            fully_connected = self.fc(fully_connected,
                                      weight,
                                      self._biases[i],
                                      name='fully_connected' + str(i),
                                      dropout=layer_dropout)

        # Output, class prediction: the final weights are applied without
        # relu or dropout
        out = tf.add(tf.matmul(fully_connected, self._weights[-1]),
                     self._biases[-1], name='output_layer')
        return out


def loss_function_with_logits(logits, labels, tensor_name='cost_func'):
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=labels), name=tensor_name)


# Sparse softmax is used for mutually exclusive classes;
# labels rank must be logits rank - 1
def sparse_loss_with_logits(logits, labels, tensor_name='cost_func'):
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels), name=tensor_name)
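

# Minimal usage sketch (an assumption, not part of this diff): wiring the
# network into a TF1-style training graph. The placeholder shapes and the
# `config` object are hypothetical; only the Convolution3DNetwork API and
# the loss helpers above come from this file.
#
#     network = Convolution3DNetwork(config)
#     x = tf.placeholder(tf.float32, [None, depth, height, width, channels])
#     labels = tf.placeholder(tf.int64, [None])
#     keep_prob = tf.placeholder(tf.float32)
#
#     logits = network.conv_net(x, keep_prob)
#     cost = sparse_loss_with_logits(logits, labels)
#     cost += network.l2_regularizer()
#     train_op = tf.train.AdamOptimizer().minimize(cost)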