# model.py

import tensorflow as tf
from functools import reduce


class Convolution3DNetwork(object):
    DEFAULT_LAYER_PADDING = 'VALID'
    DEFAULT_CONV_STRIDE = [1, 1, 1, 1, 1]

    def __init__(self, config):
        self._config = config
        self._strides = self._config.get_strides()
        self._pool_strides = self._config.get_pool_strides()
        self._pool_windows = self._config.get_pool_windows()
        self._init_weights()
        self._init_biases()
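
    # The `config` object is expected to provide: get_strides(),
    # get_pool_strides(), get_pool_windows(), get_conv_weights(),
    # get_conv_biases(), get_fc_weights(), get_fc_biases() (the weight/bias
    # getters yielding (name, initializer) pairs, as consumed below),
    # with_l2_norm(), has_dropout_after_convolutions(), and has_fc_dropout(i).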

    def _init_weights(self):
        self._weights = [
            tf.Variable(init_func, name=name)
            for name, init_func in self._config.get_fc_weights()
        ]
        self._conv_weights = [
            tf.Variable(init_func, name=name)
            for name, init_func in self._config.get_conv_weights()
        ]

    def _init_biases(self):
        self._biases = [
            tf.Variable(init_func, name=name)
            for name, init_func in self._config.get_fc_biases()
        ]
        self._conv_biases = [
            tf.Variable(init_func, name=name)
            for name, init_func in self._config.get_conv_biases()
        ]

    def weights(self):
        return self._conv_weights + self._weights

    def l2_regularizer(self):
        # Sum tf.nn.l2_loss over every fully connected weight matrix.
        # Seed the accumulator with 0.0: a plain pairwise reduce would
        # re-apply l2_loss to the running sum on every step.
        if self._config.with_l2_norm():
            return reduce(lambda acc, w: acc + tf.nn.l2_loss(w),
                          self._weights, 0.0)
        return 0

    def biases(self):
        return self._conv_biases + self._biases

    # Create some wrappers for simplicity
    def conv3d(self, x, W, b, name,
               strides=DEFAULT_CONV_STRIDE,
               padding=DEFAULT_LAYER_PADDING):
        with tf.variable_scope(name) as scope:
            # Conv3D wrapper, with bias and relu activation
            x = tf.nn.conv3d(x, W, strides=strides,
                             padding=padding, name=scope.name)
            x = tf.nn.bias_add(x, b, name='bias')
            return tf.nn.relu(x, name='relu')

    def maxpool3d(self, x, name, k,
                  strides=DEFAULT_CONV_STRIDE,
                  padding=DEFAULT_LAYER_PADDING):
        # MaxPool3D wrapper
        return tf.nn.max_pool3d(x, ksize=k, strides=strides,
                                padding=padding, name=name)
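
    # With the default 'VALID' padding, each spatial dimension shrinks as
    # out = floor((in - window) / stride) + 1 for both conv3d and maxpool3d;
    # the shape printouts in conv_net below track exactly this.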

    def fc(self, x, weights, bias, name, dropout=None, with_relu=True):
        with tf.variable_scope(name) as scope:
            fc = tf.add(tf.matmul(x, weights), bias, name=scope.name)
            if with_relu:
                fc = tf.nn.relu(fc, name='relu')
            if dropout:
                fc = tf.nn.dropout(fc, dropout)
            return fc
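
    # Note: this is TensorFlow 1.x-style code, so the `dropout` values passed
    # to tf.nn.dropout above and in conv_net below are keep probabilities
    # (the fraction of units retained), not drop rates.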

    # Create model
    def conv_net(self, x, dropout):
        last_conv_layer = x
        for i, weight in enumerate(self._conv_weights):
            # Convolution layer
            last_conv_layer = self.conv3d(last_conv_layer, weight,
                                          self._conv_biases[i],
                                          name="conv" + str(i),
                                          strides=self._strides[i])
            # Max pooling (down-sampling)
            if self._pool_windows[i]:
                last_conv_layer = self.maxpool3d(last_conv_layer,
                                                 name="pool" + str(i),
                                                 k=self._pool_windows[i],
                                                 strides=self._pool_strides[i])
            print("After current layer: ", last_conv_layer.get_shape().as_list())

        if self._config.has_dropout_after_convolutions():
            last_conv_layer = tf.nn.dropout(last_conv_layer, dropout)

        conv_shape = last_conv_layer.get_shape().as_list()
        fully_con_input_size = reduce(lambda x, y: x * y, conv_shape[1:])
        print("SHAPE of the last convolution layer after max pooling: {}, new shape {}".format(
            conv_shape, fully_con_input_size))

        # Reshape conv output to fit the fully connected layer input;
        # an unknown batch dimension (None) becomes -1 for tf.reshape.
        number = conv_shape[0] or -1
        fully_connected = tf.reshape(last_conv_layer, [number, fully_con_input_size])

        # Fully connected layers (all but the last weight matrix)
        for i, weight in enumerate(self._weights[:-1]):
            layer_dropout = dropout if self._config.has_fc_dropout(i) else None
            fully_connected = self.fc(fully_connected,
                                      weight,
                                      self._biases[i],
                                      name='fully_connected' + str(i),
                                      dropout=layer_dropout)

        # Output layer: class prediction logits (no relu)
        out = tf.add(tf.matmul(fully_connected, self._weights[-1]),
                     self._biases[-1], name='output_layer')
        return out

def loss_function_with_logits(logits, labels, tensor_name='cost_func'):
    # Softmax cross entropy; expects one-hot (or soft) label distributions
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=labels), name=tensor_name)


# Sparse softmax is used for mutually exclusive classes;
# labels rank must be logits rank - 1
def sparse_loss_with_logits(logits, labels, tensor_name='cost_func'):
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels), name=tensor_name)
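
# A minimal usage sketch (illustrative only; `config`, `depth`, `height`,
# `width`, `channels`, and `beta` are assumptions, not defined in this file):
#
#   network = Convolution3DNetwork(config)
#   x = tf.placeholder(tf.float32, [None, depth, height, width, channels])
#   labels = tf.placeholder(tf.int64, [None])
#   keep_prob = tf.placeholder(tf.float32)
#   logits = network.conv_net(x, keep_prob)
#   cost = (sparse_loss_with_logits(logits, labels)
#           + beta * network.l2_regularizer())
#   train_op = tf.train.AdamOptimizer().minimize(cost)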