b/he_j_inference/keras_model.py

#coding:utf-8
import importlib
import keras.backend as K
from keras.engine import InputSpec
from keras.layers import Input, Lambda, Dropout, Concatenate
from keras.activations import softmax
from keras.layers.core import Dense
from keras.layers import Conv2D, Average, MaxPooling2D, AveragePooling2D, Add, Flatten
from keras.layers import GlobalMaxPooling2D, GlobalAveragePooling2D, Multiply, LocallyConnected2D
from keras.models import Model
#import cv2
from keras.engine.topology import Layer
import numpy as np
import tensorflow as tf
from custom_layers import *
#from cbof import *
#from LearnToPayAttention import AttentionVGG
#batch_size=24

'''
class WildcatPool2d(Layer):
    # Initialize the layer and store the pooling hyper-parameters; no inputs
    # parameter is needed here.
    def __init__(self, kmax=0.2, kmin=0.2, alpha=0.7, **kwargs):
        #self.axis = axis
        self.kmax = kmax
        self.kmin = kmin
        self.alpha = alpha
        self.result = None
        super(WildcatPool2d, self).__init__(**kwargs)

    # build() creates the layer weights; input_shape is collected
    # automatically. This layer has no trainable weights of its own.
    def build(self, input_shape):
        #print(input_shape)
        super(WildcatPool2d, self).build(input_shape)

    # Resolve k given either as a fraction of n (0 < k < 1) or as an
    # absolute count, clamped to [0, n].
    def get_positive_k(self, k, n):
        if k <= 0:
            return 0
        elif k < 1:
            return K.cast(K.round(K.cast(n, dtype="float32") *
                                  K.cast(k, dtype="float32")), dtype="int32")
        elif k > n:
            return n
        else:
            return int(k)

    # The layer's logic: sort the n = h*w region scores per channel, then
    # average the top-kmax mean with alpha times the bottom-kmin mean.
    def call(self, x, **kwargs):
        batch_size, h, w, num_channels = K.shape(x)[0], K.shape(x)[1], K.shape(x)[2], K.shape(x)[3]
        n = h * w  # number of regions
        kmax = self.get_positive_k(self.kmax, n)
        kmin = self.get_positive_k(self.kmin, n)
        x = K.reshape(x, (batch_size, n, num_channels))
        x = K.permute_dimensions(x, (0, 2, 1))
        x = tf.contrib.framework.sort(x, axis=-1, direction='DESCENDING')
        x_max = K.sum(x[:, :, :kmax], axis=-1, keepdims=False) / K.cast(kmax, dtype="float32")
        x_min = (K.sum(x[:, :, n - kmin:n], axis=-1, keepdims=False)
                 * self.alpha / K.cast(kmin, dtype="float32"))
        self.result = Average()([x_max, x_min])
        return self.result

    # return output shape: (batch_size, num_channels)
    def compute_output_shape(self, input_shape):
        #return K.int_shape(self.result)  # (batch_size, num_classes)
        return tuple([input_shape[0], input_shape[3]])
'''
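
# Worked example for the wildcat pooling above (kmax=kmin=0.2, alpha=0.7):
# a 2x2 class map has n=4 regions, so kmax = kmin = round(0.2*4) = 1; for
# sorted scores [9, 5, 3, 1] this gives x_max = 9/1 = 9 and
# x_min = 0.7*1/1 = 0.7, so the pooled output is Average([9, 0.7]) = 4.85.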

#################################################################

def target_category_loss(x, category_index, nb_classes):
    # Zero out every class score except the target category's. A one-hot
    # mask broadcasts over the batch, so no global batch_size is needed
    # (the previous K.zeros(...)[...].assign(...) version depended on the
    # commented-out batch_size above, and .assign() is only valid on
    # variables, not on plain tensors).
    mask = tf.one_hot(category_index, nb_classes)
    return tf.multiply(x, mask)

def target_category_loss_output_shape(input_shape):
    return input_shape
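
# For example, with nb_classes=2 and category_index=1 the mask built in
# target_category_loss above is [0., 1.], so a softmax output row
# [0.3, 0.7] becomes [0., 0.7]; summing the result then recovers the
# target-class score used by the Grad-CAM branch (model_id == 6) below.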

def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x), axis=(1, 2, 3), keepdims=True)) + 1e-5)

class Get_grads(Layer):
    # Wraps K.gradients in a Layer so that the gradient of x[0] with respect
    # to x[1] becomes a Keras tensor (used by the Grad-CAM branch below).
    def __init__(self, **kwargs):
        self.result = None
        super(Get_grads, self).__init__(**kwargs)

    def build(self, input_shape):
        print(input_shape)
        super(Get_grads, self).build(input_shape)

    def call(self, x, **kwargs):
        self.result = normalize(K.gradients(x[0], x[1])[0])
        return self.result

    def compute_output_shape(self, input_shape):
        return K.int_shape(self.result)

# Freeze all layers of base_model so that the bottleneck features are
# extracted correctly.
def setup_to_transfer_learn(base_model):
    """Freeze all layers of the base model."""
    for layer in base_model.layers:
        layer.trainable = False
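
# A minimal sketch of the two-step B-CNN schedule described in get_model()
# below; everything here is illustrative (this helper, its SGD settings and
# epoch counts are assumptions, not part of the original training code):
# step 1 freezes the backbone and trains only the new head with a high
# learning rate, step 2 unfreezes everything and fine-tunes with a low one.
def two_step_bcnn_training_sketch(model, base_model, x_train, y_train):
    from keras.optimizers import SGD
    setup_to_transfer_learn(base_model)              # step 1: freeze backbone
    model.compile(optimizer=SGD(lr=0.01, momentum=0.9),
                  loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit(x_train, y_train, epochs=5)
    for layer in model.layers:                       # step 2: unfreeze all
        layer.trainable = True
    model.compile(optimizer=SGD(lr=0.001, momentum=0.9),  # recompile to apply
                  loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit(x_train, y_train, epochs=5)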

'''
def lr_multiply(base_model):
    for layer in base_model.layers:
        layer.W_learning_rate_multiplier = args_dict.lrmult_conv
        layer.b_learning_rate_multiplier = args_dict.lrmult_conv
'''

class ModelFactory:
    """
    Model factory for Keras default models
    """
    def __init__(self):
        self.models_ = dict(
            VGG16=dict(
                input_shape=(224, 224, 3),
                module_name="vgg16",
                last_conv_layer="block5_conv3",
            ),
            VGG19=dict(
                input_shape=(224, 224, 3),
                module_name="vgg19",
                last_conv_layer="block5_conv4",
            ),
            DenseNet121=dict(
                input_shape=(224, 224, 3),
                module_name="densenet",
                last_conv_layer="bn",
            ),
            DenseNet169=dict(
                input_shape=(224, 224, 3),
                module_name="densenet",
                last_conv_layer="bn",
            ),
            ResNet50=dict(
                input_shape=(224, 224, 3),
                module_name="resnet50",
                last_conv_layer="activation_49",
            ),
            InceptionV3=dict(
                input_shape=(299, 299, 3),
                module_name="inception_v3",
                last_conv_layer="mixed10",
            ),
            InceptionResNetV2=dict(
                input_shape=(299, 299, 3),
                module_name="inception_resnet_v2",
                last_conv_layer="conv_7b_ac",
            ),
            NASNetMobile=dict(
                input_shape=(224, 224, 3),
                module_name="nasnet",
                last_conv_layer="activation_188",
            ),
            NASNetLarge=dict(
                input_shape=(331, 331, 3),
                module_name="nasnet",
                last_conv_layer="activation_260",
            ),
            DarkNet19_448=dict(
                input_shape=(224, 224, 3),
                module_name="darknet19_448",
                last_conv_layer="activation_260",  # NOTE: same value as NASNetLarge; verify for this backbone
            ),
            Xception=dict(
                input_shape=(299, 299, 3),
                module_name="xception",
                last_conv_layer="activation_260",  # NOTE: same value as NASNetLarge; verify for this backbone
            ),
        )

    def get_last_conv_layer(self, model_name):
        return self.models_[model_name]["last_conv_layer"]

    def get_input_size(self, model_name):
        return self.models_[model_name]["input_shape"][:2]
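
    # For example, get_input_size("InceptionV3") returns (299, 299) and
    # get_last_conv_layer("VGG16") returns "block5_conv3".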

    def get_model(self, class_names, model_name="DenseNet121",
                  use_base_weights=True, weights_path=None,
                  input_shape=None, model_id=7):

        if use_base_weights is True:
            base_weights = "imagenet"
        else:
            base_weights = None

        base_model_class = getattr(
            importlib.import_module(
                #f"keras.applications.{self.models_[model_name]['module_name']}"
                "keras.applications." + self.models_[model_name]['module_name']
            ),
            model_name)

        if input_shape is None:
            input_shape = self.models_[model_name]["input_shape"]

        img_input = Input(shape=input_shape)
        base_model = base_model_class(
            include_top=False,
            input_tensor=img_input,
            input_shape=input_shape,
            weights=base_weights,
            pooling="avg")
        '''
        train bcnn in two steps (see two_step_bcnn_training_sketch above):
        1. freeze the base model and train only the bilinear pooling and
           last fc layers with a high lr=0.01
        2. train all layers with lr=0.001
        '''
        #setup_to_transfer_learn(base_model)

        layer_dict = dict([(layer.name, layer) for layer in base_model.layers])
        conv_outputs = None  # output of the last conv layer
        if model_name == "VGG16":
            block4_conv3 = layer_dict["block4_conv3"]
            block4_conv3_outputs = block4_conv3.output
            final_conv_layer = layer_dict["block5_conv3"]
            conv_outputs = final_conv_layer.output
        if model_name == "DenseNet121" or model_name == "DenseNet169":
            final_conv_layer = layer_dict["bn"]
            conv_outputs = final_conv_layer.output
        if model_name == "InceptionV3":
            final_conv_layer = layer_dict["mixed10"]
            conv_outputs = final_conv_layer.output
        if model_id == 0:
            x = base_model.output
            '''x = conv_outputs

            ##############SE module####################
            squeeze = GlobalAveragePooling2D()(x)
            excitation = Dense(units=512 // 4, activation='relu')(squeeze)
            #excitation = Activation('relu')(excitation)
            excitation = Dense(units=512, activation='sigmoid')(excitation)
            #excitation = Activation('sigmoid')(excitation)
            excitation = Reshape((1, 1, 512))(excitation)
            x = Multiply()([x, excitation])
            #x = SqueezeExcitation(512)(x)
            ###########################################
            spatial_att = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(x)
            spatial_att = Conv2D(1, (1, 1), activation='sigmoid', padding='same', name='loc')(spatial_att)
            x = Multiply()([x, spatial_att])
            x = GlobalAveragePooling2D()(x)'''
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x)
        elif model_id == 1:
            loc = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp0')(conv_outputs)
            #conv6 = LocallyConnected2D(32, (3, 3), activation='relu', padding='valid', name='conv6')(cccp)
            loc = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(loc)
            loc = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc')(loc)
            x = base_model.output
            #x = conv_outputs
            # Raw backend ops cannot be used here (AttributeError: 'Tensor'
            # object has no attribute '_keras_history'); use the Multiply
            # layer instead:
            #x = x * loc
            #x = Multiply()([x, loc])
            #x = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp1')(x)
            #x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv7')(x)
            #x = GlobalAveragePooling2D()(x)
            #x = GlobalMaxPooling2D()(x)
            #x = Dropout(rate=0.5)(x)
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x)
        elif model_id == 2:
            #x = base_model.output
            x = conv_outputs
            #x = Multiply()([x, loc])
            z_l2 = BilinearPooling()(x)
            #x = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp1')(x)
            #x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv7')(x)
            #x = GlobalAveragePooling2D()(x)
            #x = GlobalMaxPooling2D()(x)
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(z_l2)
            #freeze_model = Model(inputs=img_input, output=predictions)
            #setup_to_transfer_learn(freeze_model)
            loc = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp0')(conv_outputs)
            loc = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(loc)
            loc = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc')(loc)
        elif model_id == 3:
            loc0 = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp0')(conv_outputs)
            loc0 = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(loc0)
            loc0 = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc0')(loc0)
            loc1 = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp1')(block4_conv3_outputs)
            loc1 = Conv2D(256, (1, 1), activation='relu', padding='same', name='conv7')(loc1)
            loc1 = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc1')(loc1)
            # 2x nearest-neighbour upsampling, one axis at a time
            my_resize1 = Lambda(lambda x: K.repeat_elements(x, 2, axis=1))
            x = conv_outputs
            x_att = Multiply()([x, loc0])
            loc0 = my_resize1(loc0)
            my_resize2 = Lambda(lambda x: K.repeat_elements(x, 2, axis=2))
            loc0 = my_resize2(loc0)
            #loc = Add(name='loc')([loc0, loc1])
            loc = Average(name='loc')([loc0, loc1])
            #x = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp1')(x)
            #x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv7')(x)
            #x = GlobalAveragePooling2D()(x)
            x1 = GlobalMaxPooling2D()(x_att)
            #x = Dropout(rate=0.5)(x)
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x1)
            x = my_resize1(x)
            x = my_resize2(x)
            x_merge = Concatenate(axis=-1)([x, block4_conv3_outputs])
            x_att1 = Multiply()([x_merge, loc])
            x2 = GlobalMaxPooling2D()(x_att1)
            predictions1 = Dense(len(class_names), activation="softmax", name="cls_pred1")(x2)
        elif model_id == 4:
            loc = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp')(conv_outputs)
            loc = Conv2D(256, (1, 1), activation='relu', padding='same', name='conv6')(loc)
            loc = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc')(loc)
            # K.squeeze output cannot be used as a Model output:
            # TypeError: Output tensors to a Model must be Keras tensors. Found: Tensor("Squeeze:0", shape=(?, 14, 14), dtype=float32)
            #loc = K.squeeze(loc, axis=3)
            x = conv_outputs
            x = AveragePooling2D(pool_size=(2, 2))(x)
            #x = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp2')(x)
            #x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv7')(x)
            #x = GlobalAveragePooling2D()(x)
            #x = GlobalMaxPooling2D()(x)
            x = NoisyAnd()(x)
            #x = GlobalMaxPooling2D()(x)
            #print(predictions.shape)
            #my_reshape = Lambda(lambda x: K.reshape(x, (-1, x.shape[3])))
            #x = my_reshape(x)
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x)
        elif model_id == 5:
            loc = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp0')(conv_outputs)
            loc = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(loc)
            loc = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc')(loc)
            #x = base_model.output
            x = conv_outputs
            x = AveragePooling2D(pool_size=(2, 2))(x)
            x1 = AveragePooling2D(pool_size=(2, 2))(x)
            #x = Multiply()([x, loc])
            #x = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp1')(x)
            #x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv7')(x)
            x = Conv2D(2, (1, 1), activation='relu', padding='same', name='cccp1')(x)
            x1 = Conv2D(2, (1, 1), activation='relu', padding='same', name='cccp2')(x1)
            #x = GlobalAveragePooling2D()(x)
            #x = GlobalMaxPooling2D()(x)
            x = Softmax4D()(x)
            #x = GlobalMaxPooling2D()(x)
            x1 = Softmax4D()(x1)
            x = MaxPooling2D(pool_size=(14, 14))(x)
            x1 = MaxPooling2D(pool_size=(7, 7))(x1)
            x = Flatten(name='flatten')(x)
            x1 = Flatten(name='flatten1')(x1)
            predictions = Recalc(axis=1, name='cls_pred0')(x)
            predictions1 = Recalc(axis=1, name='cls_pred1')(x1)
            #predictions1 = Recalc(axis=1)(x1)
            predictions = Average(name='cls_pred')([predictions, predictions1])
            #predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x)
        elif model_id == 6:
            # Grad-CAM built into the graph: weight the last conv maps by the
            # spatial mean of the target-class score gradients (per channel).
            x = base_model.output
            pred = Dense(len(class_names), activation="softmax", name="pred")(x)
            target_layer = Lambda(lambda x: target_category_loss(x, 1, 2),
                                  output_shape=target_category_loss_output_shape)
            gc = target_layer(pred)
            get_loss = Lambda(lambda x: K.sum(x, axis=1))
            loss = get_loss(gc)
            grads = Get_grads()([loss, conv_outputs])
            get_weights = Lambda(lambda x: K.mean(x, axis=(1, 2), keepdims=True))
            weights = get_weights(grads)
            my_resize1 = Lambda(lambda x: K.repeat_elements(x, conv_outputs.shape[1], axis=1))
            weights = my_resize1(weights)
            my_resize2 = Lambda(lambda x: K.repeat_elements(x, conv_outputs.shape[2], axis=2))
            weights = my_resize2(weights)
            grad_cam = Multiply()([conv_outputs, weights])
            loc = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(conv_outputs)
            loc = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc')(loc)
            #x = base_model.output
            #x = Multiply()([x, loc])
            #x = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp1')(x)
            #x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv7')(x)
            #x = GlobalAveragePooling2D()(x)
            #x = GlobalMaxPooling2D()(x)
            x = GlobalMaxPooling2D()(grad_cam)
            #x1 = MaxPooling2D(pool_size=(7, 7))(x1)
            #x = Flatten(name='flatten')(x)
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x)
        elif model_id == 7:
            #loc = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp0')(conv_outputs)
            #loc = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(loc)
            #loc = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc')(loc)
            x = conv_outputs
            #x = Multiply()([x, loc])
            #num_maps=8
            classes = 2
            #x = Conv2D(num_maps*classes, (1, 1), activation='relu', padding='same', name='cccp')(x)
            #x = ClassWisePool()(x)
            x = WildcatPool2d()(x)  # expected from `from custom_layers import *`; the local copy above is commented out
            #x = LogSumExp()(x)
            #predictions = Recalc(axis=1, name='cls_pred')(x)
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x)
            #predictions = Dense(len(class_names), activation='sigmoid', name='cls_pred')(x)
        elif model_id == 8:
            #loc = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp0')(conv_outputs)
            #loc = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(loc)
            #loc = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc')(loc)
            x = conv_outputs
            #x = Multiply()([x, loc])
            x = LogSumExp(r=1)(x)
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x)
        elif model_id == 9:
            loc = Conv2D(512, (1, 1), activation='relu', padding='same', name='cccp0')(conv_outputs)
            loc = Conv2D(128, (1, 1), activation='relu', padding='same', name='conv6')(loc)
            loc = Conv2D(1, (1, 1), activation='relu', padding='same', name='loc')(loc)
            x = conv_outputs
            n_codewords = 128
            x = BoF_Pooling(n_codewords, spatial_level=0)(x)
            predictions = Dense(len(class_names), activation="softmax", name="cls_pred")(x)
        elif model_id == 10:
            # Requires the commented-out "from LearnToPayAttention import
            # AttentionVGG" at the top of this file; note that this branch
            # assigns no `predictions`, so the Model() call below relies on
            # one of the earlier branches having run.
            base_model = AttentionVGG(img_input, outputclasses=2, batchnorm=False, batchnormalizeinput=False).model
        model = Model(inputs=img_input,
                      outputs=predictions  #base_model.output #predictions1 #loc
                      )
        if weights_path == "":
            weights_path = None

        if weights_path is not None:
            #print(f"load model weights_path: {weights_path}")
            print("load model weights_path: {}".format(weights_path))
            model.load_weights(weights_path, by_name=True)
        return model
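
# A minimal usage sketch (the class names and model choice below are
# illustrative placeholders, not values fixed by this file):
#
#   factory = ModelFactory()
#   model = factory.get_model(class_names=["negative", "positive"],
#                             model_name="VGG16", model_id=7)
#   model.summary()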