Diff of /modelQW.py [000000] .. [33d58f]

import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
import tensorflow as tf
from keras import initializers
from keras import regularizers


def colearning(pretrained_weights=None, input_size=(256, 256, 3)):
    """Co-learning segmentation network: parallel CT and PET encoders whose
    pooled features are fused at every scale by a 3D convolution across the
    two modalities, followed by a shared decoder with skip connections."""
    inputs = Input(input_size)
    # Padding spec for the 5D stacked tensor [batch, h, w, channels, modality]:
    # pad only the height and width dimensions by one pixel on each side.
    paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]])

    # Split the 3-channel input into its per-modality slices
    # (inputtemp is not used further in the network).
    [inputtemp, inputspet, inputsct] = Lambda(tf.split, arguments={'axis': 3, 'num_or_size_splits': 3})(inputs)

    # CT encoder branch: four double-conv blocks with 2x2 max pooling between them.
    conv1ct = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputsct)
    conv1ct = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1ct)
    pool1ct = MaxPooling2D(pool_size=(2, 2))(conv1ct)
    conv2ct = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1ct)
    conv2ct = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2ct)
    pool2ct = MaxPooling2D(pool_size=(2, 2))(conv2ct)
    conv3ct = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2ct)
    conv3ct = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3ct)
    pool3ct = MaxPooling2D(pool_size=(2, 2))(conv3ct)
    conv4ct = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3ct)
    conv4ct = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4ct)
    drop4ct = Dropout(0.5)(conv4ct)  # dropout applied before the final pooling
    pool4ct = MaxPooling2D(pool_size=(2, 2))(drop4ct)

    # PET encoder branch: identical structure to the CT branch.
    conv1pet = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputspet)
    conv1pet = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1pet)
    pool1pet = MaxPooling2D(pool_size=(2, 2))(conv1pet)
    conv2pet = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1pet)
    conv2pet = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2pet)
    pool2pet = MaxPooling2D(pool_size=(2, 2))(conv2pet)
    conv3pet = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2pet)
    conv3pet = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3pet)
    pool3pet = MaxPooling2D(pool_size=(2, 2))(conv3pet)
    conv4pet = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3pet)
    conv4pet = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4pet)
    drop4pet = Dropout(0.5)(conv4pet)  # dropout applied before the final pooling
    pool4pet = MaxPooling2D(pool_size=(2, 2))(drop4pet)

    # Co-learning fusion at scale 1: concatenate CT and PET features along the
    # channel axis, stack them along a new modality axis, pad h/w, move the
    # modality axis to the front, fuse the two modalities with a 3D convolution,
    # then weight the concatenated features by elementwise multiplication.
    comerge1_temp = concatenate([pool1ct, pool1pet], axis=3)
    poolctexp_temp = Lambda(expand_dim_backend, arguments={'dim': 4})(pool1ct)
    poolpetexp_temp = Lambda(expand_dim_backend, arguments={'dim': 4})(pool1pet)

    comerge2_temp = concatenate([poolctexp_temp, poolpetexp_temp], axis=4)
    input_mm = Lambda(tf.pad, arguments={'paddings': paddings, 'mode': 'CONSTANT'})(comerge2_temp)
    input_mm = Lambda(tf.transpose, arguments={'perm': [0, 4, 1, 2, 3]})(input_mm)
    comerge2con_temp = Conv3D(filters=128, kernel_size=[2, 3, 3],
                              kernel_initializer=initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None),
                              kernel_regularizer=regularizers.l2(0.1),
                              bias_initializer='zeros', padding='valid', activation='relu')(input_mm)
    colearn_out_temp = Lambda(tf.squeeze, arguments={'squeeze_dims': 1})(comerge2con_temp)
    conj1 = Lambda(tf.multiply, arguments={'y': comerge1_temp})(colearn_out_temp)

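    # Shape walk-through at scale 1 (assuming the default 256x256 input):
    # pool1 features are (batch, 128, 128, 64) per modality; the stacked tensor
    # is (batch, 128, 128, 64, 2); after padding and transposing it becomes
    # (batch, 2, 130, 130, 64); the 'valid' [2, 3, 3] Conv3D yields
    # (batch, 1, 128, 128, 128), which is squeezed to (batch, 128, 128, 128)
    # and multiplied with the channel concatenation of the same shape.
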
    # Co-learning fusion at scale 2 (same pattern as above).
    comerge1_temp = concatenate([pool2ct, pool2pet], axis=3)
    poolctexp_temp = Lambda(expand_dim_backend, arguments={'dim': 4})(pool2ct)
    poolpetexp_temp = Lambda(expand_dim_backend, arguments={'dim': 4})(pool2pet)

    comerge2_temp = concatenate([poolctexp_temp, poolpetexp_temp], axis=4)
    input_mm = Lambda(tf.pad, arguments={'paddings': paddings, 'mode': 'CONSTANT'})(comerge2_temp)
    input_mm = Lambda(tf.transpose, arguments={'perm': [0, 4, 1, 2, 3]})(input_mm)
    comerge2con_temp = Conv3D(filters=128, kernel_size=[2, 3, 3],
                              kernel_initializer=initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None),
                              kernel_regularizer=regularizers.l2(0.1),
                              bias_initializer='zeros', padding='valid', activation='relu')(input_mm)
    colearn_out_temp = Lambda(tf.squeeze, arguments={'squeeze_dims': 1})(comerge2con_temp)
    conj2 = Lambda(tf.multiply, arguments={'y': comerge1_temp})(colearn_out_temp)

    comerge1_temp = concatenate([pool3ct, pool3pet], axis=3)
    poolctexp_temp = Lambda(expand_dim_backend, arguments={'dim': 4})(pool3ct)
    poolpetexp_temp = Lambda(expand_dim_backend, arguments={'dim': 4})(pool3pet)

    comerge2_temp = concatenate([poolctexp_temp, poolpetexp_temp], axis=4)
    input_mm = Lambda(tf.pad, arguments={'paddings': paddings, 'mode': 'CONSTANT'})(comerge2_temp)
    input_mm = Lambda(tf.transpose, arguments={'perm': [0, 4, 1, 2, 3]})(input_mm)
    comerge2con_temp = Conv3D(filters=128, kernel_size=[2, 3, 3],
                              kernel_initializer=initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None),
                              kernel_regularizer=regularizers.l2(0.1),
                              bias_initializer='zeros', padding='valid', activation='relu')(input_mm)
    colearn_out_temp = Lambda(tf.squeeze, arguments={'squeeze_dims': 1})(comerge2con_temp)
    conj3 = Lambda(tf.multiply, arguments={'y': comerge1_temp})(colearn_out_temp)

    comerge1_temp = concatenate([pool4ct, pool4pet], axis=3)
    poolctexp_temp = Lambda(expand_dim_backend, arguments={'dim': 4})(pool4ct)
    poolpetexp_temp = Lambda(expand_dim_backend, arguments={'dim': 4})(pool4pet)

    comerge2_temp = concatenate([poolctexp_temp, poolpetexp_temp], axis=4)
    input_mm = Lambda(tf.pad, arguments={'paddings': paddings, 'mode': 'CONSTANT'})(comerge2_temp)
    input_mm = Lambda(tf.transpose, arguments={'perm': [0, 4, 1, 2, 3]})(input_mm)
    comerge2con_temp = Conv3D(filters=128, kernel_size=[2, 3, 3],
                              kernel_initializer=initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None),
                              kernel_regularizer=regularizers.l2(0.1),
                              bias_initializer='zeros', padding='valid', activation='relu')(input_mm)
    colearn_out_temp = Lambda(tf.squeeze, arguments={'squeeze_dims': 1})(comerge2con_temp)
    conj4 = Lambda(tf.multiply, arguments={'y': comerge1_temp})(colearn_out_temp)

    # Decoder: upsample and merge with the fused co-learning features at each scale.
    up5 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conj4))
    conv5 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up5)
    conv5 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    merge5 = concatenate([conj3, conv5], axis=3)

    up6 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(merge5))
    conv6 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up6)
    conv6 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    merge6 = concatenate([conj2, conv6], axis=3)

    up7 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(merge6))
    conv7 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up7)
    conv7 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    merge7 = concatenate([conj1, conv7], axis=3)

    up8 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(merge7))
    conv8 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up8)
    conv8 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

    # Final 1-channel sigmoid map for binary segmentation.
    conv9 = Conv2D(4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model


def expand_dim_backend(x, dim):
    # Helper used inside Lambda layers: inserts a new axis at position `dim`.
    xe = K.expand_dims(x, dim)
    return xe
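

# --- Usage sketch ---
# Minimal example of building and exercising the model. The weight file name,
# the zero-filled train_x/train_y arrays, and the training hyper-parameters are
# hypothetical placeholders, assuming inputs stacked as (temp, PET, CT)
# channels of a 256x256 slice and binary masks as targets.
if __name__ == '__main__':
    model = colearning(input_size=(256, 256, 3))
    model.summary()

    # Placeholder data: 8 stacked 3-channel slices with matching binary masks.
    train_x = np.zeros((8, 256, 256, 3), dtype=np.float32)
    train_y = np.zeros((8, 256, 256, 1), dtype=np.float32)

    checkpoint = ModelCheckpoint('colearning_weights.hdf5', monitor='loss',
                                 verbose=1, save_best_only=True)
    model.fit(train_x, train_y, batch_size=2, epochs=1, callbacks=[checkpoint])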