|
aggmap/AggMapNet.py
|
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 16 17:10:53 2020

@author: wanxiang.shen@u.nus.edu
"""

import warnings, os
warnings.filterwarnings("ignore")


import numpy as np
import pandas as pd
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import get_scorer, SCORERS

from aggmap import aggmodel
from aggmap.aggmodel.explain_dev import GlobalIMP, LocalIMP
from aggmap.aggmodel.explainer import shapley_explainer, simply_explainer

from joblib import dump, load
from copy import copy
from tensorflow.keras.models import load_model as load_tf_model

import gc
import tensorflow.keras.backend as K


def clean(clf):
    # Delete the fitted inner Keras model and force TF/Keras to release its memory.
    del clf._model
    del clf._performance
    del clf
    gc.collect()
    K.clear_session()
    tf.compat.v1.reset_default_graph()  # the TF graph is not the same as the Keras graph
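

# `clean` is also exposed as the `.clean` property on the estimators below.
# A minimal usage sketch, assuming `clf` is a fitted estimator, to free GPU
# memory between independent training runs:
#
#   clf = RegressionEstimator(epochs=10).fit(X, y)
#   ...
#   clean(clf)   # deletes the inner Keras model and clears the TF session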
|
|
def save_model(model, model_path):
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    print('saving model to %s' % model_path)
    # The inner Keras model is not picklable by joblib, so it is saved
    # separately as HDF5 and stripped from the copied estimator before dumping.
    model_new = copy(model)
    model_new._model.save(os.path.join(model_path, 'inner_model.h5'))
    model_new._model = None
    model_new._performance = None
    res = dump(model_new, os.path.join(model_path, 'outer_model.est'))
    return res


def load_model(model_path, gpuid=None):
    '''
    gpuid: load the model to a specific GPU: {None, 0, 1, 2, 3, ...}
    '''
    model = load(os.path.join(model_path, 'outer_model.est'))
    if gpuid is None:
        gpuid = model.gpuid
    else:
        gpuid = str(gpuid)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpuid
    model.gpuid = gpuid
    model._model = load_tf_model(os.path.join(model_path, 'inner_model.h5'))
    return model
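

# A hedged round-trip sketch for the two helpers above (the directory name is
# illustrative, not part of the package):
#
#   save_model(clf, './aggmapnet_model')             # writes inner_model.h5 + outer_model.est
#   clf2 = load_model('./aggmapnet_model', gpuid=0)  # restores the estimator onto GPU 0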
|
|
class RegressionEstimator(BaseEstimator, RegressorMixin):
    """An AggMap CNN regression estimator.

    Parameters
    ----------
    epochs : int, default: 200
        A parameter used for training epochs.
    conv1_kernel_size: int, default: 13
        A parameter used for the kernel size of the first convolutional layer.
    dense_layers: list, default: [128]
        A parameter used for the dense layers.
    batch_size: int, default: 128
        A parameter used for the batch size.
    lr: float, default: 1e-4
        A parameter used for the learning rate.
    loss: string or function, default: 'mse'
        A parameter used for the loss function.
    batch_norm: bool, default: False
        Batch normalization after the convolution layers.
    n_inception: int, default: 2
        Number of the inception layers.
    dense_avf: str, default: 'relu'
        Activation function in the dense layers.
    dropout: float, default: 0
        A parameter used for the dropout of the dense layers.
    monitor: str, default: 'val_loss'
        {'val_loss', 'val_r2'}, a monitor for model selection.
    metric: str, default: 'r2'
        {'r2', 'rmse'}, a metric parameter.
    patience: int, default: 10000
        A parameter used for early stopping.
    gpuid: int, default: 0
        A parameter used to select a specific GPU card.
    verbose: int, default: 0
        If positive, the log information of AggMapNet will be printed;
        if negative, the log information of the original model will be printed.
    random_state: int, default: 32
        Random seed.


    Examples
    --------
|
|
    >>> from aggmap import AggMapNet
    >>> clf = AggMapNet.RegressionEstimator()
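
    A minimal end-to-end sketch (the 4D shape of `X` is an assumption about
    AggMap-transformed inputs; names are illustrative, not a tested doctest):

    >>> import numpy as np
    >>> X = np.random.rand(10, 20, 20, 1)    # (n_samples, w, h, channels)
    >>> y = np.random.rand(10, 1)            # (n_samples, n_outputs)
    >>> clf = clf.fit(X, y)
    >>> y_pred = clf.predict(X)              # shape (10, 1)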
|
|
    """
|
|
    def __init__(self,
                 epochs = 200,
                 conv1_kernel_size = 13,
                 dense_layers = [128],
                 dense_avf = 'relu',
                 batch_size = 128,
                 lr = 1e-4,
                 loss = 'mse',
                 batch_norm = False,
                 n_inception = 2,
                 dropout = 0.0,
                 monitor = 'val_loss',
                 metric = 'r2',
                 patience = 10000,
                 verbose = 0,
                 random_state = 32,
                 gpuid = 0,
                ):

        self.epochs = epochs
        self.dense_layers = dense_layers
        self.conv1_kernel_size = conv1_kernel_size
        self.dense_avf = dense_avf
        self.batch_size = batch_size
        self.lr = lr
        self.loss = loss
        self.batch_norm = batch_norm
        self.n_inception = n_inception
        self.dropout = dropout
        self.monitor = monitor
        self.metric = metric
        self.patience = patience

        self.gpuid = str(gpuid)
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpuid

        self.verbose = verbose
        self.random_state = random_state
        self.is_fit = False
        self.name = "AggMap Regression Estimator"

        #print(self.get_params())

        self.history = {}
        self.history_model = {}

        # verbose1 drives the AggMapNet callback logs, verbose2 the underlying
        # Keras training logs: verbose > 0 enables the former, verbose < 0 the latter.
        if self.verbose > 0:
            self.verbose1 = self.verbose
            self.verbose2 = 0
        elif self.verbose == 0:
            self.verbose1 = 0
            self.verbose2 = 0
        elif self.verbose < 0:
            self.verbose1 = 0
            self.verbose2 = abs(self.verbose)
        print(self)
|
|
    def get_params(self, deep=True):

        model_paras = {"epochs": self.epochs,
                       "lr": self.lr,
                       "loss": self.loss,
                       "conv1_kernel_size": self.conv1_kernel_size,
                       "dense_layers": self.dense_layers,
                       "dense_avf": self.dense_avf,
                       "batch_size": self.batch_size,
                       "dropout": self.dropout,
                       "batch_norm": self.batch_norm,
                       "n_inception": self.n_inception,
                       "monitor": self.monitor,
                       "metric": self.metric,
                       "patience": self.patience,
                       "random_state": self.random_state,
                       "verbose": self.verbose,
                       "name": self.name,
                       "gpuid": self.gpuid,
                       }

        return model_paras


    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self
|
|
    def fit(self, X, y, X_valid = None, y_valid = None):

        # Check that X and y have the correct shape
        if X.ndim != 4:
            raise ValueError("Found array X with dim %d. %s expected == 4." % (X.ndim, self.name))

        if y.ndim != 2:
            raise ValueError("Found array y with dim %d. %s expected == 2." % (y.ndim, self.name))

        self.X_ = X
        self.y_ = y

        # Without an explicit validation set, monitor performance on the training set.
        if (X_valid is None) or (y_valid is None):
            X_valid = X
            y_valid = y

        np.random.seed(self.random_state)
        tf.compat.v1.set_random_seed(self.random_state)

        model = aggmodel.net._AggMapNet(X.shape[1:],
                                        n_outputs = y.shape[-1],
                                        conv1_kernel_size = self.conv1_kernel_size,
                                        batch_norm = self.batch_norm,
                                        n_inception = self.n_inception,
                                        dense_layers = self.dense_layers,
                                        dense_avf = self.dense_avf,
                                        dropout = self.dropout,
                                        last_avf = 'linear')

        opt = tf.keras.optimizers.Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(optimizer = opt, loss = self.loss)
        performance = aggmodel.cbks.Reg_EarlyStoppingAndPerformance((X, y),
                                                                    (X_valid, y_valid),
                                                                    patience = self.patience,
                                                                    criteria = self.monitor,
                                                                    verbose = self.verbose1,)

        history = model.fit(X, y,
                            batch_size = self.batch_size,
                            epochs = self.epochs, verbose = self.verbose2, shuffle = True,
                            validation_data = (X_valid, y_valid),
                            callbacks = [performance])

        self._model = model
        self._performance = performance
        self.history = self._performance.history
        self.history_model = history.history
        self.is_fit = True
        # Return the fitted regressor
        return self
|
|
    def predict(self, X):
        """
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features_w, n_features_h, n_features_c)
            Vector to be scored, where `n_samples` is the number of samples.

        Returns
        -------
        T : array-like of shape (n_samples, n_outputs)
            Returns the predicted values.
        """
        y_pred = self._model.predict(X, verbose = self.verbose)
        return y_pred
|
|
    def score(self, X, y, scoring = 'r2', sample_weight=None):
        """Returns the score using the `scoring` option on the given
        test data and labels.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        scoring : str, default: 'r2'
            Please refer to: https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        score : float
            Score of self.predict(X) w.r.t. y.
        """
        assert scoring in SCORERS.keys(), 'scoring is not in %s' % SCORERS.keys()
        scoring = get_scorer(scoring)

        return scoring(self, X, y, sample_weight=sample_weight)
|
|
    def plot_model(self, to_file='model.png',
                   show_shapes=True,
                   show_layer_names=True,
                   rankdir='TB',
                   expand_nested=False,
                   dpi=96):
        if self.is_fit:
            tf.keras.utils.plot_model(self._model,
                                      to_file=to_file,
                                      show_shapes=show_shapes,
                                      show_layer_names=show_layer_names,
                                      rankdir=rankdir,
                                      expand_nested=expand_nested,
                                      dpi=dpi)
        else:
            print('Please fit first!')
|
|
    def save_model(self, model_path):
        return save_model(self, model_path)


    def load_model(self, model_path, gpuid=None):
        return load_model(model_path, gpuid=gpuid)
|
|
    def explain_model(self, mp, X, y,
                      explain_format = 'global',
                      apply_logrithm = False,
                      apply_smoothing = False,
                      kernel_size = 3, sigma = 1.2):
        '''
        Feature importance calculation.

        Parameters
        ----------
        mp: aggmap object
        X: training or test set X arrays
        y: training or test set y arrays
        explain_format: {'local', 'global'}, default: 'global'
            Local or global feature importance; if local, X must be one sample.
        apply_logrithm: {True, False}, default: False
            Whether to apply a logarithm transformation on the importance values.
        apply_smoothing: {True, False}, default: False
            Whether to apply a smoothing transformation on the importance values.
        kernel_size: odd number, the kernel size used to perform the smoothing.
        sigma: float, sigma for the Gaussian smoothing.

        Returns
        -------
        DataFrame of feature importance
        '''
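        # A minimal usage sketch, assuming `mp` is the fitted AggMap object
        # that produced X (the returned columns are defined by explain_dev):
        #   dfe = clf.explain_model(mp, X_train, y_train, explain_format='global')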
|
|
        if explain_format == 'global':
            explain_func = GlobalIMP
        else:
            explain_func = LocalIMP

        dfe = explain_func(self, mp, X, y,
                           task_type = 'regression',
                           sigmoidy = False,
                           apply_logrithm = apply_logrithm,
                           apply_smoothing = apply_smoothing,
                           kernel_size = kernel_size, sigma = sigma)
        return dfe

    @property
    def clean(self):
        clean(self)
|
|
class MultiClassEstimator(BaseEstimator, ClassifierMixin):
    """An AggMap CNN multi-class estimator (each sample belongs to only one class).

    Parameters
    ----------
    epochs : int, default: 200
        A parameter used for training epochs.
    conv1_kernel_size: int, default: 13
        A parameter used for the kernel size of the first convolutional layer.
    dense_layers: list, default: [128]
        A parameter used for the dense layers.
    batch_size: int, default: 128
        A parameter used for the batch size.
    lr: float, default: 1e-4
        A parameter used for the learning rate.
    loss: string or function, default: 'categorical_crossentropy'
        A parameter used for the loss function.
    batch_norm: bool, default: False
        Batch normalization after the convolution layers.
    n_inception: int, default: 2
        Number of the inception layers.
    dense_avf: str, default: 'relu'
        Activation function in the dense layers.
    dropout: float, default: 0
        A parameter used for the dropout of the dense layers.
    monitor: str, default: 'val_loss'
        {'val_loss', 'val_metric'}, a monitor for model selection.
    metric: str, default: 'ACC'
        {'ROC', 'ACC', 'PRC'}, a metric parameter.
    patience: int, default: 10000
        A parameter used for early stopping.
    gpuid: int, default: 0
        A parameter used to select a specific GPU card.
    verbose: int, default: 0
        If positive, the log information of AggMapNet will be printed;
        if negative, the log information of the original model will be printed.
    random_state: int, default: 32
        Random seed.


    Examples
    --------
|
|
    >>> from aggmap import AggMapNet
    >>> clf = AggMapNet.MultiClassEstimator()
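
    A minimal sketch (the 4D `X` and one-hot `y` shapes are assumptions about
    AggMap-transformed inputs; names are illustrative, not a tested doctest):

    >>> import numpy as np
    >>> X = np.random.rand(10, 20, 20, 1)           # (n_samples, w, h, channels)
    >>> y = np.eye(2)[np.random.randint(0, 2, 10)]  # one-hot labels, shape (10, 2)
    >>> clf = clf.fit(X, y)
    >>> proba = clf.predict_proba(X)                # shape (10, 2)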
|
|
    """
|
|
    def __init__(self,
                 epochs = 200,
                 conv1_kernel_size = 13,
                 dense_layers = [128],
                 dense_avf = 'relu',
                 batch_size = 128,
                 lr = 1e-4,
                 loss = 'categorical_crossentropy',
                 batch_norm = False,
                 n_inception = 2,
                 dropout = 0.0,
                 monitor = 'val_loss',
                 metric = 'ACC',
                 patience = 10000,
                 verbose = 0,
                 last_avf = 'softmax',
                 random_state = 32,
                 gpuid = 0,
                ):

        self.epochs = epochs
        self.dense_layers = dense_layers
        self.conv1_kernel_size = conv1_kernel_size
        self.dense_avf = dense_avf
        self.batch_size = batch_size
        self.lr = lr
        self.loss = loss
        self.last_avf = last_avf

        self.batch_norm = batch_norm
        self.n_inception = n_inception
        self.dropout = dropout

        self.monitor = monitor
        self.metric = metric
        self.patience = patience

        self.gpuid = str(gpuid)
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpuid

        self.verbose = verbose
        self.random_state = random_state

        self.name = "AggMap MultiClass Estimator"
        self.is_fit = False
        #print(self.get_params())
        self.history = {}
        self.history_model = {}

        # verbose1 drives the AggMapNet callback logs, verbose2 the underlying
        # Keras training logs (same convention as RegressionEstimator).
        if self.verbose > 0:
            self.verbose1 = self.verbose
            self.verbose2 = 0
        elif self.verbose == 0:
            self.verbose1 = 0
            self.verbose2 = 0
        elif self.verbose < 0:
            self.verbose1 = 0
            self.verbose2 = abs(self.verbose)

        print(self)
|
|
    def get_params(self, deep=True):

        model_paras = {"epochs": self.epochs,
                       "lr": self.lr,
                       "loss": self.loss,
                       "conv1_kernel_size": self.conv1_kernel_size,
                       "dense_layers": self.dense_layers,
                       "dense_avf": self.dense_avf,
                       "last_avf": self.last_avf,
                       "batch_size": self.batch_size,
                       "dropout": self.dropout,
                       "batch_norm": self.batch_norm,
                       "n_inception": self.n_inception,
                       "monitor": self.monitor,
                       "metric": self.metric,
                       "patience": self.patience,
                       "random_state": self.random_state,
                       "verbose": self.verbose,
                       "name": self.name,
                       "gpuid": self.gpuid,
                       }

        return model_paras


    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self
|
|
    def fit(self, X, y,
            X_valid = None,
            y_valid = None,
            class_weight = None,
            ):

        # Check that X and y have the correct shape
        if X.ndim != 4:
            raise ValueError("Found array X with dim %d. %s expected == 4." % (X.ndim, self.name))

        if y.ndim != 2:
            raise ValueError("Found array y with dim %d. %s expected == 2." % (y.ndim, self.name))

        # Store the classes seen during fit
        self.classes_ = unique_labels(y)

        self.X_ = X
        self.y_ = y

        # Without an explicit validation set, monitor performance on the training set.
        if (X_valid is None) or (y_valid is None):
            X_valid = X
            y_valid = y

        np.random.seed(self.random_state)
        tf.compat.v1.set_random_seed(self.random_state)

        model = aggmodel.net._AggMapNet(X.shape[1:],
                                        n_outputs = y.shape[-1],
                                        conv1_kernel_size = self.conv1_kernel_size,
                                        batch_norm = self.batch_norm,
                                        n_inception = self.n_inception,
                                        dense_layers = self.dense_layers,
                                        dense_avf = self.dense_avf,
                                        dropout = self.dropout,
                                        last_avf = self.last_avf)

        opt = tf.keras.optimizers.Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(optimizer = opt, loss = self.loss, metrics = ['accuracy'])

        performance = aggmodel.cbks.CLA_EarlyStoppingAndPerformance((X, y),
                                                                    (X_valid, y_valid),
                                                                    patience = self.patience,
                                                                    criteria = self.monitor,
                                                                    metric = self.metric,
                                                                    last_avf = self.last_avf,
                                                                    verbose = self.verbose1,)

        history = model.fit(X, y,
                            batch_size = self.batch_size,
                            epochs = self.epochs, verbose = self.verbose2, shuffle = True,
                            validation_data = (X_valid, y_valid), class_weight = class_weight,
                            callbacks = [performance])

        self._model = model
        self._performance = performance
        self.history = self._performance.history
        self.history_model = history.history
        self.is_fit = True
        # Return the fitted classifier
        return self
|
|
    def predict_proba(self, X):
        """
        Probability estimates.
        The returned estimates for all classes are ordered by the label of
        classes; with the default ``last_avf='softmax'``, the network output
        is used directly as the predicted probability of each class.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features_w, n_features_h, n_features_c)
            Vector to be scored, where `n_samples` is the number of samples.

        Returns
        -------
        T : array-like of shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in ``self.classes_``.
        """
        # Check that fit has been called
        check_is_fitted(self)

        # Input validation
        if X.ndim != 4:
            raise ValueError("Found array X with dim %d. %s expected == 4." % (X.ndim, self.name))
        y_prob = self._model.predict(X, verbose = self.verbose)
        return y_prob
|
|
    def predict(self, X):
        # One-hot predictions from the argmax of the predicted probabilities.
        probs = self.predict_proba(X)
        y_pred = pd.get_dummies(np.argmax(probs, axis=1)).values
        return y_pred
|
|
    def score(self, X, y, scoring = 'accuracy', sample_weight=None):
        """Returns the score using the `scoring` option on the given
        test data and labels.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        scoring : str, default: 'accuracy'
            Please refer to: https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        score : float
            Score of self.predict(X) w.r.t. y.
        """
        assert scoring in SCORERS.keys(), 'scoring is not in %s' % SCORERS.keys()
        scoring = get_scorer(scoring)

        return scoring(self, X, y, sample_weight=sample_weight)
|
|
    def plot_model(self, to_file='model.png',
                   show_shapes=True,
                   show_layer_names=True,
                   rankdir='TB',
                   expand_nested=False,
                   dpi=96):
        if self.is_fit:
            tf.keras.utils.plot_model(self._model,
                                      to_file=to_file,
                                      show_shapes=show_shapes,
                                      show_layer_names=show_layer_names,
                                      rankdir=rankdir,
                                      expand_nested=expand_nested,
                                      dpi=dpi)
        else:
            print('Please fit first!')
|
|
    def save_model(self, model_path):
        return save_model(self, model_path)


    def load_model(self, model_path, gpuid=None):
        return load_model(model_path, gpuid=gpuid)
|
|
    def explain_model(self, mp, X, y,
                      binary_task = False,
                      explain_format = 'global',
                      apply_logrithm = False,
                      apply_smoothing = False,
                      kernel_size = 3, sigma = 1.2):
        '''
        Feature importance calculation.

        Parameters
        ----------
        mp: aggmap object
        X: training or test set X arrays
        y: training or test set y arrays
        binary_task: {True, False}
            Whether the task is binary; if True, the feature importance will
            be calculated for one class only.
        explain_format: {'local', 'global'}, default: 'global'
            Local or global feature importance; if local, X must be one sample.
        apply_logrithm: {True, False}, default: False
            Whether to apply a logarithm transformation on the importance values.
        apply_smoothing: {True, False}, default: False
            Whether to apply a smoothing transformation on the importance values.
        kernel_size: odd number, the kernel size used to perform the smoothing.
        sigma: float, sigma for the Gaussian smoothing.

        Returns
        -------
        DataFrame of feature importance
        '''
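        # A minimal usage sketch (assumed names): for a binary classification
        # task, importance can be computed for the positive class only:
        #   dfe = clf.explain_model(mp, X_train, y_train, binary_task=True)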
|
|
        if explain_format == 'global':
            explain_func = GlobalIMP
        else:
            explain_func = LocalIMP

        dfe = explain_func(self, mp, X, y,
                           binary_task = binary_task,
                           task_type = 'classification',
                           sigmoidy = False,
                           apply_logrithm = apply_logrithm,
                           apply_smoothing = apply_smoothing,
                           kernel_size = kernel_size, sigma = sigma)
        return dfe

    @property
    def clean(self):
        clean(self)
|
|
class MultiLabelEstimator(BaseEstimator, ClassifierMixin):
    """An AggMap CNN multi-label estimator (each sample can belong to multiple classes).

    Parameters
    ----------
    epochs : int, default: 200
        A parameter used for training epochs.
    conv1_kernel_size: int, default: 13
        A parameter used for the kernel size of the first convolutional layer.
    dense_layers: list, default: [128]
        A parameter used for the dense layers.
    batch_size: int, default: 128
        A parameter used for the batch size.
    lr: float, default: 1e-4
        A parameter used for the learning rate.
    loss: string or function, default: tf.nn.sigmoid_cross_entropy_with_logits
        A parameter used for the loss function.
    batch_norm: bool, default: False
        Batch normalization after the convolution layers.
    n_inception: int, default: 2
        Number of the inception layers.
    dense_avf: str, default: 'relu'
        Activation function in the dense layers.
    dropout: float, default: 0
        A parameter used for the dropout of the dense layers, such as 0.1, 0.3, 0.5.
    monitor: str, default: 'val_loss'
        {'val_loss', 'val_metric'}, a monitor for model selection.
    metric: str, default: 'ROC'
        {'ROC', 'ACC', 'PRC'}, a metric parameter.
    patience: int, default: 10000
        A parameter used for early stopping.
    gpuid: int, default: 0
        A parameter used to select a specific GPU card.
    verbose: int, default: 0
        If positive, the log information of AggMapNet will be printed;
        if negative, the log information of the original model will be printed.
    random_state: int, default: 32
        Random seed.
    name: str
        Model name.

    Examples
    --------
|
|
    >>> from aggmap import AggMapNet
    >>> clf = AggMapNet.MultiLabelEstimator()
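
    A minimal sketch (the multi-hot `y` is an assumption about multi-label
    targets; names are illustrative, not a tested doctest):

    >>> import numpy as np
    >>> X = np.random.rand(10, 20, 20, 1)                   # (n_samples, w, h, channels)
    >>> y = np.random.randint(0, 2, (10, 3)).astype(float)  # multi-hot, 3 labels
    >>> clf = clf.fit(X, y)
    >>> y_pred = clf.predict(X)   # sigmoid probabilities rounded to 0/1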
|
|
    """
|
|
    def __init__(self,
                 epochs = 200,
                 conv1_kernel_size = 13,
                 dense_layers = [128],
                 dense_avf = 'relu',
                 batch_size = 128,
                 lr = 1e-4,
                 loss = tf.nn.sigmoid_cross_entropy_with_logits,
                 batch_norm = False,
                 n_inception = 2,
                 dropout = 0.0,
                 monitor = 'val_loss',
                 metric = 'ROC',
                 patience = 10000,
                 verbose = 0,
                 random_state = 32,
                 gpuid = 0,
                ):

        self.epochs = epochs
        self.dense_layers = dense_layers
        self.conv1_kernel_size = conv1_kernel_size
        self.dense_avf = dense_avf
        self.batch_size = batch_size
        self.lr = lr
        self.loss = loss
        self.batch_norm = batch_norm
        self.n_inception = n_inception
        self.dropout = dropout
        self.monitor = monitor
        self.metric = metric
        self.patience = patience
        self.gpuid = str(gpuid)
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpuid

        self.verbose = verbose
        self.random_state = random_state
        self.is_fit = False
        self.name = "AggMap MultiLabels Estimator"

        #print(self.get_params())
        self.history = {}
        self.history_model = {}

        # verbose1 drives the AggMapNet callback logs, verbose2 the underlying
        # Keras training logs (same convention as the other estimators).
        if self.verbose > 0:
            self.verbose1 = self.verbose
            self.verbose2 = 0
        elif self.verbose == 0:
            self.verbose1 = 0
            self.verbose2 = 0
        elif self.verbose < 0:
            self.verbose1 = 0
            self.verbose2 = abs(self.verbose)
        print(self)
|
|
    def get_params(self, deep=True):

        model_paras = {"epochs": self.epochs,
                       "lr": self.lr,
                       "loss": self.loss,
                       "conv1_kernel_size": self.conv1_kernel_size,
                       "dense_layers": self.dense_layers,
                       "dense_avf": self.dense_avf,
                       "batch_size": self.batch_size,
                       "dropout": self.dropout,
                       "batch_norm": self.batch_norm,
                       "n_inception": self.n_inception,
                       "monitor": self.monitor,
                       "metric": self.metric,
                       "patience": self.patience,
                       "random_state": self.random_state,
                       "verbose": self.verbose,
                       "name": self.name,
                       "gpuid": self.gpuid,
                       }

        return model_paras


    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self
|
|
    def fit(self, X, y, X_valid = None, y_valid = None):

        # Check that X and y have the correct shape
        if X.ndim != 4:
            raise ValueError("Found array X with dim %d. %s expected == 4." % (X.ndim, self.name))

        if y.ndim != 2:
            raise ValueError("Found array y with dim %d. %s expected == 2." % (y.ndim, self.name))

        # Store the classes seen during fit
        self.classes_ = unique_labels(y)

        self.X_ = X
        self.y_ = y

        # Without an explicit validation set, monitor performance on the training set.
        if (X_valid is None) or (y_valid is None):
            X_valid = X
            y_valid = y

        np.random.seed(self.random_state)
        tf.compat.v1.set_random_seed(self.random_state)

        # last_avf is None: the network outputs logits, matching the default
        # sigmoid_cross_entropy_with_logits loss.
        model = aggmodel.net._AggMapNet(X.shape[1:],
                                        n_outputs = y.shape[-1],
                                        conv1_kernel_size = self.conv1_kernel_size,
                                        batch_norm = self.batch_norm,
                                        n_inception = self.n_inception,
                                        dense_layers = self.dense_layers,
                                        dense_avf = self.dense_avf,
                                        dropout = self.dropout,
                                        last_avf = None)

        opt = tf.keras.optimizers.Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(optimizer = opt, loss = self.loss)
        performance = aggmodel.cbks.CLA_EarlyStoppingAndPerformance((X, y),
                                                                    (X_valid, y_valid),
                                                                    patience = self.patience,
                                                                    criteria = self.monitor,
                                                                    metric = self.metric,
                                                                    last_avf = None,
                                                                    verbose = self.verbose1,)

        history = model.fit(X, y,
                            batch_size = self.batch_size,
                            epochs = self.epochs, verbose = self.verbose2, shuffle = True,
                            validation_data = (X_valid, y_valid),
                            callbacks = [performance])

        self._model = model
        self._performance = performance
        self.history = self._performance.history
        self.history_model = history.history
        self.is_fit = True

        return self
|
|
    def predict_proba(self, X):
        """
        Probability estimates.
        The returned estimates for all labels are ordered by the label of
        classes. The network outputs logits (``last_avf=None``), so a sigmoid
        is applied to obtain the per-label probabilities.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features_w, n_features_h, n_features_c)
            Vector to be scored, where `n_samples` is the number of samples.

        Returns
        -------
        T : array-like of shape (n_samples, n_classes)
            Returns the probability of the sample for each label in the model,
            where classes are ordered as they are in ``self.classes_``.
        """
        # Check that fit has been called
        check_is_fitted(self)

        # Input validation
        if X.ndim != 4:
            raise ValueError("Found array X with dim %d. %s expected == 4." % (X.ndim, self.name))
        y_prob = self._performance.sigmoid(self._model.predict(X, verbose = self.verbose))
        return y_prob
|
|
    def predict(self, X):
        # Threshold the per-label sigmoid probabilities at 0.5.
        y_pred = np.round(self.predict_proba(X))
        return y_pred
|
|
    def score(self, X, y, scoring = 'accuracy', sample_weight=None):
        """Returns the score using the `scoring` option on the given
        test data and labels.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        scoring : str, default: 'accuracy'
            Please refer to: https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        score : float
            Score of self.predict(X) w.r.t. y.
        """
        assert scoring in SCORERS.keys(), 'scoring is not in %s' % SCORERS.keys()
        scoring = get_scorer(scoring)

        return scoring(self, X, y, sample_weight=sample_weight)
|
|
    def plot_model(self, to_file='model.png',
                   show_shapes=True,
                   show_layer_names=True,
                   rankdir='TB',
                   expand_nested=False,
                   dpi=96):
        if self.is_fit:
            tf.keras.utils.plot_model(self._model,
                                      to_file=to_file,
                                      show_shapes=show_shapes,
                                      show_layer_names=show_layer_names,
                                      rankdir=rankdir,
                                      expand_nested=expand_nested,
                                      dpi=dpi)
        else:
            print('Please fit first!')
|
|
    def save_model(self, model_path):
        return save_model(self, model_path)


    def load_model(self, model_path, gpuid=None):
        return load_model(model_path, gpuid=gpuid)
|
|
    def explain_model(self, mp,
                      X,
                      y,
                      explain_format = 'global',
                      apply_logrithm = False,
                      apply_smoothing = False,
                      kernel_size = 3, sigma = 1.2):
        '''
        Feature importance calculation.

        Parameters
        ----------
        mp: aggmap object
        X: training or test set X arrays
        y: training or test set y arrays
        explain_format: {'local', 'global'}, default: 'global'
            Local or global feature importance; if local, X must be one sample.
        apply_logrithm: {True, False}, default: False
            Whether to apply a logarithm transformation on the importance values.
        apply_smoothing: {True, False}, default: False
            Whether to apply a smoothing transformation on the importance values.
        kernel_size: odd number, the kernel size used to perform the smoothing.
        sigma: float, sigma for the Gaussian smoothing.

        Returns
        -------
        DataFrame of feature importance
        '''
|
|
        if explain_format == 'global':
            explain_func = GlobalIMP
        else:
            explain_func = LocalIMP

        dfe = explain_func(self, mp, X, y,
                           task_type = 'classification',
                           binary_task = False,
                           sigmoidy = True,
                           apply_logrithm = apply_logrithm,
                           apply_smoothing = apply_smoothing,
                           kernel_size = kernel_size, sigma = sigma)
        return dfe

    @property
    def clean(self):
        clean(self)
|
|
|