b/scripts/predict.py

import sys

sys.path.append('../')

from config import models, index_to_label, acronyms_to_entities, MAX_LENGTH

import tensorflow as tf

from scripts.utils import predict

from keras import backend as K

def precision(y_true, y_pred):
    """Compute precision metric"""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())


def recall(y_true, y_pred):
    """Compute recall metric"""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())


def f1_score(y_true, y_pred):
    """Compute F1-score metric"""
    _precision = precision(y_true, y_pred)
    _recall = recall(y_true, y_pred)
    return 2 * ((_precision * _recall) / (_precision + _recall + K.epsilon()))
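
# Worked example of the metric formulas above (illustrative values):
# with y_true = [1, 0, 1] and y_pred = [1, 0, 0] there is 1 true positive,
# 1 predicted positive, and 2 possible positives, so precision = 1/1 = 1.0,
# recall = 1/2 = 0.5, and f1_score = 2 * (1.0 * 0.5) / (1.0 + 0.5) ≈ 0.667.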

def NER(model_name, text):
    # Print the selected model
    print("Model provided: ", models[model_name]['title'])
    model_path = models[model_name]['path']

    # Register the custom metric functions so that load_model can
    # deserialize a model saved with these metrics
    tf.keras.utils.get_custom_objects()[precision.__name__] = precision
    tf.keras.utils.get_custom_objects()[recall.__name__] = recall
    tf.keras.utils.get_custom_objects()[f1_score.__name__] = f1_score
    model = tf.keras.models.load_model(model_path)

    predict(text, model, index_to_label, acronyms_to_entities, MAX_LENGTH)
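
# Minimal usage sketch (assumptions: the script is run from the scripts/
# directory so the relative sys.path entry resolves, 'bilstm' is a
# hypothetical key in config.models, and the sample sentence is
# illustrative only).
if __name__ == "__main__":
    model_name = sys.argv[1] if len(sys.argv) > 1 else 'bilstm'
    sample_text = "Patient was started on aspirin for acute coronary syndrome."
    NER(model_name, sample_text)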