b/eval_utils.py
|
# https://github.com/ptirupat/ThoughtViz
import os
import os.path
import tarfile

import numpy as np
from six.moves import urllib
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import math
import sys
from tensorflow.keras.utils import to_categorical
from tqdm import tqdm

os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '0'

MODEL_DIR = 'tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None


# Call this function with a list of images. Each element should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
    assert(type(images) == list)
    assert(type(images[0]) == np.ndarray)
    assert(len(images[0].shape) == 3)
    assert(np.max(images[0]) > 10)
    assert(np.min(images[0]) >= 0.0)
    inps = []
    for img in images:
        img = img.astype(np.float32)
        inps.append(np.expand_dims(img, 0))
    bs = 1
    with tf.Session() as sess:
        preds = []
        n_batches = int(math.ceil(float(len(inps)) / float(bs)))
        for i in tqdm(range(n_batches)):
            # sys.stdout.write(".")
            # sys.stdout.flush()
            inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
            inp = np.concatenate(inp, 0)
            pred = sess.run(softmax, {'ExpandDims:0': inp})
            preds.append(pred)
        preds = np.concatenate(preds, 0)
        scores = []
        for i in range(splits):
            part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
            # KL divergence between the conditional p(y|x) and the marginal p(y),
            # averaged over the split; the split's score is exp of that mean.
            kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
            scores.append(np.exp(kl))
        return np.mean(scores), np.std(scores)


# This function is called automatically.
def _init_inception():
    global softmax
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    # if not os.path.exists(filepath):
    #     def _progress(count, block_size, total_size):
    #         sys.stdout.write('\r>> Downloading %s %.1f%%' % (
    #             filename, float(count * block_size) / float(total_size) * 100.0))
    #         sys.stdout.flush()
    #     filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    #     print()
    #     statinfo = os.stat(filepath)
    #     print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
    with tf.compat.v2.io.gfile.GFile(os.path.join(
            MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Works with an arbitrary minibatch size.
    with tf.Session() as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = o.get_shape()
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        # Clear the batch dimension so the graph accepts any batch size.
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                o.set_shape(tf.TensorShape(new_shape))
        # Rebuild the softmax on top of pool_3 so it also handles arbitrary batch sizes.
        w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
        logits = tf.matmul(tf.squeeze(pool3, [1, 2]), w)
        softmax = tf.nn.softmax(logits)


if softmax is None:
    _init_inception()
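

# Example usage (an illustrative sketch, not part of the original file): the
# module-level _init_inception() call above assumes the Inception graph tarball
# has already been extracted under MODEL_DIR, since the download code is commented
# out. `sample_images` below is a hypothetical stand-in for a list of generated
# HxWx3 images with values in [0, 255].
if __name__ == '__main__':
    sample_images = [np.random.randint(0, 256, (64, 64, 3)).astype(np.uint8)
                     for _ in range(50)]
    mean_score, std_score = get_inception_score(sample_images, splits=5)
    print('Inception score: %.3f +/- %.3f' % (mean_score, std_score))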