# usage/usage.py
|
|
1 |
import os |
|
|
2 |
import time |
|
|
3 |
import glob |
|
|
4 |
import sys |
|
|
5 |
import numpy as np |
|
|
6 |
import openslide |
|
|
7 |
import matplotlib.pyplot as plt |
|
|
8 |
import matplotlib.gridspec as gridspec |
|
|
9 |
from mpl_toolkits.axes_grid1 import make_axes_locatable |
|
|
10 |
import cv2 |
|
|
11 |
kernel = np.ones((5,5), np.uint8) |
|
|
12 |
|
|
|
13 |
from PIL import Image |
|
|
14 |
sys.path.append('..') |
|
|
15 |
from DigiPathAI.Segmentation import getSegmentation |
|
|
16 |
|
|
|
17 |
digestpath_imgs = ['../examples/colon-cancer-1.tiff'] |
|
|
18 |
|
|
|
19 |
paip_imgs = ['../examples/examples/tcga/liver-1.svs'] |
|
|
20 |
|
|
|
21 |
tcga_imgs = ['../examples/examples/tcga/TCGA-CM.svs'] |
|
|
22 |
|
|
|
23 |
camelyon_imgs = ['../examples/Camelyon_16_Test_samples/camelyon-1.tif'] |
|
|
24 |
|
|
|
25 |
|
|
|
26 |
models = ['dense', 'inception', 'deeplabv3', 'ensemble', 'epistemic'] |
|
|
27 |
|
|
|
28 |
def iou(gt, mask): |
|
|
29 |
gt = np.uint8(np.array(gt>0.1)) |
|
|
30 |
mask = np.uint8(np.array(mask>0.1)) |
|
|
31 |
nr = np.sum(gt*mask)*2.0 |
|
|
32 |
dr = np.sum(gt + mask)*1.0 |
|
|
33 |
return nr/dr |
|
|
34 |
|
|
|
35 |
for path in tcga_imgs: |
|
|
36 |
ext = os.path.splitext(path)[1] |
|
|
37 |
base_path = os.path.splitext(path)[0] |
|
|
38 |
|
|
|
39 |
print (ext, base_path, base_path[:-5]) |
|
|
40 |
quick = True |
|
|
41 |
tta_list = ['FLIP_LEFT_RIGHT', 'ROTATE_90'] #, 'ROTATE_180', 'ROTATE_270'] |
|
|
42 |
for model in models: |
|
|
43 |
|
|
|
44 |
print (model, quick, path, "======================================") |
|
|
45 |
if model == 'ensemble': |
|
|
46 |
quick = False |
|
|
47 |
elif model == 'epistemic': |
|
|
48 |
quick = False |
|
|
49 |
tta_list = None |
|
|
50 |
""" |
|
|
51 |
getSegmentation(path, |
|
|
52 |
patch_size = 512, |
|
|
53 |
stride_size = 512, |
|
|
54 |
batch_size = 4, |
|
|
55 |
quick = quick, |
|
|
56 |
tta_list = tta_list, |
|
|
57 |
crf = False, |
|
|
58 |
mask_path = base_path + '-DigiPathAI_{}_mask'.format(model) + '.tiff', |
|
|
59 |
uncertainty_path = base_path + '-DigiPathAI_{}_uncertainty'.format(model)+ '.tiff', |
|
|
60 |
status = None, |
|
|
61 |
mask_level = 4, |
|
|
62 |
model = model, |
|
|
63 |
mode = 'breast') |
|
|
64 |
|
|
|
65 |
""" |
|
|
66 |
slide = openslide.OpenSlide(path) |
|
|
67 |
level = len(slide.level_dimensions) - 1 |
|
|
68 |
img_dimensions = slide.level_dimensions[-1] |
|
|
69 |
img = np.array(slide.read_region((0,0), level, img_dimensions).convert('RGB')) |
|
|
70 |
|
|
|
71 |
mask = openslide.OpenSlide(base_path + '-DigiPathAI_{}_mask'.format(model) + '.tiff') |
|
|
72 |
level = np.where([1 if ((dim[0] == img_dimensions[0])*(dim[1] == img_dimensions[1])) else 0 for dim in mask.level_dimensions])[0] |
|
|
73 |
mask = np.array(mask.read_region((0,0), level, img_dimensions).convert('L')) |
|
|
74 |
mask = cv2.dilate(mask, kernel, iterations=2) |
|
|
75 |
|
|
|
76 |
uncertainty = openslide.OpenSlide(base_path + '-DigiPathAI_{}_uncertainty'.format(model) + '.tiff') |
|
|
77 |
level = np.where([1 if ((dim[0] == img_dimensions[0])*(dim[1] == img_dimensions[1])) else 0 for dim in uncertainty.level_dimensions])[0] |
|
|
78 |
uncertainty = np.array(uncertainty.read_region((0,0), level, img_dimensions).convert('L')) |
|
|
79 |
|
|
|
80 |
gt = openslide.OpenSlide(glob.glob(base_path + '-gt*')[0]) |
|
|
81 |
level = np.where([1 if ((dim[0] == img_dimensions[0])*(dim[1] == img_dimensions[1])) else 0 for dim in gt.level_dimensions])[0] |
|
|
82 |
gt = np.array(gt.read_region((0,0), level, img_dimensions).convert('L'))*255 |
|
|
83 |
gt = np.array(Image.fromarray(gt).resize(img_dimensions, Image.NEAREST)) |
|
|
84 |
|
|
|
85 |
mask = np.array(Image.fromarray(mask).resize(img_dimensions, Image.NEAREST)) |
|
|
86 |
uncertainty = np.array(Image.fromarray(uncertainty).resize(img_dimensions))/255.0 |
|
|
87 |
# gt = np.array(Image.open(base_path + 'gt.jpg').convert('L').resize(img_dimensions)) |
|
|
88 |
|
|
|
89 |
|
|
|
90 |
|
|
|
91 |
print ("path: {}, model: {}, IoU: {}".format(path, model, iou(gt, mask))) |
|
|
92 |
|
|
|
93 |
|
|
|
94 |
fig, ax = plt.subplots(1, 4, figsize=(10, 40)) |
|
|
95 |
im_ = ax[0].imshow(img) |
|
|
96 |
ax[0].set_xticklabels([]) |
|
|
97 |
ax[0].set_yticklabels([]) |
|
|
98 |
ax[0].set_xticks([]) |
|
|
99 |
ax[0].set_yticks([]) |
|
|
100 |
ax[0].set_aspect('equal') |
|
|
101 |
ax[0].tick_params(bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off' ) |
|
|
102 |
|
|
|
103 |
|
|
|
104 |
im_ = ax[1].imshow(img) |
|
|
105 |
gt_ = ax[1].imshow(gt, alpha = 0.5, cmap='gray') |
|
|
106 |
ax[1].set_xticklabels([]) |
|
|
107 |
ax[1].set_yticklabels([]) |
|
|
108 |
ax[1].set_xticks([]) |
|
|
109 |
ax[1].set_yticks([]) |
|
|
110 |
ax[1].set_aspect('equal') |
|
|
111 |
ax[1].tick_params(bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off' ) |
|
|
112 |
|
|
|
113 |
im_ = ax[2].imshow(img) |
|
|
114 |
mask_ = ax[2].imshow(mask, alpha = 0.5, cmap='gray') |
|
|
115 |
ax[2].set_xticklabels([]) |
|
|
116 |
ax[2].set_yticklabels([]) |
|
|
117 |
ax[2].set_xticks([]) |
|
|
118 |
ax[2].set_yticks([]) |
|
|
119 |
ax[2].set_aspect('equal') |
|
|
120 |
ax[2].tick_params(bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off' ) |
|
|
121 |
|
|
|
122 |
im_ = ax[3].imshow(img) |
|
|
123 |
uncertain_ = ax[3].imshow(uncertainty, alpha = 0.5, cmap=plt.cm.RdBu_r) |
|
|
124 |
ax[3].set_xticklabels([]) |
|
|
125 |
ax[3].set_yticklabels([]) |
|
|
126 |
ax[3].set_xticks([]) |
|
|
127 |
ax[3].set_yticks([]) |
|
|
128 |
ax[3].set_aspect('equal') |
|
|
129 |
ax[3].tick_params(bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off' ) |
|
|
130 |
|
|
|
131 |
cax = fig.add_axes([ax[3].get_position().x1 + 0.01, |
|
|
132 |
ax[3].get_position().y0, |
|
|
133 |
0.01, |
|
|
134 |
ax[3].get_position().y1-ax[3].get_position().y0]) |
|
|
135 |
fig.colorbar(uncertain_, cax=cax) |
|
|
136 |
|
|
|
137 |
plt.savefig(base_path+'DigiPath_Results_{}.png'.format(model), bbox_inches='tight') |
|
|
138 |
|