experimental/get_counts.py
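# Experimental script: for each patch recorded in the patch-level SQL database, count the
# connected components of each class in the corresponding segmentation mask and write the
# per-class counts back out to a new SQL database.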
import brambox as bb
import os
from os.path import join, basename
from pathflowai.utils import load_sql_df, npy2da, df2sql
import skimage
import dask, dask.array as da, pandas as pd, numpy as np
import argparse
from scipy import ndimage
from scipy.ndimage import label  # scipy.ndimage.measurements is deprecated; label lives in scipy.ndimage
import pickle
from dask.distributed import Client
from multiprocessing import Pool
from functools import reduce

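# count_cells: given an integer-valued mask patch (0 = background, 1..num_classes = cell classes),
# label the connected components of the whole patch, then report, for each class, how many distinct
# components contain at least one pixel of that class.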
def count_cells(m, num_classes=3):
    lbls, n_lbl = label(m)
    obj_labels = np.zeros(num_classes)
    for i in range(1, num_classes + 1):
        obj_labels[i - 1] = len(np.unique(lbls[m == i].flatten()))
    return obj_labels


if __name__ == '__main__':
    p = argparse.ArgumentParser()
    p.add_argument('--num_classes', default=4, type=int)
    p.add_argument('--patch_size', default=512, type=int)
    p.add_argument('--n_workers', default=40, type=int)
    p.add_argument('--p_sample', default=0.7, type=float)
    p.add_argument('--input_dir', default='inputs', type=str)
    p.add_argument('--patch_info_file', default='cell_info.db', type=str)
    p.add_argument('--reference_mask', default='reference_mask.npy', type=str)
    # c = Client()
    # TODO: add mode to just use own extracted bounding boxes or from seg, maybe from histomicstk

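    # Example invocation (values are illustrative; defaults shown above):
    #   python get_counts.py --input_dir inputs --patch_info_file cell_info.db --patch_size 512 --num_classes 4 --n_workers 40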
    args = p.parse_args()
    num_classes = args.num_classes
    n_workers = args.n_workers
    input_dir = args.input_dir
    patch_info_file = args.patch_info_file
    patch_size = args.patch_size
    np.random.seed(42)
    reference_mask = args.reference_mask

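    # Load the patch metadata table (one row per extracted patch) from the SQL database and open
    # each slide's segmentation mask; npy2da is assumed to wrap the .npy mask as a dask array so
    # patch-sized slices can be pulled without loading whole masks into memory.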
    patch_info = load_sql_df(patch_info_file, patch_size)
    IDs = patch_info['ID'].unique()
    # slides = {slide: da.from_zarr(join(input_dir, '{}.zarr'.format(slide))) for slide in IDs}
    masks = {mask: npy2da(join(input_dir, '{}_mask.npy'.format(mask))) for mask in IDs}

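    # process_chunk: count cells for every patch in one chunk of the patch table; per-patch counts
    # are built as delayed tasks and evaluated together with dask's threaded scheduler.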
    def process_chunk(patch_info_sub):
        patch_info_sub = patch_info_sub.reset_index(drop=True)
        counts = []
        for i in range(patch_info_sub.shape[0]):
            # print(i)
            patch = patch_info_sub.iloc[i]
            ID, x, y, patch_size2 = patch[['ID', 'x', 'y', 'patch_size']].tolist()
            m = masks[ID][x:x + patch_size2, y:y + patch_size2]
            counts.append(dask.delayed(count_cells)(m, num_classes=num_classes))
        return dask.compute(*counts, scheduler='threading')

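    # Split the patch table into n_workers chunks and map them over a process pool; each worker
    # then runs its own threaded dask compute over the patches in its chunk. Note: this relies on
    # the workers inheriting `masks` and `num_classes` from the parent process (fork-style start method).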
    patch_info_subs = np.array_split(patch_info, n_workers)

    p = Pool(n_workers)

    counts = reduce(lambda x, y: x + y, p.map(process_chunk, patch_info_subs))

    # bbox_dfs = dask.compute(*bbox_dfs, scheduler='processes')

    counts = pd.DataFrame(np.vstack(counts))

    patch_info = pd.concat([patch_info[['ID', 'x', 'y', 'patch_size', 'annotation']].reset_index(drop=True), counts.reset_index(drop=True)], axis=1).reset_index()
    print(patch_info)

    df2sql(patch_info, 'counts_test.db', patch_size, mode='replace')
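    # The resulting table keeps the original ID/x/y/patch_size/annotation columns and appends one
    # count column per class, written to 'counts_test.db' via df2sql.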