# Source: sybil/datasets/utils.py (diff view [000000] .. [d9566e])
1
import numpy as np
2
import math
3
4
# Error Messages (format() templates: first slot is the subject, second the exception)
METAFILE_NOTFOUND_ERR = "Metadata file {} could not be parsed! Exception: {}!"
LOAD_FAIL_MSG = "Failed to load image: {}\nException: {}"
# Constants
# Sentinel token used to pad image-path sequences to a fixed length.
IMG_PAD_TOKEN = "<PAD>"
# Target voxel spacing for resampling; presumably (row, col, slice) in mm — TODO confirm against the loader.
VOXEL_SPACING = (0.703125, 0.703125, 2.5)
# Keys "0".."5" map to probabilities; presumably the per-year censoring
# (follow-up) distribution used for survival-style training — verify against
# the code that consumes this table.
CENSORING_DIST = {
    "0": 0.9851933387778276,
    "1": 0.9748326267838882,
    "2": 0.9659936099043455,
    "3": 0.9587266943913851,
    "4": 0.952360797355704,
    "5": 0.9461857341570268,
}
18
19
20
def order_slices(img_paths, slice_locations):
    """Jointly order slice image paths by their slice location, ascending.

    Args:
        img_paths: list of per-slice image paths.
        slice_locations: list of numeric slice positions, parallel to img_paths.

    Returns:
        Tuple (paths_in_order, locs_in_order): both plain Python lists,
        reordered together so the locations are ascending.
    """
    order = np.argsort(slice_locations)
    paths_in_order = [img_paths[i] for i in order]
    locs_in_order = [slice_locations[i] for i in order]
    return paths_in_order, locs_in_order
25
26
27
def assign_splits(meta, args):
    """Randomly assign each exam dict in `meta` a dataset split, in place.

    Args:
        meta: list of dicts; each one gets a "split" key written.
        args: namespace providing `split_probs`, the sampling probabilities
            for ("train", "dev", "test").
    """
    split_names = ["train", "dev", "test"]
    for exam in meta:
        exam["split"] = np.random.choice(split_names, p=args.split_probs)
32
33
34
def get_scaled_annotation_mask(additional, args, scale_annotation=True):
    """
    Construct bounding box masks for annotations.

    Args:
        additional: dict with key 'image_annotations': a list of dicts
            {'x', 'y', 'width', 'height'} whose coordinates are scaled to
            [0, 1], or None when the image has no annotations.
        args: namespace providing `img_size` as (H, W).
        scale_annotation: when True, normalize the accumulated mask so it
            sums to 1 (each value is then that pixel's share of total box
            area).

    Returns:
        (H, W) float mask. All zeros when there are no annotations. Each
        pixel's value is the fraction of that pixel covered by a bounding
        box; multiple boxes are accumulated before optional normalization.

    Fixes vs. the original:
        - A box flush with the right/bottom image border produced
          x_quant_right == W (or y_quant_bottom == H), which crashed with
          IndexError when used as a single-pixel index; indices are now
          clamped (the overhang fraction is 0 there, so values are safe).
        - An empty annotation list (or zero-area boxes) with
          scale_annotation=True divided by zero, filling the mask with NaNs;
          normalization is now guarded by a positive-total check.
    """
    H, W = args.img_size
    mask = np.zeros((H, W))
    if additional["image_annotations"] is None:
        return mask

    for annotation in additional["image_annotations"]:
        single_mask = np.zeros((H, W))
        x_left, y_top = annotation["x"] * W, annotation["y"] * H
        x_right = x_left + annotation["width"] * W
        y_bottom = y_top + annotation["height"] * H

        # pixels completely inside the bounding box
        x_quant_left, y_quant_top = math.ceil(x_left), math.ceil(y_top)
        x_quant_right, y_quant_bottom = math.floor(x_right), math.floor(y_bottom)

        # fractional excess area hanging over each quantized edge
        dx_left = x_quant_left - x_left
        dx_right = x_right - x_quant_right
        dy_top = y_quant_top - y_top
        dy_bottom = y_bottom - y_quant_bottom

        # Clamp single-pixel indices so a box flush with the right/bottom
        # border (x_right == W => x_quant_right == W) cannot index out of
        # range; in that case dx_right/dy_bottom are 0, so the clamped
        # writes are harmless and later overwritten by the interior fill.
        col_left = math.floor(x_left)
        row_top = math.floor(y_top)
        col_right = min(x_quant_right, W - 1)
        row_bottom = min(y_quant_bottom, H - 1)

        # fill in corners first in case they are over-written later by a
        # greater true intersection
        single_mask[row_top, col_left] = dx_left * dy_top
        single_mask[row_top, col_right] = dx_right * dy_top
        single_mask[row_bottom, col_left] = dx_left * dy_bottom
        single_mask[row_bottom, col_right] = dx_right * dy_bottom

        # edges: partially-covered strips along each side
        single_mask[y_quant_top:y_quant_bottom, col_left] = dx_left
        single_mask[y_quant_top:y_quant_bottom, col_right] = dx_right
        single_mask[row_top, x_quant_left:x_quant_right] = dy_top
        single_mask[row_bottom, x_quant_left:x_quant_right] = dy_bottom

        # interior pixels are fully covered
        single_mask[y_quant_top:y_quant_bottom, x_quant_left:x_quant_right] = 1

        # in case there are multiple boxes, add masks and divide by total later
        mask += single_mask

    if scale_annotation:
        total = mask.sum()
        # guard: no boxes / zero-area boxes would otherwise divide by zero
        if total > 0:
            mask /= total
    return mask
88
89
90
def get_scaled_annotation_area(sample, args):
    """Compute, per slice, the fraction of the image covered by annotations.

    The original docstring was a block of commented-out legacy code
    (referencing `self` and `os`) and has been replaced with real
    documentation; behavior is unchanged.

    Args:
        sample: dict with key "annotations": one per-slice annotation dict,
            each consumable by `get_scaled_annotation_mask`.
        args: namespace providing `img_size` as (H, W).

    Returns:
        np.ndarray of shape (num_slices,): for each slice, the summed
        unnormalized coverage mask divided by the total pixel count, i.e.
        the scaled bounding-box area on that slice.
    """
    areas = []
    for additional in sample["annotations"]:
        mask = get_scaled_annotation_mask(additional, args, scale_annotation=False)
        # mask.size == H * W, the total pixel count
        areas.append(mask.sum() / mask.size)
    return np.array(areas)
105