# -*- coding: utf-8 -*-
"""Untitled5.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1nArNVlITuU8guqfbSRpu9a5IncIaPm_a
"""

from IPython.display import clear_output

!git clone https://github.com/matterport/Mask_RCNN.git  # load Mask R-CNN code implementation
!git clone https://github.com/ruslan-kl/brain-tumor.git  # load new data set and annotations
!pip install pycocotools

!rm -rf brain-tumor/.git/
!rm -rf Mask_RCNN/.git/

clear_output()

import os
import sys
from tqdm import tqdm
import cv2
import numpy as np
import json
import skimage.draw
import skimage.io  # needed below: images are read with skimage.io.imread()
import matplotlib
import matplotlib.pyplot as plt
import random

# Root directory of the project
ROOT_DIR = os.path.abspath('Mask_RCNN/')
# Import Mask RCNN
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
from mrcnn import utils
from mrcnn.model import log
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, 'samples/coco/'))
import coco

plt.rcParams['figure.facecolor'] = 'white'

clear_output()

def get_ax(rows=1, cols=1, size=7):
    """Return a Matplotlib Axes array to be used in
    all visualizations in the notebook. Provide a
    central point to control graph sizes.

    Change the default size attribute to control the size
    of rendered images.
    """
    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
    return ax

MODEL_DIR = os.path.join(ROOT_DIR, 'logs')  # directory to save logs and trained model
# ANNOTATIONS_DIR = 'brain-tumor/data/new/annotations/'  # directory with annotations for train/val sets
DATASET_DIR = 'brain-tumor/data_cleaned/'  # directory with image data
DEFAULT_LOGS_DIR = 'logs'

# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)

class TumorConfig(Config):
    """Configuration for training on the brain tumor dataset."""
    # Give the configuration a recognizable name
    NAME = 'tumor_detector'
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    NUM_CLASSES = 1 + 1  # background + tumor
    DETECTION_MIN_CONFIDENCE = 0.85
    STEPS_PER_EPOCH = 100
    LEARNING_RATE = 0.001

config = TumorConfig()
config.display()

class BrainScanDataset(utils.Dataset):

    def load_brain_scan(self, dataset_dir, subset):
        """Load a subset of the brain scan dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train, val or test
        """
        # Add classes. We have only one class to add.
        self.add_class("tumor", 1, "tumor")

        # Train, validation or test dataset?
        assert subset in ["train", "val", "test"]
        dataset_dir = os.path.join(dataset_dir, subset)

        with open(os.path.join(dataset_dir, 'annotations_' + subset + '.json')) as f:
            annotations = json.load(f)
        annotations = list(annotations.values())  # don't need the dict keys

        # The VIA tool saves images in the JSON even if they don't have any
        # annotations. Skip unannotated images.
        annotations = [a for a in annotations if a['regions']]
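
        # For reference, each remaining annotation entry looks roughly like
        # this (an illustrative sketch of the VIA 2.x format with made-up
        # values, not copied from the actual files):
        # {
        #     "filename": "image.jpg",
        #     "regions": [
        #         {"shape_attributes": {"name": "polygon",
        #                               "all_points_x": [10, 20, 15],
        #                               "all_points_y": [30, 35, 50]}}
        #     ]
        # }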

        # Add images
        for a in annotations:
            # Get the x, y coordinates of points of the polygons that make up
            # the outline of each object instance. These are stored in the
            # shape_attributes (see json format above).
            # The if condition is needed to support VIA versions 1.x and 2.x.
            if isinstance(a['regions'], dict):
                polygons = [r['shape_attributes'] for r in a['regions'].values()]
            else:
                polygons = [r['shape_attributes'] for r in a['regions']]

            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, VIA doesn't include it in the JSON, so we must read
            # the image. This is only manageable since the dataset is tiny.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]

            self.add_image(
                "tumor",
                image_id=a['filename'],  # use file name as a unique image id
                path=image_path,
                width=width,
                height=height,
                polygons=polygons
            )

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
            masks: A bool array of shape [height, width, instance count] with
                one mask per instance.
            class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a tumor dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "tumor":
            return super(self.__class__, self).load_mask(image_id)

        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            # Get indexes of pixels inside the polygon and set them to 1
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1

        # Return mask, and array of class IDs of each instance. Since we have
        # one class ID only, we return an array of 1s
        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "tumor":
            return info["path"]
        else:
            return super(self.__class__, self).image_reference(image_id)

model = modellib.MaskRCNN(
    mode='training',
    config=config,
    model_dir=DEFAULT_LOGS_DIR
)

model.load_weights(
    COCO_MODEL_PATH,
    by_name=True,
    exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]
)
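
# The excluded layers are the ones whose output shapes depend on NUM_CLASSES:
# the COCO weights were trained with 81 classes, while this config uses 2
# (background + tumor), so the class/box/mask heads are re-initialized and
# trained from scratch.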

# Training dataset
dataset_train = BrainScanDataset()
dataset_train.load_brain_scan(DATASET_DIR, 'train')
dataset_train.prepare()

# Validation dataset
dataset_val = BrainScanDataset()
dataset_val.load_brain_scan(DATASET_DIR, 'val')
dataset_val.prepare()

# Test dataset
dataset_test = BrainScanDataset()
dataset_test.load_brain_scan(DATASET_DIR, 'test')
dataset_test.prepare()
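
# Quick sanity check (a sketch, not essential to training): draw a couple of
# random training samples with their masks to confirm the polygon annotations
# line up with the images. display_top_masks() ships with the matterport
# Mask_RCNN visualize module.
for image_id in np.random.choice(dataset_train.image_ids, 2):
    image = dataset_train.load_image(image_id)
    mask, class_ids = dataset_train.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)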

# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train for too long. Also,
# there is no need to train all layers; just the heads should do.
print("Training network heads")
model.train(
    dataset_train, dataset_val,
    learning_rate=config.LEARNING_RATE,
    epochs=15,
    layers='heads'
)
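
# Optional second stage (a sketch, left commented out): once the heads have
# converged, fine-tuning all layers at a lower learning rate can improve
# results further. The epoch count here is a guess, not tuned.
# model.train(
#     dataset_train, dataset_val,
#     learning_rate=config.LEARNING_RATE / 10,
#     epochs=20,
#     layers='all'
# )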

# Recreate the model in inference mode
model = modellib.MaskRCNN(
    mode="inference",
    config=config,
    model_dir=DEFAULT_LOGS_DIR
)

# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()

# Load trained weights
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
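
# Note: reusing the training config for inference works here only because
# GPU_COUNT and IMAGES_PER_GPU are already 1, so the batch size matches the
# single image passed to detect(). With larger training batches, the usual
# pattern (a sketch) is a dedicated inference config:
# class InferenceConfig(TumorConfig):
#     GPU_COUNT = 1
#     IMAGES_PER_GPU = 1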

def predict_and_plot_differences(dataset, img_id):
    original_image, image_meta, gt_class_id, gt_box, gt_mask = \
        modellib.load_image_gt(dataset, config,
                               img_id, use_mini_mask=False)

    results = model.detect([original_image], verbose=0)
    r = results[0]

    visualize.display_differences(
        original_image,
        gt_box, gt_class_id, gt_mask,
        r['rois'], r['class_ids'], r['scores'], r['masks'],
        class_names=['BG', 'tumor'],  # index 0 is background, so class ID 1 maps to 'tumor'
        title="", ax=get_ax(),
        show_mask=True, show_box=True)

def display_image(dataset, ind):
    plt.figure(figsize=(5, 5))
    plt.imshow(dataset.load_image(ind))
    plt.xticks([])
    plt.yticks([])
    plt.title('Original Image')
    plt.show()

# Validation set
ind = 9
display_image(dataset_val, ind)
predict_and_plot_differences(dataset_val, ind)

ind = 6
display_image(dataset_val, ind)
predict_and_plot_differences(dataset_val, ind)

# Test set
ind = 1
display_image(dataset_test, ind)
predict_and_plot_differences(dataset_test, ind)

ind = 0
display_image(dataset_test, ind)
predict_and_plot_differences(dataset_test, ind)
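
# Beyond eyeballing a few predictions, a rough quantitative check (a sketch
# using mrcnn.utils.compute_ap): mean average precision at IoU 0.5 over a
# whole dataset split.
def compute_map(dataset):
    APs = []
    for image_id in dataset.image_ids:
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
        r = model.detect([image], verbose=0)[0]
        AP, precisions, recalls, overlaps = utils.compute_ap(
            gt_bbox, gt_class_id, gt_mask,
            r['rois'], r['class_ids'], r['scores'], r['masks'])
        APs.append(AP)
    return np.mean(APs)

print("Validation mAP@0.5:", compute_map(dataset_val))
print("Test mAP@0.5:", compute_map(dataset_test))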