Diff of /Brain-Tumor-Detection.py [000000] .. [f4a75f]

--- a
+++ b/Brain-Tumor-Detection.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+"""Untitled5.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1nArNVlITuU8guqfbSRpu9a5IncIaPm_a
+"""
+
+from IPython.display import clear_output
+!git clone https://github.com/matterport/Mask_RCNN.git # clone the Mask R-CNN implementation
+!git clone https://github.com/ruslan-kl/brain-tumor.git # clone the brain tumor dataset and annotations
+!pip install pycocotools
+
+!rm -rf brain-tumor/.git/
+!rm -rf Mask_RCNN/.git/
+
+clear_output()
+
+import os
+import sys
+from tqdm import tqdm
+import cv2
+import numpy as np
+import json
+import skimage.draw
+import skimage.io  # used by load_brain_scan() to read image dimensions
+import matplotlib
+import matplotlib.pyplot as plt
+import random
+
+# Root directory of the project
+ROOT_DIR = os.path.abspath('Mask_RCNN/')
+# Import Mask RCNN
+sys.path.append(ROOT_DIR)
+from mrcnn.config import Config
+from mrcnn import utils
+from mrcnn.model import log
+import mrcnn.model as modellib
+from mrcnn import visualize
+# Import COCO config
+sys.path.append(os.path.join(ROOT_DIR, 'samples/coco/'))
+import coco
+
+plt.rcParams['figure.facecolor'] = 'white'
+
+clear_output()
+
+def get_ax(rows=1, cols=1, size=7):
+    """Return a Matplotlib Axes array to be used in
+    all visualizations in the notebook. Provide a
+    central point to control graph sizes.
+
+    Change the default size attribute to control the size
+    of rendered images
+    """
+    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
+    return ax
+
+MODEL_DIR = os.path.join(ROOT_DIR, 'logs') # directory to save logs and trained model
+# ANNOTATIONS_DIR = 'brain-tumor/data/new/annotations/' # directory with annotations for train/val sets
+DATASET_DIR = 'brain-tumor/data_cleaned/' # directory with image data
+DEFAULT_LOGS_DIR = 'logs'
+
+# Local path to trained weights file
+COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
+# Download COCO trained weights from Releases if needed
+if not os.path.exists(COCO_MODEL_PATH):
+    utils.download_trained_weights(COCO_MODEL_PATH)
+
+class TumorConfig(Config):
+    """Configuration for training on the brain tumor dataset.
+    """
+    # Give the configuration a recognizable name
+    NAME = 'tumor_detector'
+    GPU_COUNT = 1
+    IMAGES_PER_GPU = 1               # effective batch size = GPU_COUNT * IMAGES_PER_GPU
+    NUM_CLASSES = 1 + 1              # background + tumor
+    DETECTION_MIN_CONFIDENCE = 0.85  # discard detections below 85% confidence
+    STEPS_PER_EPOCH = 100
+    LEARNING_RATE = 0.001
+
+config = TumorConfig()
+config.display()
+
+class BrainScanDataset(utils.Dataset):
+
+    def load_brain_scan(self, dataset_dir, subset):
+        """Load a subset of the FarmCow dataset.
+        dataset_dir: Root directory of the dataset.
+        subset: Subset to load: train or val
+        """
+        # Add classes. We have only one class to add.
+        self.add_class("tumor", 1, "tumor")
+
+        # Train, validation, or test subset?
+        assert subset in ["train", "val", "test"]
+        dataset_dir = os.path.join(dataset_dir, subset)
+
+        annotations = json.load(open(os.path.join(DATASET_DIR, subset, 'annotations_'+subset+'.json')))
+        annotations = list(annotations.values())  # don't need the dict keys
+
+        # The VIA tool saves images in the JSON even if they don't have any
+        # annotations. Skip unannotated images.
+        annotations = [a for a in annotations if a['regions']]
+
+        # Add images
+        for a in annotations:
+            # Get the x, y coordinates of points of the polygons that make up
+            # the outline of each object instance. These are stored in the
+            # shape_attributes (see JSON format above).
+            # The if condition is needed to support VIA versions 1.x and 2.x.
+            if isinstance(a['regions'], dict):
+                polygons = [r['shape_attributes'] for r in a['regions'].values()]
+            else:
+                polygons = [r['shape_attributes'] for r in a['regions']]
+
+            # load_mask() needs the image size to convert polygons to masks.
+            # Unfortunately, VIA doesn't include it in JSON, so we must read
+            # the image. This is only manageable since the dataset is tiny.
+            image_path = os.path.join(dataset_dir, a['filename'])
+            image = skimage.io.imread(image_path)
+            height, width = image.shape[:2]
+
+            self.add_image(
+                "tumor",
+                image_id=a['filename'],  # use file name as a unique image id
+                path=image_path,
+                width=width,
+                height=height,
+                polygons=polygons
+            )
+
+    def load_mask(self, image_id):
+        """Generate instance masks for an image.
+       Returns:
+        masks: A bool array of shape [height, width, instance count] with
+            one mask per instance.
+        class_ids: a 1D array of class IDs of the instance masks.
+        """
+        # If not a tumor dataset image, delegate to the parent class.
+        image_info = self.image_info[image_id]
+        if image_info["source"] != "tumor":
+            return super(self.__class__, self).load_mask(image_id)
+
+        # Convert polygons to a bitmap mask of shape
+        # [height, width, instance_count]
+        info = self.image_info[image_id]
+        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
+                        dtype=np.uint8)
+        for i, p in enumerate(info["polygons"]):
+            # Get indexes of pixels inside the polygon and set them to 1
+            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
+            mask[rr, cc, i] = 1
+
+        # Return the mask and an array of class IDs for each instance. Since
+        # there is only one class, return an array of 1s. (Plain `bool` is
+        # used here because the `np.bool` alias was removed in NumPy 1.24.)
+        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)
+
+    def image_reference(self, image_id):
+        """Return the path of the image."""
+        info = self.image_info[image_id]
+        if info["source"] == "tumor":
+            return info["path"]
+        else:
+            return super(self.__class__, self).image_reference(image_id)
+
+model = modellib.MaskRCNN(
+    mode='training',
+    config=config,
+    model_dir=DEFAULT_LOGS_DIR
+)
+
+model.load_weights(
+    COCO_MODEL_PATH,
+    by_name=True,
+    exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]
+)
+
+# Training dataset.
+dataset_train = BrainScanDataset()
+dataset_train.load_brain_scan(DATASET_DIR, 'train')
+dataset_train.prepare()
+
+# Validation dataset
+dataset_val = BrainScanDataset()
+dataset_val.load_brain_scan(DATASET_DIR, 'val')
+dataset_val.prepare()
+
+dataset_test = BrainScanDataset()
+dataset_test.load_brain_scan(DATASET_DIR, 'test')
+dataset_test.prepare()
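+
+# Optional sanity check (not in the original notebook): report split sizes
+# and confirm a sample mask lines up with its image. display_top_masks is
+# part of mrcnn.visualize.
+print("Train: {}, Val: {}, Test: {} images".format(
+    len(dataset_train.image_ids), len(dataset_val.image_ids),
+    len(dataset_test.image_ids)))
+sample_id = random.choice(dataset_train.image_ids)
+sample_image = dataset_train.load_image(sample_id)
+sample_mask, sample_class_ids = dataset_train.load_mask(sample_id)
+visualize.display_top_masks(sample_image, sample_mask, sample_class_ids,
+                            dataset_train.class_names)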
+
+# Since we're using a very small dataset and starting from COCO-trained
+# weights, we don't need to train for long. Training only the network
+# heads (rather than all layers) should be enough; an optional
+# fine-tuning pass is sketched below.
+print("Training network heads")
+model.train(
+    dataset_train, dataset_val,
+    learning_rate=config.LEARNING_RATE,
+    epochs=15,
+    layers='heads'
+)
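+
+# Optional follow-up: fine-tune all layers at a reduced learning rate once
+# the heads have converged. Left commented out as a sketch; the epoch count
+# and the LR divisor are assumptions, not values from the original notebook.
+# model.train(
+#     dataset_train, dataset_val,
+#     learning_rate=config.LEARNING_RATE / 10,
+#     epochs=30,  # continues counting from the last heads epoch
+#     layers='all'
+# )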
+
+# Recreate the model in inference mode
+model = modellib.MaskRCNN(
+    mode="inference",
+    config=config,
+    model_dir=DEFAULT_LOGS_DIR
+)
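+
+# Note: the training config is reused for inference. That works here because
+# GPU_COUNT and IMAGES_PER_GPU are already 1; with a larger training batch
+# you would subclass TumorConfig and set IMAGES_PER_GPU = 1 so the model
+# detects one image at a time.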
+
+# Get path to saved weights
+# Either set a specific path or find last trained weights
+# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
+model_path = model.find_last()
+
+# Load trained weights
+print("Loading weights from ", model_path)
+model.load_weights(model_path, by_name=True)
+
+def predict_and_plot_differences(dataset, img_id):
+    original_image, image_meta, gt_class_id, gt_box, gt_mask =\
+        modellib.load_image_gt(dataset, config,
+                               img_id, use_mini_mask=False)
+
+    results = model.detect([original_image], verbose=0)
+    r = results[0]
+
+    visualize.display_differences(
+        original_image,
+        gt_box, gt_class_id, gt_mask,
+        r['rois'], r['class_ids'], r['scores'], r['masks'],
+        class_names=['BG', 'tumor'],  # index 0 is background, index 1 is tumor
+        title="", ax=get_ax(),
+        show_mask=True, show_box=True)
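+
+# A minimal evaluation sketch (not in the original notebook): mean Average
+# Precision at IoU 0.5 over a whole dataset, using utils.compute_ap from the
+# Mask R-CNN utilities.
+def compute_map(dataset):
+    APs = []
+    for image_id in dataset.image_ids:
+        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
+            modellib.load_image_gt(dataset, config, image_id,
+                                   use_mini_mask=False)
+        results = model.detect([image], verbose=0)
+        r = results[0]
+        AP, precisions, recalls, overlaps = utils.compute_ap(
+            gt_bbox, gt_class_id, gt_mask,
+            r['rois'], r['class_ids'], r['scores'], r['masks'])
+        APs.append(AP)
+    return np.mean(APs)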
+
+
+def display_image(dataset, ind):
+    plt.figure(figsize=(5, 5))
+    plt.imshow(dataset.load_image(ind))
+    plt.xticks([])
+    plt.yticks([])
+    plt.title('Original Image')
+    plt.show()
+
+# Validation set
+ind = 9
+display_image(dataset_val, ind)
+predict_and_plot_differences(dataset_val, ind)
+
+ind = 6
+display_image(dataset_val, ind)
+predict_and_plot_differences(dataset_val, ind)
+
+# Test set
+ind = 1
+display_image(dataset_test, ind)
+predict_and_plot_differences(dataset_test, ind)
+ind = 0
+display_image(dataset_test, ind)
+predict_and_plot_differences(dataset_test, ind)
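+
+# Optional: summarize detection quality on the validation and test splits
+# using the compute_map sketch defined above.
+print("Validation mAP @ IoU=0.5: {:.3f}".format(compute_map(dataset_val)))
+print("Test mAP @ IoU=0.5: {:.3f}".format(compute_map(dataset_test)))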