[0fa88a]: / pytorch_HE_stroma_vs_epithelial_tissue.ipynb

Download this file

708 lines (707 with data), 23.2 kB

{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "_uuid": "5e87694dcaf86e48e7423752ee27ffa3a7cc4525"
   },
   "source": [
    "## Preparing libraries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
    "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
   },
   "outputs": [],
   "source": [
    "# libraries\n",
    "import os\n",
    "\n",
    "os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'\n",
    "os.environ['CUDA_VISIBLE_DEVICES']='4,5,6,7'                       ### specify GPU number here\n",
    "\n",
    "# torch.cuda.set_device(0)   \n",
    "# torch.cuda.set_device(1)   \n",
    "# torch.cuda.set_device(2)   \n",
    "# torch.cuda.set_device(3)   \n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib.image import imread\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "import torch\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "\n",
    "from torch.utils.data import TensorDataset, DataLoader,Dataset\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import torch.optim as optim\n",
    "from torch.optim import lr_scheduler\n",
    "\n",
    "from sklearn import svm, datasets\n",
    "from sklearn.utils.multiclass import unique_labels\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.model_selection import KFold,StratifiedKFold\n",
    "from sklearn.metrics import roc_auc_score\n",
    "\n",
    "import seaborn as sns\n",
    "\n",
    "import torch.backends.cudnn as cudnn\n",
    "import time \n",
    "import tqdm\n",
    "import random\n",
    "from PIL import Image\n",
    "train_on_gpu = True\n",
    "from torch.utils.data.sampler import SubsetRandomSampler\n",
    "from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR\n",
    "from utils import print_confusion_matrix\n",
    "import cv2 \n",
    "from sklearn.utils import shuffle\n",
    "\n",
    "\n",
    "import albumentations\n",
    "from albumentations import torch as AT\n",
    "#import pretrainedmodels\n",
    "\n",
    "import scipy.special\n",
    "\n",
    "from pytorchcv.model_provider import get_model as ptcv_get_model\n",
    "\n",
    "cudnn.benchmark = True\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "from torch.utils import data\n",
    "from PIL import Image\n",
    "\n",
    "from processing_pytorch import CancerDataset, generate_dataset_tissue_type, df_dl_features"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define constants"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "INPUT_SHAPE = 224"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0",
    "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
   },
   "outputs": [],
   "source": [
    "SEED = 323\n",
    "def seed_everything(seed=SEED):\n",
    "    \"\"\"Seed every RNG source (python, numpy, torch CPU/GPU) for reproducibility.\"\"\"\n",
    "    random.seed(seed)\n",
    "    # was misspelled 'PYHTONHASHSEED', so the hash seed was never actually set\n",
    "    os.environ['PYTHONHASHSEED'] = str(seed)\n",
    "    np.random.seed(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed_all(seed)  # seed ALL visible GPUs (DataParallel uses 4 below)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "seed_everything(SEED)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "main_path = '.\\\\'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "_uuid": "5f92a7b85b373990c8aff56efd61fc09193ba337"
   },
   "source": [
    "## Preparing data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train, y_train,train_paths = generate_dataset_tissue_type(main_path,os.path.join(main_path,'data_train_stroma_vs_epithelial_tissue.csv'),SEED)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_test, y_test,test_paths = generate_dataset_tissue_type(main_path,os.path.join(main_path,'data_test_stroma_vs_epithelial_tissue.csv'),SEED)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_val, y_val,val_paths = generate_dataset_tissue_type(main_path,os.path.join(main_path,'data_valid_stroma_vs_epithelial_tissue.csv'),SEED)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "_uuid": "f2c6e4883abfb0869f277e512f343c54c70779d6"
   },
   "source": [
    "## Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "74a6d1c81be396c2993e668f0375c16a095cd793"
   },
   "outputs": [],
   "source": [
    "# Albumentations pipelines: heavy augmentation for training, plain resize+normalize\n",
    "# for evaluation, and fixed/random variants for test-time augmentation (TTA).\n",
    "# Normalize uses the ImageNet mean/std that the pretrained backbone expects.\n",
    "data_transforms = albumentations.Compose([\n",
    "    albumentations.Resize(INPUT_SHAPE, INPUT_SHAPE),\n",
    "    albumentations.RandomRotate90(p=0.5),\n",
    "    albumentations.Transpose(p=0.5),\n",
    "    albumentations.Flip(p=0.5),\n",
    "    albumentations.OneOf([\n",
    "        albumentations.CLAHE(clip_limit=2), albumentations.IAASharpen(), albumentations.IAAEmboss(), \n",
    "        albumentations.RandomBrightness(), albumentations.RandomContrast(),\n",
    "        albumentations.JpegCompression(), albumentations.Blur(), albumentations.GaussNoise()], p=0.5), \n",
    "    albumentations.HueSaturationValue(p=0.5), \n",
    "    albumentations.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, rotate_limit=45, p=0.5),\n",
    "    albumentations.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),\n",
    "    AT.ToTensor()\n",
    "    ])\n",
    "\n",
    "# evaluation: no augmentation, only resize + normalize\n",
    "data_transforms_test = albumentations.Compose([\n",
    "    albumentations.Resize(INPUT_SHAPE, INPUT_SHAPE),\n",
    "    albumentations.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),\n",
    "    AT.ToTensor()\n",
    "    ])\n",
    "\n",
    "# TTA variant 0: random geometric augmentation only\n",
    "data_transforms_tta0 = albumentations.Compose([\n",
    "    albumentations.Resize(INPUT_SHAPE, INPUT_SHAPE),\n",
    "    albumentations.RandomRotate90(p=0.5),\n",
    "    albumentations.Transpose(p=0.5),\n",
    "    albumentations.Flip(p=0.5),\n",
    "    albumentations.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),\n",
    "    AT.ToTensor()\n",
    "    ])\n",
    "\n",
    "# TTA variants 1-3: deterministic (p=1) rotate / transpose / flip\n",
    "data_transforms_tta1 = albumentations.Compose([\n",
    "    albumentations.Resize(INPUT_SHAPE, INPUT_SHAPE),\n",
    "    albumentations.RandomRotate90(p=1),\n",
    "    albumentations.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),\n",
    "    AT.ToTensor()\n",
    "    ])\n",
    "\n",
    "data_transforms_tta2 = albumentations.Compose([\n",
    "    albumentations.Resize(INPUT_SHAPE, INPUT_SHAPE),\n",
    "    albumentations.Transpose(p=1),\n",
    "    albumentations.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),\n",
    "    AT.ToTensor()\n",
    "    ])\n",
    "\n",
    "data_transforms_tta3 = albumentations.Compose([\n",
    "    albumentations.Resize(INPUT_SHAPE, INPUT_SHAPE),\n",
    "    albumentations.Flip(p=1),\n",
    "    albumentations.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),\n",
    "    AT.ToTensor()\n",
    "    ])\n",
    "\n",
    "# datasets and loaders; no sampler, so iteration order is the dataset order\n",
    "dataset = CancerDataset(X_train, y_train,  transform=data_transforms)\n",
    "test_set = CancerDataset(X_test, y_test,  transform=data_transforms_test)\n",
    "val_set = CancerDataset(X_val, y_val,  transform=data_transforms_test)\n",
    "\n",
    "batch_size = 16\n",
    "num_workers = 0\n",
    "# # prepare data loaders (combine dataset and sampler)\n",
    "train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=None, num_workers=num_workers)\n",
    "valid_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, sampler=None, num_workers=num_workers)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "_uuid": "b137050e2ac7ad1229e7e8ace3bc3a3175340859"
   },
   "source": [
    "## Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "1b8d3de18c8a43415de2c93494c436423d67ebc0"
   },
   "outputs": [],
   "source": [
    "# CBAM-ResNet-50 backbone pretrained on ImageNet, from pytorchcv.\n",
    "model_conv = ptcv_get_model(\"cbam_resnet50\", pretrained=True)\n",
    "# Replace pooling + classifier head: global average pool, then 2048 -> 512 -> 1.\n",
    "# Single-logit binary head, paired with BCEWithLogitsLoss below.\n",
    "model_conv.avg_pool = nn.AdaptiveAvgPool2d((1, 1)).cuda()\n",
    "model_conv.last_linear = nn.Sequential(nn.Dropout(0.6), nn.Linear(in_features=2048, out_features=512, bias=True), nn.SELU(),\n",
    "                                      nn.Dropout(0.8),  nn.Linear(in_features=512, out_features=1, bias=True)).cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "5d6cefbb5bca7ee5c01c77ddf1e5d6199837582c"
   },
   "outputs": [],
   "source": [
    "model_conv.cuda()\n",
    "criterion = nn.BCEWithLogitsLoss()  # binary cross entropy on raw logits (sigmoid built in)\n",
    "\n",
    "optimizer = optim.Adam(model_conv.parameters(), lr=0.0004)\n",
    "\n",
    "# Decay LR by x0.2 every 5 scheduler steps.\n",
    "# FIX: the original called scheduler.step() right after construction, which advanced\n",
    "# the schedule one step before any optimizer.step() had run (PyTorch warns that the\n",
    "# scheduler must be stepped after the optimizer); the premature call is removed.\n",
    "scheduler = StepLR(optimizer, 5, gamma=0.2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data-parallel training over the 4 visible GPUs (CUDA_VISIBLE_DEVICES above maps\n",
    "# physical GPUs 4-7 to logical device ids 0-3).\n",
    "if torch.cuda.device_count() > 1:\n",
    "    print(torch.cuda.device_count() )\n",
    "    model_conv = nn.DataParallel(model_conv,device_ids=[0,1,2,3])\n",
    "model_conv.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "d384f23b71a56031ae141e5fa4e8e216c862749c",
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Training loop: runs a validation pass every 600 training batches, saves a\n",
    "# checkpoint whenever validation AUC improves, and early-stops after `patience`\n",
    "# consecutive non-improving validations.\n",
    "val_auc_max = 0\n",
    "patience = 5\n",
    "# current number of tests, where validation loss didn't increase\n",
    "p = 0\n",
    "# whether training should be stopped\n",
    "stop = False\n",
    "\n",
    "# number of epochs to train the model\n",
    "n_epochs = 20\n",
    "for epoch in range(1, n_epochs+1):\n",
    "    \n",
    "    if stop:\n",
    "        print(\"Training stop.\")\n",
    "        break\n",
    "        \n",
    "    print(time.ctime(), 'Epoch:', epoch)\n",
    "\n",
    "    train_loss = []\n",
    "    train_auc = []\n",
    "        \n",
    "    for tr_batch_i, (data, target) in enumerate(train_loader):\n",
    "        \n",
    "        model_conv.train()  # re-enable dropout after the eval() used for validation below\n",
    "\n",
    "        data, target = data.cuda(), target.cuda()\n",
    "        #data, target = data.to(device), target.to(device)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        output = model_conv(data)\n",
    "        # single-logit head; BCEWithLogitsLoss applies the sigmoid internally\n",
    "        loss = criterion(output[:,0], target.float())\n",
    "        train_loss.append(loss.item())\n",
    "        \n",
    "        a = target.data.cpu().numpy()\n",
    "        try:\n",
    "            b = output[:,0].detach().cpu().numpy()\n",
    "            train_auc.append(roc_auc_score(a, b))\n",
    "        except:  # roc_auc_score raises when the batch contains only one class; skip it\n",
    "            pass\n",
    "\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        # periodic validation pass every 600 training batches\n",
    "        if (tr_batch_i+1)%600 == 0:  \n",
    "            #model_conv = nn.DataParallel(model_conv)\n",
    "            model_conv.eval()\n",
    "            val_loss = []\n",
    "            val_auc = []\n",
    "            # NOTE(review): this validation loop runs without torch.no_grad(), so it\n",
    "            # builds autograd graphs and wastes GPU memory; consider wrapping it.\n",
    "            for val_batch_i, (data, target) in enumerate(valid_loader):\n",
    "                data, target = data.cuda(), target.cuda()\n",
    "                #data, target = data.to(device), target.to(device)\n",
    "                output = model_conv(data)\n",
    "\n",
    "                loss = criterion(output[:,0], target.float())\n",
    "\n",
    "                val_loss.append(loss.item()) \n",
    "                a = target.data.cpu().numpy()\n",
    "                try:\n",
    "                    b = output[:,0].detach().cpu().numpy()\n",
    "                    val_auc.append(roc_auc_score(a, b))\n",
    "                except:  # single-class batch; skip\n",
    "                    pass\n",
    "\n",
    "            print('Epoch %d, batches:%d, train loss: %.4f, valid loss: %.4f.'%(epoch, tr_batch_i, np.mean(train_loss), np.mean(val_loss)) \n",
    "                  + '  train auc: %.4f, valid auc: %.4f'%(np.mean(train_auc),np.mean(val_auc)))\n",
    "            # reset running train metrics after each report\n",
    "            train_loss = []\n",
    "            train_auc = []\n",
    "            valid_auc = np.mean(val_auc)\n",
    "            if valid_auc > val_auc_max:\n",
    "                print('Validation auc increased ({:.6f} --> {:.6f}).  Saving model ...'.format(\n",
    "                val_auc_max,\n",
    "                valid_auc))\n",
    "                torch.save(model_conv.state_dict(), r\".//model_epoch_{}_val_{:.4f}.pt\".format(epoch ,(valid_auc*1000)))\n",
    "                #torch.save(model_conv.state_dict(), 'model.pt')\n",
    "                val_auc_max = valid_auc\n",
    "                p = 0\n",
    "            else:\n",
    "                p += 1\n",
    "                if p > patience:\n",
    "                    print('Early stop training')\n",
    "                    stop = True\n",
    "                    break   \n",
    "            scheduler.step()  # NOTE(review): StepLR advances once per validation, not per epoch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "a88a78608a97536127d769975d4a2c32398ae130"
   },
   "outputs": [],
   "source": [
    "torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "_uuid": "964ed06f462a6c6e6503f5f4259f705e650066a4"
   },
   "source": [
    "## Generate deep learning features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "71258d16737b84ad8ef4b6f69960224f429d73e5"
   },
   "outputs": [],
   "source": [
    "model_conv.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "d427380ffd08093b5bc2a751cba7ca08c75332d6"
   },
   "outputs": [],
   "source": [
    "# Restore the best checkpoint produced by the training loop above.\n",
    "# NOTE(review): the filename is hardcoded; update it after each training run.\n",
    "saved_dict = torch.load(r\".\\\\model_epoch_2_val_952.8703_with_valid_test.pt\")\n",
    "\n",
    "model_conv.load_state_dict(saved_dict)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Feature extractor: take the last child of the model and reuse its `.features`\n",
    "# sub-module on CPU -- presumably the conv backbone of the (DataParallel-wrapped)\n",
    "# network. NOTE(review): confirm children() ordering still holds after wrapping.\n",
    "new_classifier = nn.Sequential(*list(model_conv.children())[-1].features).cpu()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "INPUT_SHAPE = 224\n",
    "# NOTE(review): this REDEFINES `data_transforms` (the training augmentation defined\n",
    "# earlier) to a plain resize+normalize pipeline; every later use of `data_transforms`,\n",
    "# including the TTA section below, receives this un-augmented version.\n",
    "data_transforms = albumentations.Compose([\n",
    "    albumentations.Resize(INPUT_SHAPE, INPUT_SHAPE),\n",
    "    albumentations.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),\n",
    "    AT.ToTensor()\n",
    "    ])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_features = df_dl_features(X_train,train_paths,data_transforms,new_classifier)\n",
    "train_features.to_csv(r\".\\train_features_tissue_type_he.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_features = df_dl_features(X_test,test_paths,data_transforms,new_classifier)\n",
    "test_features.to_csv(r\".\\test_features_tissue_type_he.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "valid_features = df_dl_features(X_val,val_paths,data_transforms,new_classifier)\n",
    "valid_features.to_csv(r\".\\valid_features_tissue_type_he.csv\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "_uuid": "5013176b39c0142ff6dfd2a3f013ea2ba2f2560c"
   },
   "source": [
    "## TTA inference"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "591fa5e73bf4fedf11a15e50c5fcc788cbd4a0b8"
   },
   "outputs": [],
   "source": [
    "NUM_TTA = 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "9dacb064b3fbbf82a9ef49eb0a5eb1b33a74d06f"
   },
   "outputs": [],
   "source": [
    "# scipy's expit IS the logistic sigmoid; bind it directly instead of wrapping a lambda\n",
    "sigmoid = scipy.special.expit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "01a2b59c1fe53196e7b8d1f390eae59d3e982b3f"
   },
   "outputs": [],
   "source": [
    "def def_tta(X_data,y_data):\n",
    "    \"\"\"Average sigmoid predictions over NUM_TTA test-time-augmentation passes.\n",
    "\n",
    "    Pass 0 uses the plain test transform, passes 1-3 the deterministic rotate/\n",
    "    transpose/flip transforms, passes 4-7 the random geometric TTA transform, and\n",
    "    remaining passes the current value of `data_transforms` (redefined above to\n",
    "    plain resize+normalize).  Returns a DataFrame with columns 'imgs' (file stem)\n",
    "    and 'preds' (mean probability over all passes).\n",
    "    \"\"\"\n",
    "    def _pick_transform(num_tta):\n",
    "        # same selection scheme as the original copy-pasted if/elif chain\n",
    "        if num_tta == 0:\n",
    "            return data_transforms_test\n",
    "        elif num_tta == 1:\n",
    "            return data_transforms_tta1\n",
    "        elif num_tta == 2:\n",
    "            return data_transforms_tta2\n",
    "        elif num_tta == 3:\n",
    "            return data_transforms_tta3\n",
    "        elif num_tta < 8:\n",
    "            return data_transforms_tta0\n",
    "        else:\n",
    "            return data_transforms\n",
    "\n",
    "    model_conv.eval()  # FIX: make sure dropout layers are disabled during inference\n",
    "    test_preds = None\n",
    "    for num_tta in range(NUM_TTA):\n",
    "        test_set = CancerDataset(X_data, y_data, transform=_pick_transform(num_tta))\n",
    "        test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, num_workers=num_workers)\n",
    "\n",
    "        preds = []\n",
    "        with torch.no_grad():  # inference only; avoids building autograd graphs\n",
    "            for data, target in test_loader:\n",
    "                pr = model_conv(data.cuda())[:, 0].cpu().numpy()\n",
    "                for i in pr:\n",
    "                    preds.append(sigmoid(i)/NUM_TTA)\n",
    "        if num_tta == 0:\n",
    "            test_preds = pd.DataFrame({'imgs': test_set.image_files_list, 'preds': preds})\n",
    "            test_preds['imgs'] = test_preds['imgs'].apply(lambda x: x.split('.')[0])\n",
    "        else:\n",
    "            test_preds['preds'] += np.array(preds)\n",
    "        print(num_tta)\n",
    "    return test_preds\n",
    "\n",
    "test_preds = def_tta(X_test,y_test)\n",
    "valid_preds = def_tta(X_val,y_val)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Generate figures"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "CLASS_NAMES = [\"stroma\", \"epithelial tissue\"]  # label 0 -> stroma, 1 -> epithelial tissue\n",
    "\n",
    "def to_class_names(values):\n",
    "    \"\"\"Map 0/1 labels to class-name strings.\n",
    "\n",
    "    Values other than 0/1 are skipped, matching the original loop behaviour.\n",
    "    \"\"\"\n",
    "    return [CLASS_NAMES[int(v)] for v in values if v in (0, 1)]\n",
    "\n",
    "# threshold the averaged TTA probabilities at 0.5\n",
    "predictions_test = np.array(test_preds['preds'] > 0.5).astype(int)\n",
    "validations_test = np.array(valid_preds['preds'] > 0.5).astype(int)\n",
    "\n",
    "new_y_test = to_class_names(y_test)\n",
    "new_y_valid = to_class_names(y_val)\n",
    "new_pred_test = to_class_names(predictions_test)\n",
    "new_pred_valid = to_class_names(validations_test)\n",
    "\n",
    "fig = print_confusion_matrix(np.array(new_y_valid), new_pred_valid, class_names=CLASS_NAMES, normalize=True)\n",
    "fig.savefig(r\".\\new_msi_norm_tissue_validation\")\n",
    "fig_2 = print_confusion_matrix(np.array(new_y_test), new_pred_test, class_names=CLASS_NAMES, normalize=True)\n",
    "fig_2.savefig(r\".\\new_2_msi_norm_tissue_test\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_uuid": "01c137e8c7c8e45a4bc966fa9d815669b7c2d0be"
   },
   "outputs": [],
   "source": [
    "import sklearn.metrics  # FIX: 'sklearn' itself was never imported above -> NameError\n",
    "\n",
    "# FIX: pass the probability column, not the whole DataFrame (which also holds the\n",
    "# string 'imgs' column and would fail inside roc_curve).\n",
    "fpr_test, tpr_test, thresholds_test = sklearn.metrics.roc_curve(np.array(y_test), np.array(test_preds['preds']))\n",
    "roc_auc_test = sklearn.metrics.auc(fpr_test, tpr_test)\n",
    "fpr_val, tpr_val, thresholds_val = sklearn.metrics.roc_curve(np.array(y_val), np.array(valid_preds['preds']))\n",
    "roc_auc_val = sklearn.metrics.auc(fpr_val, tpr_val)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Plot ROC curves for test and validation sets with their AUCs, plus the chance line.\n",
    "plt.figure()\n",
    "lw = 2\n",
    "plt.plot(fpr_test, tpr_test,\n",
    "         label='ROC curve test dataset (area = {0:0.2f})'\n",
    "               ''.format(roc_auc_test),\n",
    "         color='darkorange', linewidth=2)\n",
    "\n",
    "plt.plot(fpr_val, tpr_val,\n",
    "         label='ROC curve validation dataset (area = {0:0.2f})'\n",
    "               ''.format(roc_auc_val),\n",
    "         color='green',  linewidth=2)\n",
    "plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')  # chance diagonal\n",
    "plt.xlim([0.0, 1.0])\n",
    "plt.ylim([0.0, 1.05])\n",
    "plt.xlabel('False Positive Rate')\n",
    "plt.ylabel('True Positive Rate')\n",
    "plt.title('Receiver operating characteristic tissue type')\n",
    "plt.legend(loc=\"lower right\")\n",
    "plt.savefig(r'.\\new_roc_curves_valid_test.png', bbox_inches='tight')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sklearn.metrics  # FIX: 'sklearn' itself was never imported above -> NameError\n",
    "\n",
    "# FIX: keep the index when exporting -- it holds the class/metric row labels\n",
    "# ('stroma', 'epithelial tissue', 'accuracy', ...); index=None dropped them.\n",
    "report = sklearn.metrics.classification_report(np.array(new_y_valid), np.array(new_pred_valid), output_dict=True)\n",
    "df = pd.DataFrame(report).transpose()\n",
    "df.to_excel(r\".\\classification_report_HE_gland_vs_tissue_valid.xlsx\")\n",
    "\n",
    "report = sklearn.metrics.classification_report(np.array(new_y_test), np.array(new_pred_test), output_dict=True)\n",
    "df = pd.DataFrame(report).transpose()\n",
    "df.to_excel(r\".\\classification_report_HE_gland_vs_tissue_test.xlsx\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}