Serialized / prepare_ensembling.ipynb

{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/reina/anaconda3/envs/RSNA/lib/python3.6/importlib/_bootstrap.py:219: ImportWarning: can't resolve package from __spec__ or __package__, falling back on __name__ and __path__\n",
      "  return f(*args, **kwds)\n",
      "/home/reina/anaconda3/envs/RSNA/lib/python3.6/importlib/_bootstrap.py:219: ImportWarning: can't resolve package from __spec__ or __package__, falling back on __name__ and __path__\n",
      "  return f(*args, **kwds)\n"
     ]
    }
   ],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "\n",
    "import numpy as np # linear algebra\n",
    "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
    "import os\n",
    "import datetime\n",
    "import seaborn as sns\n",
    "\n",
    "#import pydicom\n",
    "import time\n",
    "from functools import partial\n",
    "import gc\n",
    "import operator \n",
    "import matplotlib.pyplot as plt\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.utils.data as D\n",
    "import torch.nn.functional as F\n",
    "from sklearn.model_selection import KFold\n",
    "from tqdm import tqdm, tqdm_notebook\n",
    "from IPython.core.interactiveshell import InteractiveShell\n",
    "InteractiveShell.ast_node_interactivity = \"all\"\n",
    "import warnings\n",
    "warnings.filterwarnings(action='once')\n",
    "import pickle\n",
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "%matplotlib inline\n",
    "from skimage.io import imread,imshow\n",
    "from helper import *\n",
    "import helper\n",
    "import torchvision.models as models\n",
    "from torch.optim import Adam\n",
    "from defenitions import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# here you should set which model parameters you want to choose (see definitions.py) and what GPU to use\n",
    "\n",
    "device=device_by_name(\"Tesla\") # RTX , cpu\n",
    "torch.cuda.set_device(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(674252, 15)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "text/plain": [
       "(674252, 15)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>PatientID</th>\n",
       "      <th>epidural</th>\n",
       "      <th>intraparenchymal</th>\n",
       "      <th>intraventricular</th>\n",
       "      <th>subarachnoid</th>\n",
       "      <th>subdural</th>\n",
       "      <th>any</th>\n",
       "      <th>PID</th>\n",
       "      <th>StudyI</th>\n",
       "      <th>SeriesI</th>\n",
       "      <th>WindowCenter</th>\n",
       "      <th>WindowWidth</th>\n",
       "      <th>ImagePositionZ</th>\n",
       "      <th>ImagePositionX</th>\n",
       "      <th>ImagePositionY</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>63eb1e259</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>a449357f</td>\n",
       "      <td>62d125e5b2</td>\n",
       "      <td>0be5c0d1b3</td>\n",
       "      <td>['00036', '00036']</td>\n",
       "      <td>['00080', '00080']</td>\n",
       "      <td>180.199951</td>\n",
       "      <td>-125.0</td>\n",
       "      <td>-8.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2669954a7</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>363d5865</td>\n",
       "      <td>a20b80c7bf</td>\n",
       "      <td>3564d584db</td>\n",
       "      <td>['00047', '00047']</td>\n",
       "      <td>['00080', '00080']</td>\n",
       "      <td>922.530821</td>\n",
       "      <td>-156.0</td>\n",
       "      <td>45.572849</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>52c9913b1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>9c2b4bd7</td>\n",
       "      <td>3e3634f8cf</td>\n",
       "      <td>973274ffc9</td>\n",
       "      <td>40</td>\n",
       "      <td>150</td>\n",
       "      <td>4.455000</td>\n",
       "      <td>-125.0</td>\n",
       "      <td>-115.063000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>4e6ff6126</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>3ae81c2d</td>\n",
       "      <td>a1390c15c2</td>\n",
       "      <td>e5ccad8244</td>\n",
       "      <td>['00036', '00036']</td>\n",
       "      <td>['00080', '00080']</td>\n",
       "      <td>100.000000</td>\n",
       "      <td>-99.5</td>\n",
       "      <td>28.500000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>7858edd88</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>c1867feb</td>\n",
       "      <td>c73e81ed3a</td>\n",
       "      <td>28e0531b3a</td>\n",
       "      <td>40</td>\n",
       "      <td>100</td>\n",
       "      <td>145.793000</td>\n",
       "      <td>-125.0</td>\n",
       "      <td>-132.190000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   PatientID  epidural  intraparenchymal  intraventricular  subarachnoid  \\\n",
       "0  63eb1e259         0                 0                 0             0   \n",
       "1  2669954a7         0                 0                 0             0   \n",
       "2  52c9913b1         0                 0                 0             0   \n",
       "3  4e6ff6126         0                 0                 0             0   \n",
       "4  7858edd88         0                 0                 0             0   \n",
       "\n",
       "   subdural  any       PID      StudyI     SeriesI        WindowCenter  \\\n",
       "0         0    0  a449357f  62d125e5b2  0be5c0d1b3  ['00036', '00036']   \n",
       "1         0    0  363d5865  a20b80c7bf  3564d584db  ['00047', '00047']   \n",
       "2         0    0  9c2b4bd7  3e3634f8cf  973274ffc9                  40   \n",
       "3         0    0  3ae81c2d  a1390c15c2  e5ccad8244  ['00036', '00036']   \n",
       "4         0    0  c1867feb  c73e81ed3a  28e0531b3a                  40   \n",
       "\n",
       "          WindowWidth  ImagePositionZ  ImagePositionX  ImagePositionY  \n",
       "0  ['00080', '00080']      180.199951          -125.0       -8.000000  \n",
       "1  ['00080', '00080']      922.530821          -156.0       45.572849  \n",
       "2                 150        4.455000          -125.0     -115.063000  \n",
       "3  ['00080', '00080']      100.000000           -99.5       28.500000  \n",
       "4                 100      145.793000          -125.0     -132.190000  "
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_df = pd.read_csv(data_dir+'train.csv')\n",
    "train_df.shape\n",
    "train_df=train_df[~train_df.PatientID.isin(bad_images)].reset_index(drop=True)\n",
    "train_df=train_df.drop_duplicates().reset_index(drop=True)\n",
    "train_df.shape\n",
    "train_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>PatientID</th>\n",
       "      <th>epidural</th>\n",
       "      <th>intraparenchymal</th>\n",
       "      <th>intraventricular</th>\n",
       "      <th>subarachnoid</th>\n",
       "      <th>subdural</th>\n",
       "      <th>any</th>\n",
       "      <th>SeriesI</th>\n",
       "      <th>PID</th>\n",
       "      <th>StudyI</th>\n",
       "      <th>WindowCenter</th>\n",
       "      <th>WindowWidth</th>\n",
       "      <th>ImagePositionZ</th>\n",
       "      <th>ImagePositionX</th>\n",
       "      <th>ImagePositionY</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>28fbab7eb</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>ebfd7e4506</td>\n",
       "      <td>cf1b6b11</td>\n",
       "      <td>93407cadbb</td>\n",
       "      <td>30</td>\n",
       "      <td>80</td>\n",
       "      <td>158.458000</td>\n",
       "      <td>-125.0</td>\n",
       "      <td>-135.598000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>877923b8b</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>6d95084e15</td>\n",
       "      <td>ad8ea58f</td>\n",
       "      <td>a337baa067</td>\n",
       "      <td>30</td>\n",
       "      <td>80</td>\n",
       "      <td>138.729050</td>\n",
       "      <td>-125.0</td>\n",
       "      <td>-101.797981</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>a591477cb</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>8e06b2c9e0</td>\n",
       "      <td>ecfb278b</td>\n",
       "      <td>0cfe838d54</td>\n",
       "      <td>30</td>\n",
       "      <td>80</td>\n",
       "      <td>60.830002</td>\n",
       "      <td>-125.0</td>\n",
       "      <td>-133.300003</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>42217c898</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>e800f419cf</td>\n",
       "      <td>e96e31f4</td>\n",
       "      <td>c497ac5bad</td>\n",
       "      <td>30</td>\n",
       "      <td>80</td>\n",
       "      <td>55.388000</td>\n",
       "      <td>-125.0</td>\n",
       "      <td>-146.081000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>a130c4d2f</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>0.5</td>\n",
       "      <td>faeb7454f3</td>\n",
       "      <td>69affa42</td>\n",
       "      <td>854e4fbc01</td>\n",
       "      <td>30</td>\n",
       "      <td>80</td>\n",
       "      <td>33.516888</td>\n",
       "      <td>-125.0</td>\n",
       "      <td>-118.689819</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   PatientID  epidural  intraparenchymal  intraventricular  subarachnoid  \\\n",
       "0  28fbab7eb       0.5               0.5               0.5           0.5   \n",
       "1  877923b8b       0.5               0.5               0.5           0.5   \n",
       "2  a591477cb       0.5               0.5               0.5           0.5   \n",
       "3  42217c898       0.5               0.5               0.5           0.5   \n",
       "4  a130c4d2f       0.5               0.5               0.5           0.5   \n",
       "\n",
       "   subdural  any     SeriesI       PID      StudyI WindowCenter WindowWidth  \\\n",
       "0       0.5  0.5  ebfd7e4506  cf1b6b11  93407cadbb           30          80   \n",
       "1       0.5  0.5  6d95084e15  ad8ea58f  a337baa067           30          80   \n",
       "2       0.5  0.5  8e06b2c9e0  ecfb278b  0cfe838d54           30          80   \n",
       "3       0.5  0.5  e800f419cf  e96e31f4  c497ac5bad           30          80   \n",
       "4       0.5  0.5  faeb7454f3  69affa42  854e4fbc01           30          80   \n",
       "\n",
       "   ImagePositionZ  ImagePositionX  ImagePositionY  \n",
       "0      158.458000          -125.0     -135.598000  \n",
       "1      138.729050          -125.0     -101.797981  \n",
       "2       60.830002          -125.0     -133.300003  \n",
       "3       55.388000          -125.0     -146.081000  \n",
       "4       33.516888          -125.0     -118.689819  "
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_df = pd.read_csv(data_dir+'test.csv')\n",
    "test_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def my_loss(y_pred,y_true,weights):\n",
    "    window=(y_true>=0).to(torch.float)\n",
    "    loss = (F.binary_cross_entropy_with_logits(y_pred,y_true,reduction='none')*window*weights.expand_as(y_true)).mean()/(window.mean()+1e-7)\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Metric():\n",
    "    def __init__(self,weights,k=0.03):\n",
    "        self.weights=weights\n",
    "        self.k=k\n",
    "        self.zero()\n",
    "        \n",
    "    def zero(self):\n",
    "        self.loss_sum=0.\n",
    "        self.loss_count=0.\n",
    "        self.lossf=0.\n",
    "        \n",
    "    def calc(self,y_pred,y_true,prefix=\"\"):\n",
    "        window=(y_true>=0).to(torch.float)\n",
    "        loss = (F.binary_cross_entropy_with_logits(y_pred,y_true,reduction='none')*window*self.weights.expand_as(y_true)).mean()/(window.mean()+1e-5)\n",
    "        self.lossf=self.lossf*(1-self.k)+loss*self.k\n",
    "        self.loss_sum=self.loss_sum+loss*window.sum()\n",
    "        self.loss_count=self.loss_count+window.sum()\n",
    "        return({prefix+'mloss':self.lossf})    \n",
    "        \n",
    "    def calc_sums(self,prefix=\"\"):\n",
    "        return({prefix+'mloss_tot':self.loss_sum/self.loss_count})    \n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_names=[]\n",
    "types_train=[]\n",
    "types_test=[]\n",
    "versions=[]\n",
    "num_splits =[]\n",
    "seeds=[]\n",
    "for key in parameters.keys():\n",
    "    model_names.append(parameters[key]['model_name'])\n",
    "    types_train.append(parameters[key]['train_features'])\n",
    "    types_test.append(parameters[key]['test_features'])\n",
    "    versions.append(parameters[key]['version'])\n",
    "    num_splits.append(parameters[key]['n_splits'])\n",
    "    seeds.append(parameters[key]['SEED'])   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "multi=3\n",
    "for model_name,type_,version_,n,SEED in zip(model_names,types_train,versions,num_splits,seeds):\n",
    "    for num_split in tqdm_notebook(range(n)):\n",
    "        pickle_file=open(outputs_dir+'PID_splits_{}.pkl'.format(n_splits),'rb')\n",
    "        split_sid,splits=pickle.load(pickle_file)\n",
    "        pickle_file.close()\n",
    "        pred_list=[]\n",
    "        print(model_name,version_,type_,num_split) \n",
    "        pickle_file=open(outputs_dir+outputs_format.format(model_name,version_,type_,num_split),'rb')\n",
    "        features=pickle.load(pickle_file)\n",
    "        pickle_file.close()\n",
    "        features=features.reshape(features.shape[0]//4,4,-1)\n",
    "        split_validate =  train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].SeriesI.unique()\n",
    "        model=ResModelPool(features.shape[-1])\n",
    "        version=version_+'_fullhead_resmodel_pool2_{}'.format(multi)\n",
    "\n",
    "        model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))\n",
    "\n",
    "        valid_dataset=FullHeadDataset(train_df,\n",
    "                                      split_validate,\n",
    "                                      features,\n",
    "                                      'SeriesI',\n",
    "                                      'ImagePositionZ',\n",
    "                                      multi =3)\n",
    "\n",
    "        win_dataset=FullHeadDataset(train_df,\n",
    "                                      split_validate,\n",
    "                                      features,\n",
    "                                      'SeriesI',\n",
    "                                      'ImagePositionZ',\n",
    "                                       target_columns=hemorrhage_types)\n",
    "        win_list=[]\n",
    "        dl = D.DataLoader(win_dataset,batch_size=128,num_workers=16)\n",
    "        for _,win in tqdm_notebook(dl):\n",
    "            win_list.append(win.reshape(win.shape[0]*win.shape[1],-1))    \n",
    "        wins = torch.cat(win_list,0).sum(1)>=0\n",
    "        wins.sum()\n",
    "        for i in tqdm_notebook(range(32),leave=False):\n",
    "            pr = model_run(model,valid_dataset,do_apex=False,batch_size=128)\n",
    "            pred_list.append(pr.reshape(pr.shape[0]*pr.shape[1],-1)[wins])\n",
    "        pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'OOF_pred',num_split),'wb')\n",
    "        pickle.dump(pred_list,pickle_file,protocol=4)\n",
    "        pickle_file.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "multi=3\n",
    "for model_name,type_,version_,n,SEED in zip(model_names,types_test,versions,num_splits,seeds):\n",
    "    for num_split in tqdm_notebook(range(n)):\n",
    "        pred_list=[]\n",
    "        print(model_name,version_,type_,num_split) \n",
    "        pickle_file=open(outputs_dir+outputs_format.format(model_name,version_,type_,num_split),'rb')\n",
    "        features=pickle.load(pickle_file)\n",
    "        pickle_file.close()\n",
    "        features=features.reshape(features.shape[0]//8,8,-1)\n",
    "        print(features.shape)\n",
    "        model=ResModelPool(features.shape[-1])\n",
    "        version=version_+'_fullhead_resmodel_pool2_{}'.format(multi)\n",
    "\n",
    "        model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))\n",
    "\n",
    "        valid_dataset=FullHeadDataset(test_df,\n",
    "                                      test_df.SeriesI.unique(),\n",
    "                                      features,\n",
    "                                      'SeriesI',\n",
    "                                      'ImagePositionZ',\n",
    "                                      multi =4)\n",
    "\n",
    "        win_dataset=FullHeadDataset(test_df,\n",
    "                                      test_df.SeriesI.unique(),\n",
    "                                      features,\n",
    "                                      'SeriesI',\n",
    "                                      'ImagePositionZ',\n",
    "                                       target_columns=hemorrhage_types)\n",
    "        win_list=[]\n",
    "        dl = D.DataLoader(win_dataset,batch_size=128,num_workers=16)\n",
    "        for _,win in tqdm_notebook(dl):\n",
    "            win_list.append(win.reshape(win.shape[0]*win.shape[1],-1))    \n",
    "        wins = torch.cat(win_list,0).sum(1)>=0\n",
    "        wins.sum()\n",
    "        for i in tqdm_notebook(range(32),leave=False):\n",
    "            pr = model_run(model,valid_dataset,do_apex=False,batch_size=128)\n",
    "            pred_list.append(pr.reshape(pr.shape[0]*pr.shape[1],-1)[wins])\n",
    "        pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'test_pred_ensemble',num_split),'wb')\n",
    "        pickle.dump(pred_list,pickle_file,protocol=4)\n",
    "        pickle_file.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "OOF_ids={}\n",
    "SEED=8153\n",
    "n_splits=3\n",
    "pickle_file=open(outputs_dir+'PID_splits_{}.pkl'.format(n_splits),'rb')\n",
    "split_sid,splits=pickle.load(pickle_file)\n",
    "pickle_file.close()\n",
    "for i in range( n_splits):\n",
    "    images_id_list=[]\n",
    "    split_validate =  train_df[train_df.PID.isin(set(split_sid[splits[i][1]]))].SeriesI.unique()\n",
    "    image_arr=train_df.PatientID.values\n",
    "    ref_arr=train_df.SeriesI.values\n",
    "    order_arr=train_df.ImagePositionZ.values\n",
    "    for s in tqdm_notebook(split_validate):\n",
    "        head_idx = np.where(ref_arr==s)[0]\n",
    "        sorted_head_idx=head_idx[np.argsort(order_arr[head_idx])]\n",
    "        images_id_list.append(image_arr[sorted_head_idx])\n",
    "    image_ids=np.concatenate(images_id_list)\n",
    "    print(image_ids.shape,train_df[train_df.PID.isin(set(split_sid[splits[i][1]]))].shape[0])\n",
    "    OOF_ids[i]=image_ids\n",
    "\n",
    "\n",
    "pickle_file=open(outputs_dir+'OOF_validation_image_ids_{}.pkl'.format(n_splits),'wb')\n",
    "pickle.dump(OOF_ids,pickle_file,protocol=4)\n",
    "pickle_file.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "OOF_ids={}\n",
    "SEED=432\n",
    "n_splits=5\n",
    "pickle_file=open(outputs_dir+'PID_splits_{}.pkl'.format(n_splits),'rb')\n",
    "split_sid,splits=pickle.load(pickle_file)\n",
    "pickle_file.close()\n",
    "for i in range( n_splits):\n",
    "    images_id_list=[]\n",
    "    split_validate =  train_df[train_df.PID.isin(set(split_sid[splits[i][1]]))].SeriesI.unique()\n",
    "    image_arr=train_df.PatientID.values\n",
    "    ref_arr=train_df.SeriesI.values\n",
    "    order_arr=train_df.ImagePositionZ.values\n",
    "    for s in tqdm_notebook(split_validate):\n",
    "        head_idx = np.where(ref_arr==s)[0]\n",
    "        sorted_head_idx=head_idx[np.argsort(order_arr[head_idx])]\n",
    "        images_id_list.append(image_arr[sorted_head_idx])\n",
    "    image_ids=np.concatenate(images_id_list)\n",
    "    print(image_ids.shape,train_df[train_df.PID.isin(set(split_sid[splits[i][1]]))].shape[0])\n",
    "    OOF_ids[i]=image_ids\n",
    "\n",
    "\n",
    "pickle_file=open(outputs_dir+'OOF_validation_image_ids_{}.pkl'.format(n_splits),'wb')\n",
    "pickle.dump(OOF_ids,pickle_file,protocol=4)\n",
    "pickle_file.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "05087bea7c1b4380a9c6bd7ff81eb4bd",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=2214), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "images_id_list=[]\n",
    "dummeys=[]\n",
    "image_arr=test_df.PatientID.values\n",
    "ref_arr=test_df.SeriesI.values\n",
    "order_arr=test_df.ImagePositionZ.values\n",
    "for s in tqdm_notebook(test_df.SeriesI.unique()):\n",
    "    dumm=np.zeros(60)\n",
    "    head_idx = np.where(ref_arr==s)[0]\n",
    "    sorted_head_idx=head_idx[np.argsort(order_arr[head_idx])]\n",
    "    images_id_list.append(image_arr[sorted_head_idx])\n",
    "    dumm[0:head_idx.shape[0]]=1\n",
    "    dummeys.append(dumm)\n",
    "image_ids=np.concatenate(images_id_list)\n",
    "select=np.concatenate(dummeys)==1\n",
    "\n",
    "pickle_file=open(outputs_dir+'ensemble_test_image_ids.pkl','wb')\n",
    "pickle.dump(image_ids,pickle_file,protocol=4)\n",
    "pickle_file.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}