{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Tutorial - TCGA\n",
    "\n",
    "### Generating Results in Table 3 (TCGA Dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "import random\n",
    "import sys, os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import import_data as impt\n",
    "from helper import f_get_minibatch_set, evaluate\n",
    "from class_DeepIMV_AISTATS import DeepIMV_AISTATS"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "year         = 1\n",
    "DATASET_PATH = 'TCGA_{}YR'.format(int(year))\n",
    "DATASET      = 'TCGA'\n",
    "\n",
    "X_set_comp, Y_onehot_comp, Mask_comp, X_set_incomp, Y_onehot_incomp, Mask_incomp = impt.import_dataset_TCGA(year)\n",
    "\n",
    "MODE       = 'incomplete'\n",
    "model_name = 'DeepIMV_AISTATS'\n",
    "\n",
    "M = len(X_set_comp)"
   ]
  },
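  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check on what was loaded (a sketch, assuming each `X_set_comp[m]` / `X_set_incomp[m]` is a 2-D NumPy array for view `m` and the masks are per-sample view-availability indicators):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the loaded views, labels, and availability masks.\n",
    "for m in range(M):\n",
    "    print('view {}: complete {}  incomplete {}'.format(\n",
    "        m, X_set_comp[m].shape, X_set_incomp[m].shape))\n",
    "print('labels (one-hot):', Y_onehot_comp.shape,\n",
    "      '| masks:', Mask_comp.shape, Mask_incomp.shape)"
   ]
  },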
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "SEED = 1234\n",
    "OUTITERATION = 5\n",
    "\n",
    "RESULTS_AUROC_RAND = np.zeros([4, OUTITERATION+2])\n",
    "RESULTS_AUPRC_RAND = np.zeros([4, OUTITERATION+2])"
   ]
  },
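  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optionally, fix the global Python and NumPy seeds as well so that minibatch sampling is reproducible across runs (a sketch; `SEED` is defined above). Note that a graph-level TensorFlow seed would have to be set after the `tf.reset_default_graph()` call in the training section to affect weight initialisation."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Seed Python's and NumPy's global RNGs for reproducible minibatch sampling.\n",
    "random.seed(SEED)\n",
    "np.random.seed(SEED)\n",
    "# For TensorFlow 1.x, tf.set_random_seed(SEED) must be called after\n",
    "# tf.reset_default_graph() to influence variable initialisation."
   ]
  },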
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "out_itr = 1\n",
    "\n",
    "tr_X_set, te_X_set, va_X_set = {}, {}, {}\n",
    "for m in range(M):\n",
    "    tr_X_set[m],te_X_set[m] = train_test_split(X_set_comp[m], test_size=0.2, random_state=SEED + out_itr)\n",
    "    tr_X_set[m],va_X_set[m] = train_test_split(tr_X_set[m], test_size=0.2, random_state=SEED + out_itr)\n",
    "    \n",
    "tr_Y_onehot,te_Y_onehot, tr_M,te_M = train_test_split(Y_onehot_comp, Mask_comp, test_size=0.2, random_state=SEED + out_itr)\n",
    "tr_Y_onehot,va_Y_onehot, tr_M,va_M = train_test_split(tr_Y_onehot, tr_M, test_size=0.2, random_state=SEED + out_itr)"
   ]
  },
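  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The per-view splits above stay row-aligned because `train_test_split` applies the same shuffle whenever it is called with the same `random_state` on inputs of equal length. A small check (a sketch) to confirm that the splits line up with the labels and masks before the incomplete samples are appended:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Every view should have the same number of rows as its labels and mask.\n",
    "for m in range(M):\n",
    "    assert len(tr_X_set[m]) == len(tr_Y_onehot) == len(tr_M)\n",
    "    assert len(va_X_set[m]) == len(va_Y_onehot) == len(va_M)\n",
    "    assert len(te_X_set[m]) == len(te_Y_onehot) == len(te_M)\n",
    "print('train/valid/test sizes:', len(tr_M), len(va_M), len(te_M))"
   ]
  },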
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(5850, 4)\n"
     ]
    }
   ],
   "source": [
    "if MODE == 'incomplete':\n",
    "    for m in range(M):\n",
    "        tr_X_set[m] = np.concatenate([tr_X_set[m], X_set_incomp[m]], axis=0)\n",
    "\n",
    "    tr_Y_onehot = np.concatenate([tr_Y_onehot, Y_onehot_incomp], axis=0)\n",
    "    tr_M        = np.concatenate([tr_M, Mask_incomp], axis=0)\n",
    "    \n",
    "    print(tr_M.shape)\n",
    "elif MODE == 'complete':\n",
    "    print(tr_M.shape)\n",
    "else:\n",
    "    raise ValueError('WRONG MODE!!!')\n",
    "    \n",
    "\n",
    "save_path = '{}/M{}_{}/{}/'.format(DATASET_PATH, M, MODE, model_name)\n",
    "    \n",
    "    \n",
    "if not os.path.exists(save_path + 'itr{}/'.format(out_itr)):\n",
    "    os.makedirs(save_path + 'itr{}/'.format(out_itr))"
   ]
  },
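  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Each row of `tr_M` indicates which views are observed for that sample (one binary column per view). The breakdown below (a sketch) shows how many training samples have each number of observed views after the incomplete samples are appended:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count training samples by the number of observed views.\n",
    "views_per_sample = tr_M.sum(axis=1).astype(int)\n",
    "for k in range(1, M + 1):\n",
    "    print('{} view(s) observed: {} samples'.format(k, int(np.sum(views_per_sample == k))))"
   ]
  },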
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Hyper-parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "### training coefficients\n",
    "alpha    = 1.0\n",
    "beta     = 0.01 # IB coefficient\n",
    "lr_rate  = 1e-4\n",
    "k_prob   = 0.7\n",
    "\n",
    "\n",
    "### network parameters\n",
    "mb_size         = 32 \n",
    "steps_per_batch = int(np.shape(tr_M)[0]/mb_size)\n",
    "steps_per_batch = 500\n",
    "\n",
    "x_dim_set    = [tr_X_set[m].shape[1] for m in range(len(tr_X_set))]\n",
    "y_dim        = np.shape(tr_Y_onehot)[1]\n",
    "y_type       = 'binary'\n",
    "z_dim        = 100\n",
    "\n",
    "h_dim_p      = 100\n",
    "num_layers_p = 2\n",
    "\n",
    "h_dim_e      = 300\n",
    "num_layers_e = 3\n",
    "\n",
    "input_dims = {\n",
    "    'x_dim_set': x_dim_set,\n",
    "    'y_dim': y_dim,\n",
    "    'y_type': y_type,\n",
    "    'z_dim': z_dim,\n",
    "    \n",
    "    'steps_per_batch': steps_per_batch\n",
    "}\n",
    "\n",
    "network_settings = {\n",
    "    'h_dim_p1': h_dim_p,\n",
    "    'num_layers_p1': num_layers_p,   #view-specific\n",
    "    'h_dim_p2': h_dim_p,\n",
    "    'num_layers_p2': num_layers_p,  #multi-view\n",
    "    'h_dim_e': h_dim_e,\n",
    "    'num_layers_e': num_layers_e,\n",
    "    'fc_activate_fn': tf.nn.relu,\n",
    "    'reg_scale': 0., #1e-4,\n",
    "}\n"
   ]
  },
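  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before building the network, it can help to echo the derived dimensions (a sketch; all names come from the cell above):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Print the dimensions that will be passed to the model.\n",
    "print('per-view input dims:', x_dim_set)\n",
    "print('label dim:', y_dim, '({})'.format(y_type))\n",
    "print('latent dim (z_dim):', z_dim, '| steps_per_batch:', steps_per_batch)"
   ]
  },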
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "tf.reset_default_graph()\n",
    "\n",
    "# gpu_options = tf.GPUOptions()\n",
    "gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.22)\n",
    "sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n",
    "\n",
    "model = DeepIMV_AISTATS(sess, \"DeepIMV_AISTATS\", input_dims, network_settings)\n",
    "\n",
    "saver = tf.train.Saver()\n",
    "sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "00500: TRAIN| Lt=6.454 Lp=0.994 Lkl=42.950 Lps=4.288 Lkls=74.204 Lc=49.079 | VALID| Lt=5.554 Lp=0.929 Lkl=45.010 Lps=3.849 Lkls=32.576 Lc=46.531 score=(0.5993706489643726, 0.20887111832184874)\n",
      "saved...\n",
      "01000: TRAIN| Lt=5.237 Lp=0.856 Lkl=39.923 Lps=3.558 Lkls=42.400 Lc=43.212 | VALID| Lt=4.698 Lp=0.793 Lkl=43.676 Lps=3.274 Lkls=19.390 Lc=43.746 score=(0.7675711791710671, 0.2945589414475165)\n",
      "saved...\n",
      "01500: TRAIN| Lt=4.808 Lp=0.744 Lkl=38.987 Lps=3.332 Lkls=34.243 Lc=41.684 | VALID| Lt=4.419 Lp=0.717 Lkl=42.991 Lps=3.106 Lkls=16.574 Lc=43.175 score=(0.7953315947151756, 0.32247570838825707)\n",
      "saved...\n",
      "02000: TRAIN| Lt=4.523 Lp=0.687 Lkl=38.207 Lps=3.151 Lkls=30.336 Lc=40.726 | VALID| Lt=4.272 Lp=0.679 Lkl=42.528 Lps=3.014 Lkls=15.384 Lc=42.915 score=(0.7921632864193805, 0.36678881900359384)\n",
      "saved...\n",
      "02500: TRAIN| Lt=4.286 Lp=0.633 Lkl=37.788 Lps=2.994 Lkls=28.058 Lc=40.309 | VALID| Lt=4.211 Lp=0.677 Lkl=42.190 Lps=2.970 Lkls=14.196 Lc=42.683 score=(0.7951160635385909, 0.36298993132414803)\n",
      "saved...\n",
      "03000: TRAIN| Lt=4.229 Lp=0.621 Lkl=37.510 Lps=2.975 Lkls=25.871 Lc=39.875 | VALID| Lt=4.188 Lp=0.676 Lkl=41.952 Lps=2.961 Lkls=13.201 Lc=42.500 score=(0.8097075241933746, 0.41376471395985764)\n",
      "saved...\n",
      "03500: TRAIN| Lt=4.001 Lp=0.570 Lkl=37.365 Lps=2.802 Lkls=25.548 Lc=39.800 | VALID| Lt=4.182 Lp=0.675 Lkl=41.812 Lps=2.959 Lkls=12.999 Lc=42.445 score=(0.7994482401879431, 0.38641092091905127)\n",
      "saved...\n",
      "04000: TRAIN| Lt=3.809 Lp=0.524 Lkl=37.191 Lps=2.659 Lkls=25.413 Lc=39.659 | VALID| Lt=4.171 Lp=0.673 Lkl=41.692 Lps=2.951 Lkls=12.952 Lc=42.425 score=(0.8013664676595469, 0.40857793712669693)\n",
      "saved...\n",
      "04500: TRAIN| Lt=3.721 Lp=0.500 Lkl=37.069 Lps=2.603 Lkls=24.795 Lc=39.503 | VALID| Lt=4.199 Lp=0.691 Lkl=41.590 Lps=2.970 Lkls=12.149 Lc=42.290 score=(0.8095350992521069, 0.3908763759078532)\n",
      "05000: TRAIN| Lt=3.584 Lp=0.468 Lkl=36.996 Lps=2.499 Lkls=24.732 Lc=39.387 | VALID| Lt=4.222 Lp=0.683 Lkl=41.568 Lps=3.004 Lkls=11.931 Lc=42.247 score=(0.8059680582796301, 0.3793825246248715)\n",
      "05500: TRAIN| Lt=3.491 Lp=0.415 Lkl=37.158 Lps=2.448 Lkls=25.743 Lc=39.493 | VALID| Lt=4.229 Lp=0.691 Lkl=41.672 Lps=2.999 Lkls=12.135 Lc=42.268 score=(0.8071858094273336, 0.3889992215812622)\n",
      "06000: TRAIN| Lt=3.203 Lp=0.369 Lkl=37.194 Lps=2.200 Lkls=26.217 Lc=39.626 | VALID| Lt=4.317 Lp=0.713 Lkl=41.652 Lps=3.064 Lkls=12.409 Lc=42.303 score=(0.8036295450136862, 0.37662407670056136)\n",
      "06500: TRAIN| Lt=3.164 Lp=0.335 Lkl=37.223 Lps=2.193 Lkls=26.342 Lc=39.580 | VALID| Lt=4.451 Lp=0.739 Lkl=41.631 Lps=3.180 Lkls=11.647 Lc=42.185 score=(0.8091255900165959, 0.3946424315886283)\n",
      "07000: TRAIN| Lt=2.999 Lp=0.304 Lkl=37.251 Lps=2.057 Lkls=26.575 Lc=39.603 | VALID| Lt=4.551 Lp=0.769 Lkl=41.639 Lps=3.248 Lkls=11.771 Lc=42.199 score=(0.8006552147768174, 0.370863714209815)\n",
      "07500: TRAIN| Lt=2.906 Lp=0.282 Lkl=37.167 Lps=1.988 Lkls=26.441 Lc=39.570 | VALID| Lt=4.596 Lp=0.778 Lkl=41.552 Lps=3.290 Lkls=11.228 Lc=42.119 score=(0.8097290773110329, 0.39510639377535883)\n",
      "08000: TRAIN| Lt=2.769 Lp=0.247 Lkl=37.136 Lps=1.886 Lkls=26.457 Lc=39.533 | VALID| Lt=4.599 Lp=0.801 Lkl=41.528 Lps=3.270 Lkls=11.238 Lc=42.125 score=(0.8128542793715111, 0.41143309089456515)\n",
      "08500: TRAIN| Lt=2.716 Lp=0.232 Lkl=37.163 Lps=1.841 Lkls=27.146 Lc=39.555 | VALID| Lt=4.615 Lp=0.807 Lkl=41.499 Lps=3.282 Lkls=11.072 Lc=42.104 score=(0.8085221027221589, 0.38547322403694323)\n",
      "09000: TRAIN| Lt=2.657 Lp=0.220 Lkl=37.092 Lps=1.803 Lkls=26.330 Lc=39.509 | VALID| Lt=4.735 Lp=0.817 Lkl=41.439 Lps=3.395 Lkls=10.821 Lc=42.076 score=(0.8157639502554044, 0.39705578680902437)\n",
      "09500: TRAIN| Lt=2.580 Lp=0.201 Lkl=37.165 Lps=1.741 Lkls=26.648 Lc=39.570 | VALID| Lt=4.852 Lp=0.872 Lkl=41.456 Lps=3.461 Lkls=10.475 Lc=42.012 score=(0.8036079918960277, 0.3835206157275273)\n",
      "10000: TRAIN| Lt=2.514 Lp=0.188 Lkl=37.163 Lps=1.687 Lkls=26.721 Lc=39.563 | VALID| Lt=4.793 Lp=0.864 Lkl=41.406 Lps=3.411 Lkls=10.398 Lc=42.004 score=(0.8004612367178912, 0.384517869168757)\n",
      "10500: TRAIN| Lt=2.440 Lp=0.169 Lkl=37.154 Lps=1.631 Lkls=26.868 Lc=39.589 | VALID| Lt=4.870 Lp=0.884 Lkl=41.363 Lps=3.468 Lkls=10.429 Lc=42.012 score=(0.7988663060111645, 0.3839728905168826)\n",
      "11000: TRAIN| Lt=2.398 Lp=0.161 Lkl=37.106 Lps=1.602 Lkls=26.449 Lc=39.480 | VALID| Lt=4.816 Lp=0.854 Lkl=41.332 Lps=3.449 Lkls=9.962 Lc=41.958 score=(0.7941677263616183, 0.3776828074521237)\n",
      "11500: TRAIN| Lt=2.306 Lp=0.141 Lkl=37.107 Lps=1.524 Lkls=27.002 Lc=39.504 | VALID| Lt=5.125 Lp=0.943 Lkl=41.346 Lps=3.669 Lkls=10.021 Lc=41.967 score=(0.8140612539603854, 0.3916890388476985)\n",
      "12000: TRAIN| Lt=2.222 Lp=0.129 Lkl=37.141 Lps=1.452 Lkls=26.963 Lc=39.599 | VALID| Lt=5.303 Lp=0.986 Lkl=41.324 Lps=3.803 Lkls=10.081 Lc=41.972 score=(0.795999741362588, 0.38850082170618294)\n",
      "12500: TRAIN| Lt=2.237 Lp=0.127 Lkl=37.083 Lps=1.468 Lkls=27.086 Lc=39.497 | VALID| Lt=5.258 Lp=0.980 Lkl=41.319 Lps=3.767 Lkls=9.768 Lc=41.938 score=(0.8033062482488093, 0.3854664963559042)\n",
      "13000: TRAIN| Lt=2.173 Lp=0.116 Lkl=37.029 Lps=1.416 Lkls=27.096 Lc=39.430 | VALID| Lt=5.308 Lp=1.004 Lkl=41.298 Lps=3.793 Lkls=9.795 Lc=41.938 score=(0.7964954630687329, 0.37666072443224835)\n",
      "13500: TRAIN| Lt=2.111 Lp=0.113 Lkl=36.941 Lps=1.363 Lkls=26.571 Lc=39.395 | VALID| Lt=5.416 Lp=1.027 Lkl=41.269 Lps=3.882 Lkls=9.464 Lc=41.890 score=(0.8053969006616808, 0.38825824120686886)\n",
      "14000: TRAIN| Lt=2.074 Lp=0.105 Lkl=37.012 Lps=1.334 Lkls=26.482 Lc=39.457 | VALID| Lt=5.368 Lp=1.013 Lkl=41.223 Lps=3.849 Lkls=9.415 Lc=41.889 score=(0.7898139965946074, 0.3696094392582078)\n",
      "FINISHED...\n"
     ]
    }
   ],
   "source": [
    "saver = tf.train.Saver()\n",
    "sess.run(tf.global_variables_initializer())\n",
    "\n",
    "ITERATION = 500000\n",
    "STEPSIZE  = 500\n",
    "\n",
    "min_loss  = 1e+8   \n",
    "max_acc   = 0.0\n",
    "max_flag  = 20\n",
    "\n",
    "tr_avg_Lt, tr_avg_Lp, tr_avg_Lkl, tr_avg_Lps, tr_avg_Lkls, tr_avg_Lc = 0, 0, 0, 0, 0, 0\n",
    "va_avg_Lt, va_avg_Lp, va_avg_Lkl, va_avg_Lps, va_avg_Lkls, va_avg_Lc = 0, 0, 0, 0, 0, 0\n",
    "    \n",
    "stop_flag = 0\n",
    "for itr in range(ITERATION):\n",
    "    x_mb_set, y_mb, m_mb          = f_get_minibatch_set(mb_size, tr_X_set, tr_Y_onehot, tr_M)     \n",
    "   \n",
    "    _, Lt, Lp, Lkl, Lps, Lkls, Lc = model.train(x_mb_set, y_mb, m_mb, alpha, beta, lr_rate, k_prob)\n",
    "\n",
    "    tr_avg_Lt   += Lt/STEPSIZE\n",
    "    tr_avg_Lp   += Lp/STEPSIZE\n",
    "    tr_avg_Lkl  += Lkl/STEPSIZE\n",
    "    tr_avg_Lps  += Lps/STEPSIZE\n",
    "    tr_avg_Lkls += Lkls/STEPSIZE\n",
    "    tr_avg_Lc   += Lc/STEPSIZE\n",
    "\n",
    "    \n",
    "    x_mb_set, y_mb, m_mb          = f_get_minibatch_set(min(np.shape(va_M)[0], mb_size), va_X_set, va_Y_onehot, va_M)       \n",
    "    Lt, Lp, Lkl, Lps, Lkls, Lc, _, _    = model.get_loss(x_mb_set, y_mb, m_mb, alpha, beta)\n",
    "    \n",
    "    va_avg_Lt   += Lt/STEPSIZE\n",
    "    va_avg_Lp   += Lp/STEPSIZE\n",
    "    va_avg_Lkl  += Lkl/STEPSIZE\n",
    "    va_avg_Lps  += Lps/STEPSIZE\n",
    "    va_avg_Lkls += Lkls/STEPSIZE\n",
    "    va_avg_Lc   += Lc/STEPSIZE\n",
    "    \n",
    "    if (itr+1)%STEPSIZE == 0:\n",
    "        y_pred, y_preds = model.predict_ys(va_X_set, va_M)\n",
    "        \n",
    "#         score = \n",
    "\n",
    "        print( \"{:05d}: TRAIN| Lt={:.3f} Lp={:.3f} Lkl={:.3f} Lps={:.3f} Lkls={:.3f} Lc={:.3f} | VALID| Lt={:.3f} Lp={:.3f} Lkl={:.3f} Lps={:.3f} Lkls={:.3f} Lc={:.3f} score={}\".format(\n",
    "            itr+1, tr_avg_Lt, tr_avg_Lp, tr_avg_Lkl, tr_avg_Lps, tr_avg_Lkls, tr_avg_Lc,  \n",
    "            va_avg_Lt, va_avg_Lp, va_avg_Lkl, va_avg_Lps, va_avg_Lkls, va_avg_Lc, evaluate(va_Y_onehot, np.mean(y_preds, axis=0), y_type))\n",
    "             )\n",
    "            \n",
    "        if min_loss > va_avg_Lt:\n",
    "            min_loss  = va_avg_Lt\n",
    "            stop_flag = 0\n",
    "            saver.save(sess,save_path  + 'itr{}/best_model'.format(out_itr))\n",
    "            print('saved...')\n",
    "        else:\n",
    "            stop_flag += 1\n",
    "                           \n",
    "        tr_avg_Lt, tr_avg_Lp, tr_avg_Lkl, tr_avg_Lps, tr_avg_Lkls, tr_avg_Lc = 0, 0, 0, 0, 0, 0\n",
    "        va_avg_Lt, va_avg_Lp, va_avg_Lkl, va_avg_Lps, va_avg_Lkls, va_avg_Lc = 0, 0, 0, 0, 0, 0\n",
    "        \n",
    "        if stop_flag >= max_flag:\n",
    "            break\n",
    "            \n",
    "print('FINISHED...')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from TCGA_1YR/M4_incomplete/DeepIMV_AISTATS/itr1/best_model\n"
     ]
    }
   ],
   "source": [
    "saver.restore(sess, save_path  + 'itr{}/best_model'.format(out_itr))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Evaluation -- (Results in Table 3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "TEST - INCOMPLETE: auroc=0.7433  auprc=0.3541\n",
      "TEST - INCOMPLETE: auroc=0.7747  auprc=0.4077\n",
      "TEST - INCOMPLETE: auroc=0.7955  auprc=0.4004\n",
      "TEST - INCOMPLETE: auroc=0.8011  auprc=0.4138\n"
     ]
    }
   ],
   "source": [
    "for m_available in [1,2,3,4]:\n",
    "\n",
    "    tmp_M_mis = np.zeros_like(te_M)#np.copy(te_M)\n",
    "\n",
    "\n",
    "    for i in range(len(tmp_M_mis)):\n",
    "        np.random.seed(SEED+out_itr+i)\n",
    "        idx = np.random.choice(4, m_available, replace=False)\n",
    "        tmp_M_mis[i, idx] = 1\n",
    "\n",
    "\n",
    "    #for stablity of reducing randomness..\n",
    "    for i in range(100):\n",
    "        _, tmp_preds_all = model.predict_ys(te_X_set, tmp_M_mis)\n",
    "        if i == 0:\n",
    "            y_preds_all = tmp_preds_all\n",
    "        else:\n",
    "            y_preds_all = np.concatenate([y_preds_all, tmp_preds_all], axis=0)\n",
    "\n",
    "    auc1, apc1 = evaluate(te_Y_onehot, y_preds_all.mean(axis=0), y_type)\n",
    "\n",
    "    RESULTS_AUROC_RAND[m_available-1, out_itr] = auc1\n",
    "    RESULTS_AUPRC_RAND[m_available-1, out_itr] = apc1\n",
    "\n",
    "    print(\"TEST - {} - #VIEW {}: auroc={:.4f}  auprc={:.4f}\".format(MODE.upper(), m_available,  auc1, apc1))"
   ]
  }
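  ,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To complete Table 3, the scores from all outer iterations can be aggregated once the notebook has been run for every split. The cell below is a sketch that assumes `out_itr` takes the values `0 ... OUTITERATION-1` across runs and that the last two columns of each results array are reserved for the mean and standard deviation over iterations:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Aggregate AUROC/AUPRC over the outer iterations (assumed to fill columns 0..OUTITERATION-1).\n",
    "for RESULTS in [RESULTS_AUROC_RAND, RESULTS_AUPRC_RAND]:\n",
    "    RESULTS[:, OUTITERATION]     = RESULTS[:, :OUTITERATION].mean(axis=1)\n",
    "    RESULTS[:, OUTITERATION + 1] = RESULTS[:, :OUTITERATION].std(axis=1)\n",
    "\n",
    "print(RESULTS_AUROC_RAND)\n",
    "print(RESULTS_AUPRC_RAND)"
   ]
  }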
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}