--- a +++ b/.ipynb_checkpoints/Null-classifier-checkpoint.ipynb @@ -0,0 +1,363 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<h1 align=\"center\">Machine learning-based prediction of early recurrence in glioblastoma patients: a glance towards precision medicine <br><br>[Null-classifier]</h1>" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<h2>[1] Library</h2>" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/valerio_mc/opt/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.neighbors.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.neighbors. Anything that cannot be imported from sklearn.neighbors is now part of the private API.\n", + " warnings.warn(message, FutureWarning)\n", + "/Users/valerio_mc/opt/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.ensemble.bagging module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.ensemble. Anything that cannot be imported from sklearn.ensemble is now part of the private API.\n", + " warnings.warn(message, FutureWarning)\n", + "/Users/valerio_mc/opt/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.ensemble.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.ensemble. Anything that cannot be imported from sklearn.ensemble is now part of the private API.\n", + " warnings.warn(message, FutureWarning)\n", + "/Users/valerio_mc/opt/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.ensemble.forest module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.ensemble. Anything that cannot be imported from sklearn.ensemble is now part of the private API.\n", + " warnings.warn(message, FutureWarning)\n", + "/Users/valerio_mc/opt/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.utils.testing module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.utils. Anything that cannot be imported from sklearn.utils is now part of the private API.\n", + " warnings.warn(message, FutureWarning)\n", + "/Users/valerio_mc/opt/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.metrics.classification module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. 
Anything that cannot be imported from sklearn.metrics is now part of the private API.\n", + " warnings.warn(message, FutureWarning)\n" + ] + } + ], + "source": [ + "# OS library\n", + "import os\n", + "import sys\n", + "import argparse\n", + "import itertools\n", + "import random\n", + "\n", + "# Analysis\n", + "import numpy as np\n", + "import pandas as pd\n", + "import seaborn as sns\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# Sklearn\n", + "from boruta import BorutaPy\n", + "from sklearn.preprocessing import LabelEncoder\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.ensemble import RandomForestClassifier\n", + "from sklearn.metrics import confusion_matrix, f1_score, recall_score, classification_report, accuracy_score, auc, roc_curve\n", + "from sklearn.model_selection import RandomizedSearchCV\n", + "from sklearn.dummy import DummyClassifier\n", + "\n", + "import scikitplot as skplt\n", + "from imblearn.over_sampling import RandomOverSampler, SMOTENC, SMOTE" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<h2>[2] Exploratory data analysis and Data Preprocessing</h2>" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<h4>[-] Load the database</h4>" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "file = os.path.join(sys.path[0], \"db.xlsx\")\n", + "db = pd.read_excel(file)\n", + "\n", + "print(\"N° of patients: {}\".format(len(db)))\n", + "print(\"N° of columns: {}\".format(db.shape[1]))\n", + "db.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<h4>[-] Drop unwanted columns + create <i>'results'</i> column</h4>" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df = db.drop(['Name_Surname','...'], axis = 'columns')\n", + "\n", + "print(\"Effective features to consider: {} \".format(len(df.columns)-1))\n", + "print(\"Creating 'result' column...\")\n", + "\n", + "# 0 = No relapse\n", + "df.loc[df['PFS'] > 6, 'result'] = 0\n", + "\n", + "# 1 = Early relapse (within 6 months)\n", + "df.loc[df['PFS'] <= 6, 'result'] = 1\n", + "\n", + "df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<h4>[-] Check for class imbalance in the <i>'results'</i> column </h4>" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"PFS Overview\")\n", + "print(df.result.value_counts())\n", + "\n", + "df.result.value_counts().plot(kind='pie', autopct='%1.0f%%', colors=['skyblue', 'orange'], explode=(0.05, 0.05))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<h4>[-] Label encoding of the categorical variables </h4>" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df['sex'] =df['sex'].astype('category')\n", + "df['ceus'] =df['ceus'].astype('category')\n", + "df['ala'] =df['ala'].astype('category')\n", + "\n", + "#df['Ki67'] =df['Ki67'].astype(int)\n", + "df['MGMT'] =df['MGMT'].astype('category')\n", + "df['IDH1'] =df['IDH1'].astype('category')\n", + "\n", + "df['side'] =df['side'].astype('category')\n", + "df['ependima'] =df['ependima'].astype('category')\n", + "df['cc'] =df['cc'].astype('category')\n", + "df['necrotico_cistico'] =df['necrotico_cistico'].astype('category')\n", + "df['shift'] =df['shift'].astype('category')\n", + "\n", + "## VARIABLE TO 
ONE-HOT-ENCODE\n",
+    "df['localization'] =df['localization'].astype(int)\n",
+    "df['clinica_esordio'] =df['clinica_esordio'].astype(int)\n",
+    "df['immediate_p_o'] =df['immediate_p_o'].astype(int)\n",
+    "df['onco_Protocol'] =df['onco_Protocol'].astype(int)\n",
+    "\n",
+    "df['result'] =df['result'].astype(int)\n",
+    "\n",
+    "dummy_v = ['localization', 'clinica_esordio', 'onco_Protocol', 'immediate_p_o']\n",
+    "\n",
+    "df = pd.get_dummies(df, columns = dummy_v, prefix = dummy_v)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<h2>[3] Prediction Models</h2>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<h4> [-] Training and testing set splitting</h4>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "target = df['result']\n",
+    "inputs = df.drop(['result', 'PFS'], axis = 'columns')\n",
+    "x_train, x_test, y_train, y_test = train_test_split(inputs['...'],target,test_size=0.20, random_state=10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<h4> [-] Dummy Training </h4>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Null-classifier baseline: uniform random predictions scored on the training split\n",
+    "dummy_train = DummyClassifier(strategy=\"uniform\", random_state = 42)\n",
+    "dummy_train.fit(x_train, y_train)\n",
+    "\n",
+    "score_dummy_train = dummy_train.score(x_train, y_train)\n",
+    "print(\"Dummy Train accuracy ***TRAIN***: \", round(score_dummy_train*100,2), \"% \\n\")\n",
+    "\n",
+    "y_dummy_train_predicted = dummy_train.predict(x_train)\n",
+    "y_dummy_train_proba = dummy_train.predict_proba(x_train)\n",
+    "\n",
+    "cm_dummy_train = confusion_matrix(y_train, y_dummy_train_predicted)\n",
+    "print(cm_dummy_train, \"\\n\")\n",
+    "\n",
+    "false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, y_dummy_train_predicted)\n",
+    "roc_auc = auc(false_positive_rate, true_positive_rate)\n",
+    "\n",
+    "\n",
+    "print('1. The F-1 Score of the model {} \\n '.format(round(f1_score(y_train, y_dummy_train_predicted, average = 'macro'), 2)))\n",
+    "print('2. The Recall Score of the model {} \\n '.format(round(recall_score(y_train, y_dummy_train_predicted, average = 'macro'), 2)))\n",
+    "print('3. Classification report \\n {} \\n'.format(classification_report(y_train, y_dummy_train_predicted)))\n",
+    "print('4. AUC: \\n {} \\n'.format(roc_auc))\n",
+    "\n",
+    "tn, fp, fn, tp = cm_dummy_train.ravel()\n",
+    "\n",
+    "# Sensitivity, hit rate, Recall, or true positive rate\n",
+    "tpr = tp/(tp+fn)\n",
+    "print(\"Sensitivity (TPR): {}\".format(tpr))\n",
+    "\n",
+    "# Specificity or true negative rate\n",
+    "tnr = tn/(tn+fp)\n",
+    "print(\"Specificity (TNR): {}\".format(tnr))\n",
+    "\n",
+    "# Precision or positive predictive value\n",
+    "ppv = tp/(tp+fp)\n",
+    "print(\"Precision (PPV): {}\".format(ppv))\n",
+    "\n",
+    "# Negative predictive value\n",
+    "npv = tn/(tn+fn)\n",
+    "print(\"Negative Predictive Value (NPV): {}\".format(npv))\n",
+    "\n",
+    "# False positive rate\n",
+    "fpr = fp / (fp + tn)\n",
+    "print(\"False Positive Rate (FPR): {}\".format(fpr))"
+   ]
+  },
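+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<h4> [-] Optional: confusion-matrix metric helper (sketch)</h4>\n",
+    "\n",
+    "The sensitivity/specificity/PPV/NPV/FPR block is computed twice, once for the training split and once for the testing split. The cell below is a minimal, optional sketch of a reusable helper; the function name <i>binary_metrics</i> is ours and is not part of the original analysis. It only repackages the same formulas already used in the cells above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sketch (hypothetical helper, not part of the original pipeline):\n",
+    "# derive the confusion-matrix metrics reported above for any (y_true, y_pred) pair.\n",
+    "from sklearn.metrics import confusion_matrix\n",
+    "\n",
+    "def binary_metrics(y_true, y_pred):\n",
+    "    # For binary labels {0, 1}, confusion_matrix returns [[tn, fp], [fn, tp]]\n",
+    "    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n",
+    "    return {\n",
+    "        'sensitivity_tpr': tp / (tp + fn),\n",
+    "        'specificity_tnr': tn / (tn + fp),\n",
+    "        'precision_ppv': tp / (tp + fp),\n",
+    "        'npv': tn / (tn + fn),\n",
+    "        'fpr': fp / (fp + tn)\n",
+    "    }\n",
+    "\n",
+    "# Example usage on the null-classifier training predictions:\n",
+    "# binary_metrics(y_train, y_dummy_train_predicted)"
+   ]
+  },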
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<h4> [-] Dummy Testing </h4>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Null-classifier baseline: uniform random predictions scored on the held-out test split\n",
+    "dummy_testing = DummyClassifier(strategy=\"uniform\", random_state = 42)\n",
+    "dummy_testing.fit(x_test, y_test)\n",
+    "\n",
+    "score_dummy_testing = dummy_testing.score(x_test, y_test)\n",
+    "print(\"Dummy Test accuracy ***TEST***: \", round(score_dummy_testing*100,2), \"% \\n\")\n",
+    "\n",
+    "y_dummy_testing_predicted = dummy_testing.predict(x_test)\n",
+    "y_dummy_testing_proba = dummy_testing.predict_proba(x_test)\n",
+    "\n",
+    "cm_dummy_testing = confusion_matrix(y_test, y_dummy_testing_predicted)\n",
+    "print(cm_dummy_testing, \"\\n\")\n",
+    "\n",
+    "false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_dummy_testing_predicted)\n",
+    "roc_auc = auc(false_positive_rate, true_positive_rate)\n",
+    "\n",
+    "\n",
+    "print('1. The F-1 Score of the model {} \\n '.format(round(f1_score(y_test, y_dummy_testing_predicted, average = 'macro'), 2)))\n",
+    "print('2. The Recall Score of the model {} \\n '.format(round(recall_score(y_test, y_dummy_testing_predicted, average = 'macro'), 2)))\n",
+    "print('3. Classification report \\n {} \\n'.format(classification_report(y_test, y_dummy_testing_predicted)))\n",
+    "print('4. AUC: \\n {} \\n'.format(roc_auc))\n",
+    "\n",
+    "tn, fp, fn, tp = cm_dummy_testing.ravel()\n",
+    "\n",
+    "# Sensitivity, hit rate, Recall, or true positive rate\n",
+    "tpr = tp/(tp+fn)\n",
+    "print(\"Sensitivity (TPR): {}\".format(tpr))\n",
+    "\n",
+    "# Specificity or true negative rate\n",
+    "tnr = tn/(tn+fp)\n",
+    "print(\"Specificity (TNR): {}\".format(tnr))\n",
+    "\n",
+    "# Precision or positive predictive value\n",
+    "ppv = tp/(tp+fp)\n",
+    "print(\"Precision (PPV): {}\".format(ppv))\n",
+    "\n",
+    "# Negative predictive value\n",
+    "npv = tn/(tn+fn)\n",
+    "print(\"Negative Predictive Value (NPV): {}\".format(npv))\n",
+    "\n",
+    "# False positive rate\n",
+    "fpr = fp / (fp + tn)\n",
+    "print(\"False Positive Rate (FPR): {}\".format(fpr))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}