# --- Imports -----------------------------------------------------------------
import numpy as np
import pandas as pd
# matplotlib.pylab is deprecated; pyplot is the supported API. The alias is
# kept as `plt1` because later cells reference that name.
from matplotlib import pyplot as plt1
import seaborn as sns
from sklearn.model_selection import train_test_split

# --- Load data ---------------------------------------------------------------
# NOTE(review): hard-coded absolute Windows path — replace with a relative
# path / configurable data directory so the notebook runs on other machines.
DATA_PATH = r"C:\Users\SAARTH CHAHAL\Desktop\Programming\AIML\smoking_driking_dataset_Ver01.csv"
data = pd.read_csv(DATA_PATH)

# --- EDA (Exploratory Data Analysis) -----------------------------------------
# Only the last bare expression in a cell is displayed, so print each piece
# of information explicitly instead of stacking bare expressions.
print(data.shape)          # (rows, columns)
print(data.columns)
print(data.nunique(axis=0))
data.head()

# Summary statistics, formatted as plain floats (avoids scientific notation).
data.describe().apply(lambda s: s.apply(lambda x: format(x, 'f')))

# --- Cleaning: drop null rows ------------------------------------------------
# Visual examination showed no nulls, but dropna() keeps the step robust.
data_cleaned = data.dropna(axis=0)
print(data_cleaned.shape)

# --- Cleaning: remove outliers -----------------------------------------------
# Thresholds below are based on observation of the data.
data_cleaned = data_cleaned[data_cleaned['waistline'].between(25, 150)]
# Eyesight readings of 5 and above look like sentinel/outlier codes; keep
# only values below 5 for both eyes.
data_cleaned = data_cleaned[data_cleaned['sight_left'] < 5]
data_cleaned = data_cleaned[data_cleaned['sight_right'] < 5]
# The correlation step further down needs all-numeric columns, so the
# string-typed 'sex' column is dropped in the next step.
# Drop the string-typed 'sex' column so corr() below sees only numeric data.
data_cleaned = data_cleaned.drop('sex', axis=1)
# Encode the drinker flag numerically: 'Y' -> 1, anything else -> 0.
data_cleaned['DRK_YN'] = np.where(data_cleaned['DRK_YN'] == 'Y', 1, 0)

print(data_cleaned.shape)  # outlier filtering removed ~5803 records
# Blood-pressure columns were left unchanged: values appeared to be in range.

data_cleaned.describe().apply(lambda s: s.apply(lambda x: format(x, 'f')))

# --- Correlation analysis ----------------------------------------------------
# The full correlation matrix has too many variables to read comfortably, so
# build two focused subsets: one around smoking, one around drinking.
dfdata = pd.DataFrame(data_cleaned)
dfdata_smk = dfdata[['tot_chole', 'HDL_chole', 'LDL_chole',
                     'triglyceride', 'hemoglobin', 'SMK_stat_type_cd']]
dfdata_drk = dfdata[['urine_protein', 'serum_creatinine', 'SGOT_AST',
                     'SGOT_ALT', 'gamma_GTP', 'DRK_YN']]
corr_matrix_smk = dfdata_smk.corr()
corr_matrix_drk = dfdata_drk.corr()

# FIX: the two heatmaps were previously drawn into the same current axes, so
# the second call overplotted the first. Give each heatmap its own figure.
plt1.figure()
sns.heatmap(corr_matrix_smk,
            xticklabels=corr_matrix_smk.columns,
            yticklabels=corr_matrix_smk.columns,
            annot=True,
            cmap=sns.diverging_palette(220, 20, as_cmap=True))
plt1.figure()
sns.heatmap(corr_matrix_drk,
            xticklabels=corr_matrix_drk.columns,
            yticklabels=corr_matrix_drk.columns,
            annot=True,
            cmap=sns.diverging_palette(220, 20, as_cmap=True))

# Scatter plots: one representative predictor against each target.
dfdata.plot(kind='scatter', x='tot_chole', y='SMK_stat_type_cd')
dfdata.plot(kind='scatter', x='SGOT_AST', y='DRK_YN')

# Pairwise relationships for a small set of variables.
sns.pairplot(dfdata,
             x_vars=["age", "waistline", "tot_chole", "SGOT_AST",
                     "SMK_stat_type_cd", "DRK_YN"],
             y_vars=["age", "waistline", "tot_chole", "SGOT_AST"])
# --- Model training ----------------------------------------------------------
# (train_test_split is already imported at the top of the notebook; the
# redundant re-import that used to live here has been removed.)

# Model 1: features are cholesterol / blood markers, target is smoking status.
X1 = dfdata[['tot_chole', 'HDL_chole', 'LDL_chole', 'triglyceride', 'hemoglobin']]
y1 = dfdata['SMK_stat_type_cd']
# Model 2: features are kidney/liver-function markers, target is drinker flag.
X2 = dfdata[['urine_protein', 'serum_creatinine', 'SGOT_AST', 'SGOT_ALT', 'gamma_GTP']]
y2 = dfdata['DRK_YN']

# 60% train / 40% test; random_state fixed for reproducibility.
X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, test_size=0.4, random_state=42)
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.4, random_state=42)

# Fit one ordinary-least-squares model per target.
from sklearn.linear_model import LinearRegression

lm1 = LinearRegression()
lm2 = LinearRegression()
lm1.fit(X1_train, y1_train)
lm2.fit(X2_train, y2_train)

# In-sample predictions on the training data.
training_data_prediction1 = lm1.predict(X1_train)
training_data_prediction2 = lm2.predict(X2_train)
# --- Model evaluation: coefficients ------------------------------------------
# One coefficient per feature; print explicitly so both tables are shown
# (bare expressions mid-cell are not displayed).
print(lm1.intercept_)
coeff_df1 = pd.DataFrame(lm1.coef_, X1.columns, columns=['Coefficient'])
print(coeff_df1)
print(lm2.intercept_)
coeff_df2 = pd.DataFrame(lm2.coef_, X2.columns, columns=['Coefficient'])
print(coeff_df2)
# NOTE(review): these coefficients describe how a one-unit change in each
# FEATURE shifts the PREDICTED smoking status / drinker flag — not the other
# way around. The original interpretation inverted this; confirm before
# drawing health conclusions.

# --- Predictions on the held-out test sets -----------------------------------
# BUG FIX: the original assigned both models' test predictions to the same
# `predictions` variable, so model 1's scatter/residual plots and the MAE:1 /
# MSE:1 / RMSE:1 metrics were actually computed from MODEL 2's predictions
# against y1_test. Keep the two prediction vectors separate.
predictions1 = lm1.predict(X1_test)
predictions2 = lm2.predict(X2_test)

plt1.figure()
plt1.scatter(y1_test, predictions1)
sns.displot((y1_test - predictions1), bins=50)
plt1.figure()
plt1.scatter(y2_test, predictions2)
sns.displot((y2_test - predictions2), bins=50)

# --- Regression evaluation metrics -------------------------------------------
# MAE  : mean absolute error — the average error, easiest to interpret.
# MSE  : mean squared error — penalizes larger errors more heavily.
# RMSE : root of MSE — interpretable in the units of the target.
from sklearn import metrics
print('MAE:1', metrics.mean_absolute_error(y1_test, predictions1))
print('MSE:1', metrics.mean_squared_error(y1_test, predictions1))
print('RMSE:1', np.sqrt(metrics.mean_squared_error(y1_test, predictions1)))

print('MAE:2', metrics.mean_absolute_error(y2_test, predictions2))
print('MSE:2', metrics.mean_squared_error(y2_test, predictions2))

# Backward-compat alias: the following RMSE:2 line still reads `predictions`,
# which in the original code ended up holding model 2's test predictions.
predictions = predictions2
"# RMSE of model 2: square root of MSE, in the units of the target (DRK_YN).\n", + "print('RMSE:2',np.sqrt(metrics.mean_squared_error(y2_test, predictions)))\n", + "\n", + "\n", + "\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +}