Switch to unified view

a b/03-Experiments/Temp-XGBoost_FE_obj_Recall.ipynb
1
{
2
 "cells": [
3
  {
4
   "cell_type": "markdown",
5
   "metadata": {},
6
   "source": [
7
    "# Global Experiment Setup"
8
   ]
9
  },
10
  {
11
   "cell_type": "code",
12
   "execution_count": 3,
13
   "metadata": {},
14
   "outputs": [
15
    {
16
     "data": {
17
      "text/plain": [
18
       "<Experiment: artifact_location='/Users/arham/Downloads/Projects/03-Experiments/mlruns/2', creation_time=1713912394972, experiment_id='2', last_update_time=1713912394972, lifecycle_stage='active', name='XGBoost', tags={}>"
19
      ]
20
     },
21
     "execution_count": 3,
22
     "metadata": {},
23
     "output_type": "execute_result"
24
    }
25
   ],
26
   "source": [
27
import mlflow


# Set the MLflow tracking URI to a new SQLite URI
# (all runs in this notebook are recorded into new_mlflow.db, relative to the CWD)
mlflow.set_tracking_uri("sqlite:///new_mlflow.db")
# creates the "XGBoost" experiment on first use, then reuses it
mlflow.set_experiment("XGBoost")
33
    "\n"
34
   ]
35
  },
36
  {
37
   "cell_type": "code",
38
   "execution_count": 4,
39
   "metadata": {},
40
   "outputs": [],
41
   "source": [
42
    "import pandas as pd\n",
43
    "from sklearn.model_selection import train_test_split\n",
44
    "import matplotlib.pyplot as plt\n",
45
    "import seaborn as sns\n",
46
    "import numpy as np\n",
47
    "from sklearn.preprocessing import MinMaxScaler\n",
48
    "from sklearn.preprocessing import PolynomialFeatures\n",
49
    "import lightgbm as lgb\n",
50
    "from sklearn.metrics import accuracy_score\n",
51
    "import warnings\n",
52
    "from sklearn.tree import DecisionTreeClassifier\n",
53
    "from sklearn.model_selection import cross_val_score\n",
54
    "from sklearn.metrics import accuracy_score, precision_score, recall_score\n",
55
    "import xgboost as xgb\n",
56
    "from sklearn.metrics import accuracy_score, precision_score, recall_score\n",
57
    "from sklearn.model_selection import cross_val_score\n",
58
    "\n",
59
    "\n",
60
    "def load_data(path):\n",
61
    "    df = pd.read_csv(path)\n",
62
    "    train_df, test_df = train_test_split(df, test_size=0.35, random_state=42)\n",
63
    "    train_df, val_df,  = train_test_split(train_df, test_size=0.20, random_state=42)\n",
64
    "    train_df = train_df.drop(['id'], axis=1).drop_duplicates().reset_index(drop=True)\n",
65
    "    test_df = test_df.drop(['id'], axis=1).drop_duplicates().reset_index(drop=True)\n",
66
    "    val_df = val_df.drop(['id'], axis=1).drop_duplicates().reset_index(drop=True)\n",
67
    "    return train_df, val_df, test_df\n",
68
    "\n",
69
    "def encode_target(train):\n",
70
    "    target_key = {'Insufficient_Weight': 0, 'Normal_Weight': 1, 'Overweight_Level_I': 2, 'Overweight_Level_II': 3, 'Obesity_Type_I': 4,'Obesity_Type_II' : 5, 'Obesity_Type_III': 6}\n",
71
    "    train['NObeyesdad'] = train['NObeyesdad'].map(target_key)\n",
72
    "    return train\n",
73
    "\n",
74
    "def make_gender_binary(train):\n",
75
    "    train['Gender'] = train['Gender'].map({'Male':0, 'Female':1})\n",
76
    "\n",
77
    "def datatypes(train):\n",
78
    "    train['Weight'] = train['Weight'].astype(float)\n",
79
    "    train['Age'] = train['Age'].astype(float)\n",
80
    "    train['Height'] = train['Height'].astype(float)\n",
81
    "    return train\n",
82
    "\n",
83
    "# def age_binning(train_df):\n",
84
    "#     # train_df['Age_Group'] = pd.cut(train_df['Age'], bins=[0, 20, 30, 40, 50, train_df['Age'].max()], labels=['0-20', '21-30', '31-40', '41-50', '50+'])\n",
85
    "#     train_df['Age_Group'] = pd.cut(train_df['Age'], bins=[0, 20, 30, 40, 50, train_df['Age'].max()], labels=[1, 2, 3, 4, 5])\n",
86
    "#     train_df['Age_Group'] = train_df['Age_Group'].astype(int)\n",
87
    "#     return train_df\n",
88
    "\n",
89
    "def age_binning(df):\n",
90
    "    age_groups = []\n",
91
    "    for age in df['Age']:\n",
92
    "        if age <= 20:\n",
93
    "            age_group = 1\n",
94
    "        elif age <= 30:\n",
95
    "            age_group = 2\n",
96
    "        elif age <= 40:\n",
97
    "            age_group = 3\n",
98
    "        elif age <= 50:\n",
99
    "            age_group = 4\n",
100
    "        else:\n",
101
    "            age_group = 5\n",
102
    "        age_groups.append(age_group)\n",
103
    "    df['Age_Group'] = age_groups\n",
104
    "    return df\n",
105
    "\n",
106
    "def age_scaling_log(train_df):\n",
107
    "    train_df['Age'] = train_df['Age'].astype(float)\n",
108
    "    train_df['Log_Age'] = np.log1p(train_df['Age'])\n",
109
    "    return train_df\n",
110
    "\n",
111
    "def age_scaling_minmax(train_df):\n",
112
    "    train_df['Age'] = train_df['Age'].astype(float)\n",
113
    "    scaler_age = MinMaxScaler()\n",
114
    "    train_df['Scaled_Age'] = scaler_age.fit_transform(train_df['Age'].values.reshape(-1, 1))\n",
115
    "    return train_df, scaler_age\n",
116
    "\n",
117
    "def weight_scaling_log(train_df):\n",
118
    "    train_df['Weight'] = train_df['Weight'].astype(float)\n",
119
    "    train_df['Log_Weight'] = np.log1p(train_df['Weight'])\n",
120
    "    return train_df\n",
121
    "\n",
122
    "def weight_scaling_minmax(train_df):\n",
123
    "    train_df['Weight'] = train_df['Weight'].astype(float)\n",
124
    "    scaler_weight = MinMaxScaler()\n",
125
    "    train_df['Scaled_Weight'] = scaler_weight.fit_transform(train_df['Weight'].values.reshape(-1, 1))\n",
126
    "    return train_df, scaler_weight\n",
127
    "\n",
128
    "def height_scaling_log(train_df):\n",
129
    "    train_df['Log_Height'] = np.log1p(train_df['Height'])\n",
130
    "    return train_df\n",
131
    "\n",
132
    "def height_scaling_minmax(train_df):\n",
133
    "    scaler_height = MinMaxScaler()\n",
134
    "    train_df['Scaled_Height'] = scaler_height.fit_transform(train_df['Height'].values.reshape(-1, 1))\n",
135
    "    return train_df, scaler_height\n",
136
    "\n",
137
    "def make_gender_binary(train):\n",
138
    "    train['Gender'] = train['Gender'].map({'Female':1, 'Male':0})\n",
139
    "    return train\n",
140
    "\n",
141
    "def fix_binary_columns(train):\n",
142
    "    Binary_Cols = ['family_history_with_overweight','FAVC', 'SCC','SMOKE']\n",
143
    "    # if yes then 1 else 0\n",
144
    "    for col in Binary_Cols:\n",
145
    "        train[col] = train[col].map({'yes': 1, 'no': 0})\n",
146
    "        # column datatype integer\n",
147
    "        train[col] = train[col].astype(int)\n",
148
    "    return train\n",
149
    "\n",
150
    "def freq_cat_cols(train):\n",
151
    "    # One hot encoding\n",
152
    "    cat_cols = ['CAEC', 'CALC']\n",
153
    "    for col in cat_cols:\n",
154
    "        train[col] = train[col].map({'no': 0, 'Sometimes': 1, 'Frequently': 2, 'Always': 3})\n",
155
    "    return train\n",
156
    "\n",
157
    "def Mtrans(train):\n",
158
    "    \"\"\"\n",
159
    "    Public_Transportation    8692\n",
160
    "    Automobile               1835\n",
161
    "    Walking                   231\n",
162
    "    Motorbike                  19\n",
163
    "    Bike                       16\n",
164
    "    \"\"\"\n",
165
    "    # train['MTRANS'] = train['MTRANS'].map({'Public_Transportation': 3, 'Automobile': 5, 'Walking': 1, 'Motorbike': 4, 'Bike': 2})\n",
166
    "    # dummify column\n",
167
    "    train = pd.get_dummies(train, columns=['MTRANS'])\n",
168
    "    # convert these columns to integer\n",
169
    "    train['MTRANS_Automobile'] = train['MTRANS_Automobile'].astype(int)\n",
170
    "    train['MTRANS_Walking'] = train['MTRANS_Walking'].astype(int)\n",
171
    "    train['MTRANS_Motorbike'] = train['MTRANS_Motorbike'].astype(int)\n",
172
    "    train['MTRANS_Bike'] = train['MTRANS_Bike'].astype(int)\n",
173
    "    train['MTRANS_Public_Transportation'] = train['MTRANS_Public_Transportation'].astype(int)\n",
174
    "    return train\n",
175
    "\n",
176
    "\n",
177
    "def other_features(train):\n",
178
    "    train['BMI'] = train['Weight'] / (train['Height'] ** 2)\n",
179
    "    # train['Age'*'Gender'] = train['Age'] * train['Gender']\n",
180
    "    polynomial_features = PolynomialFeatures(degree=2)\n",
181
    "    X_poly = polynomial_features.fit_transform(train[['Age', 'BMI']])\n",
182
    "    poly_features_df = pd.DataFrame(X_poly, columns=['Age^2', 'Age^3', 'BMI^2', 'Age * BMI', 'Age * BMI^2', 'Age^2 * BMI^2'])\n",
183
    "    train = pd.concat([train, poly_features_df], axis=1)\n",
184
    "    return train\n",
185
    "\n",
186
    "\n",
187
    "def test_pipeline(test, scaler_age, scaler_weight, scaler_height):\n",
188
    "    test = datatypes(test)\n",
189
    "    test = encode_target(test)\n",
190
    "    test = age_binning(test)\n",
191
    "    test = age_scaling_log(test)\n",
192
    "    test['Scaled_Age'] = scaler_age.transform(test['Age'].values.reshape(-1, 1))\n",
193
    "    test = weight_scaling_log(test)\n",
194
    "    test['Scaled_Weight'] = scaler_weight.transform(test['Weight'].values.reshape(-1, 1))\n",
195
    "    test = height_scaling_log(test)\n",
196
    "    test['Scaled_Height'] = scaler_height.transform(test['Height'].values.reshape(-1, 1))\n",
197
    "    test = make_gender_binary(test)\n",
198
    "    test = fix_binary_columns(test)\n",
199
    "    test = freq_cat_cols(test)\n",
200
    "    test = Mtrans(test)\n",
201
    "    test = other_features(test)\n",
202
    "\n",
203
    "    return test\n",
204
    "\n",
205
    "def train_model(params, X_train, y_train):\n",
206
    "    lgb_train = lgb.Dataset(X_train, y_train)\n",
207
    "    model = lgb.train(params, lgb_train, num_boost_round=1000)\n",
208
    "    return model\n",
209
    "\n",
210
    "def evaluate_model(model, X_val, y_val):\n",
211
    "    y_pred = model.predict(X_val)\n",
212
    "    y_pred = [np.argmax(y) for y in y_pred]\n",
213
    "    accuracy = accuracy_score(y_val, y_pred)\n",
214
    "    return accuracy\n",
215
    "\n",
216
    "def objective(trial, X_train, y_train):\n",
217
    "    params = {\n",
218
    "        'objective': 'multiclass',\n",
219
    "        'num_class': 7,\n",
220
    "        'metric': 'multi_logloss',\n",
221
    "        'boosting_type': 'gbdt',\n",
222
    "        'learning_rate': trial.suggest_loguniform('learning_rate', 0.005, 0.5),\n",
223
    "        'num_leaves': trial.suggest_int('num_leaves', 10, 1000),\n",
224
    "        'max_depth': trial.suggest_int('max_depth', -1, 20),\n",
225
    "        'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.6, 0.95),\n",
226
    "        'feature_fraction': trial.suggest_uniform('feature_fraction', 0.6, 0.95),\n",
227
    "        'verbosity': -1\n",
228
    "    }\n",
229
    "\n",
230
    "    n_splits = 5\n",
231
    "    kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)\n",
232
    "    scores = []\n",
233
    "\n",
234
    "    for train_index, val_index in kf.split(X_train, y_train):\n",
235
    "        X_tr, X_val = X_train.iloc[train_index], X_train.iloc[val_index]\n",
236
    "        y_tr, y_val = y_train.iloc[train_index], y_train.iloc[val_index]\n",
237
    "\n",
238
    "        model = train_model(params, X_tr, y_tr)\n",
239
    "        accuracy = evaluate_model(model, X_val, y_val)\n",
240
    "        scores.append(accuracy)\n",
241
    "\n",
242
    "    return np.mean(scores)\n",
243
    "\n",
244
    "def optimize_hyperparameters(X_train, y_train, n_trials=2):\n",
245
    "    study = optuna.create_study(direction='maximize')\n",
246
    "    study.optimize(lambda trial: objective(trial, X_train, y_train), n_trials=n_trials)\n",
247
    "    return study.best_params\n"
248
   ]
249
  },
250
  {
251
   "cell_type": "markdown",
252
   "metadata": {},
253
   "source": [
254
    "### XGB With Feature Engineering"
255
   ]
256
  },
257
  {
258
   "cell_type": "code",
259
   "execution_count": 5,
260
   "metadata": {},
261
   "outputs": [],
262
   "source": [
263
    "\n",
264
# --- Feature-engineering pipeline: fit on train, re-apply to val/test ---
# NOTE(review): absolute local path — breaks on any other machine; consider a DATA_DIR constant.
path = '/Users/arham/Downloads/Projects/01-Dataset/01-Data-for-model-building/train.csv'
train_df, val_df, test_df = load_data(path)

# Training split: the three min-max scalers are FITTED here and reused
# for val/test below (test_pipeline), so there is no train/test leakage.
train_df = datatypes(train_df)
train_df = encode_target(train_df)
train_df = age_binning(train_df)
train_df, scaler_age = age_scaling_minmax(train_df)
train_df = age_scaling_log(train_df)
train_df, scaler_weight = weight_scaling_minmax(train_df)
train_df = weight_scaling_log(train_df)
train_df, scaler_height = height_scaling_minmax(train_df)
train_df = height_scaling_log(train_df)
train_df = make_gender_binary(train_df)
train_df = fix_binary_columns(train_df)
train_df = freq_cat_cols(train_df)
train_df = Mtrans(train_df)
train_df = other_features(train_df)

# Held-out splits go through the same steps with the already-fitted scalers.
val_df = test_pipeline(val_df, scaler_age, scaler_weight, scaler_height)
test_df = test_pipeline(test_df, scaler_age, scaler_weight, scaler_height)

Target = 'NObeyesdad'
# features = train_df.columns.drop(Target)
# NOTE(review): the scaled/log columns (commented out below) are intentionally
# excluded from this experiment's feature set.
features = ['Gender', 'Age', 'Height', 'Weight', 'family_history_with_overweight',
       'FAVC', 'FCVC', 'NCP', 'CAEC', 'SMOKE', 'CH2O', 'SCC', 'FAF', 'TUE',
       'CALC', 'Age_Group', 
       'MTRANS_Automobile', 'MTRANS_Bike', 'MTRANS_Motorbike',
       'MTRANS_Public_Transportation', 'MTRANS_Walking', 'BMI', 'Age^2',
       'Age^3', 'BMI^2', 'Age * BMI', 'Age * BMI^2', 'Age^2 * BMI^2'] 

       #'Scaled_Age', 'Log_Age', 'Scaled_Weight', 'Log_Weight', 'Scaled_Height', 'Log_Height',

# Design matrices / targets for the model cells below.
X_train = train_df[features]
y_train = train_df[Target]
X_val = val_df[features]
y_val = val_df[Target]
X_test = test_df[features]
y_test = test_df[Target]

# save X_train, y_train, X_val, X_test, y_test
305
   ]
306
  },
307
  {
308
   "cell_type": "code",
309
   "execution_count": 8,
310
   "metadata": {},
311
   "outputs": [
312
    {
313
     "name": "stdout",
314
     "output_type": "stream",
315
     "text": [
316
      "Target Drift For Each Class [0.004943133623686147, 0.011990707821925795, -0.0087675011457998, -0.001077949504617301, -0.017190035106736085, -0.00032756263090533144, 0.01042920694244659]\n",
317
      "Cross-validation Scores (XGBoost): [nan nan nan nan nan]\n",
318
      "Mean CV Accuracy (XGBoost): nan\n"
319
     ]
320
    },
321
    {
322
     "ename": "TypeError",
323
     "evalue": "XGBClassifier.fit() got an unexpected keyword argument 'objective'",
324
     "output_type": "error",
325
     "traceback": [
326
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
327
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
328
      "Cell \u001b[0;32mIn[8], line 23\u001b[0m\n\u001b[1;32m     21\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCross-validation Scores (XGBoost):\u001b[39m\u001b[38;5;124m\"\u001b[39m, cv_scores_xgb)\n\u001b[1;32m     22\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mMean CV Accuracy (XGBoost):\u001b[39m\u001b[38;5;124m\"\u001b[39m, cv_scores_xgb\u001b[38;5;241m.\u001b[39mmean())\n\u001b[0;32m---> 23\u001b[0m \u001b[43mxgb_classifier\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mobjective\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mmulti:softmax\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m     24\u001b[0m y_val_pred_xgb \u001b[38;5;241m=\u001b[39m xgb_classifier\u001b[38;5;241m.\u001b[39mpredict(X_val)\n\u001b[1;32m     25\u001b[0m accuracy_xgb \u001b[38;5;241m=\u001b[39m accuracy_score(y_val, y_val_pred_xgb)\n",
329
      "File \u001b[0;32m~/anaconda3/envs/DataScience/lib/python3.10/site-packages/xgboost/core.py:620\u001b[0m, in \u001b[0;36mrequire_keyword_args.<locals>.throw_if.<locals>.inner_f\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m    618\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, arg \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mzip\u001b[39m(sig\u001b[38;5;241m.\u001b[39mparameters, args):\n\u001b[1;32m    619\u001b[0m     kwargs[k] \u001b[38;5;241m=\u001b[39m arg\n\u001b[0;32m--> 620\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
330
      "\u001b[0;31mTypeError\u001b[0m: XGBClassifier.fit() got an unexpected keyword argument 'objective'"
331
     ]
332
    }
333
   ],
334
   "source": [
335
    "\n",
336
    "import xgboost as xgb\n",
337
    "from sklearn.model_selection import cross_val_score\n",
338
    "from sklearn.metrics import accuracy_score, precision_score, recall_score\n",
339
    "import mlflow\n",
340
    "import warnings\n",
341
    "warnings.filterwarnings(\"ignore\")\n",
342
    "# import precision_recall_fscore_support\n",
343
    "from sklearn.metrics import precision_recall_fscore_support\n",
344
    "\n",
345
    "mlflow.sklearn.autolog(disable=True)\n",
346
    "\n",
347
    "with mlflow.start_run(run_name=\"XGB_FE_Recall\"):\n",
348
    "    class_counts_train = [y_train[y_train == i].count() / y_train.count() for i in range(7)]\n",
349
    "    class_counts_val = [y_val[y_val == i].count() / y_val.count() for i in range(7)]\n",
350
    "    target_drift = [(train_count - val_count) for train_count, val_count in zip(class_counts_train, class_counts_val)]\n",
351
    "    print(f\"Target Drift For Each Class {target_drift}\")\n",
352
    "    mlflow.log_params({'Target_Drift_' + str(i): freq for i, freq in enumerate(target_drift)})\n",
353
    "\n",
354
    "    xgb_classifier = xgb.XGBClassifier()\n",
355
    "    cv_scores_xgb = cross_val_score(xgb_classifier, X_train, y_train, cv=5, scoring='recall')\n",
356
    "    print(\"Cross-validation Scores (XGBoost):\", cv_scores_xgb)\n",
357
    "    print(\"Mean CV Accuracy (XGBoost):\", cv_scores_xgb.mean())\n",
358
    "    xgb_classifier.fit(X_train, y_train)\n",
359
    "    y_val_pred_xgb = xgb_classifier.predict(X_val)\n",
360
    "    accuracy_xgb = accuracy_score(y_val, y_val_pred_xgb)\n",
361
    "    precision_xgb = precision_score(y_val, y_val_pred_xgb, average='weighted')\n",
362
    "    recall_xgb = recall_score(y_val, y_val_pred_xgb, average='weighted')\n",
363
    "    f1_xgb = 2 * (precision_xgb * recall_xgb) / (precision_xgb + recall_xgb)\n",
364
    "    print(\"\\nAccuracy (XGBoost):\", accuracy_xgb)\n",
365
    "    print(\"Precision (XGBoost):\", precision_xgb)\n",
366
    "    print(\"Recall (XGBoost):\", recall_xgb)\n",
367
    "    print(\"F1 (XGBoost):\", f1_xgb)\n",
368
    "    mlflow.log_metric('accuracy', accuracy_xgb)\n",
369
    "    mlflow.log_metric('precision', precision_xgb)\n",
370
    "    mlflow.log_metric('recall', recall_xgb)\n",
371
    "    mlflow.log_metric('f1', f1_xgb)\n",
372
    "\n",
373
    "    precision_per_class, recall_per_class, f1_per_class, support_per_class = precision_recall_fscore_support(y_val, y_val_pred_xgb, average=None)\n",
374
    "    for i in range(len(recall_per_class)):\n",
375
    "        print(f\"Recall for class {i}: {recall_per_class[i]}\")\n",
376
    "        mlflow.log_metric(f'recall_class_{i}', recall_per_class[i])\n",
377
    "\n",
378
    "    mlflow.xgboost.log_model(xgb_classifier, 'model')\n",
379
    "    mlflow.set_tag('experiments', 'Arham A.')\n",
380
    "    mlflow.set_tag('model_name', 'XGBoost')\n",
381
    "    mlflow.set_tag('preprocessing', 'Yes')\n"
382
   ]
383
  },
384
  {
385
   "cell_type": "code",
386
   "execution_count": 7,
387
   "metadata": {},
388
   "outputs": [
389
    {
390
     "name": "stdout",
391
     "output_type": "stream",
392
     "text": [
393
      "[2024-04-25 14:27:06 -0400] [12399] [INFO] Starting gunicorn 21.2.0\n",
394
      "[2024-04-25 14:27:06 -0400] [12399] [INFO] Listening at: http://127.0.0.1:5000 (12399)\n",
395
      "[2024-04-25 14:27:06 -0400] [12399] [INFO] Using worker: sync\n",
396
      "[2024-04-25 14:27:06 -0400] [12400] [INFO] Booting worker with pid: 12400\n",
397
      "[2024-04-25 14:27:06 -0400] [12401] [INFO] Booting worker with pid: 12401\n",
398
      "[2024-04-25 14:27:06 -0400] [12402] [INFO] Booting worker with pid: 12402\n",
399
      "[2024-04-25 14:27:06 -0400] [12403] [INFO] Booting worker with pid: 12403\n",
400
      "^C\n",
401
      "[2024-04-25 14:34:22 -0400] [12399] [INFO] Handling signal: int\n",
402
      "[2024-04-25 14:34:22 -0400] [12400] [INFO] Worker exiting (pid: 12400)\n",
403
      "[2024-04-25 14:34:22 -0400] [12401] [INFO] Worker exiting (pid: 12401)\n",
404
      "[2024-04-25 14:34:22 -0400] [12403] [INFO] Worker exiting (pid: 12403)\n",
405
      "[2024-04-25 14:34:22 -0400] [12402] [INFO] Worker exiting (pid: 12402)\n"
406
     ]
407
    }
408
   ],
409
   "source": [
410
    "!mlflow ui --backend-store-uri \"sqlite:////Users/arham/Downloads/Projects/03-Experiments/new_mlflow.db\""
411
   ]
412
  },
413
  {
414
   "cell_type": "code",
415
   "execution_count": null,
416
   "metadata": {},
417
   "outputs": [],
418
   "source": []
419
  }
420
 ],
421
 "metadata": {
422
  "kernelspec": {
423
   "display_name": "DataScience",
424
   "language": "python",
425
   "name": "python3"
426
  },
427
  "language_info": {
428
   "codemirror_mode": {
429
    "name": "ipython",
430
    "version": 3
431
   },
432
   "file_extension": ".py",
433
   "mimetype": "text/x-python",
434
   "name": "python",
435
   "nbconvert_exporter": "python",
436
   "pygments_lexer": "ipython3",
437
   "version": "3.10.13"
438
  }
439
 },
440
 "nbformat": 4,
441
 "nbformat_minor": 2
442
}