# strokepred.py
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import f1_score, classification_report

# Load the datasets
train_set = pd.read_csv('/path/to/stroke_train_set.csv')
test_set = pd.read_csv('/path/to/stroke_test_set_nogt.csv')
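
# Optional sanity check (a minimal sketch, assuming the CSVs above load with a
# binary 'stroke' target column): inspect shapes, missing values, and class
# balance. Stroke data is typically heavily imbalanced, which is why F1
# (rather than accuracy) is used for evaluation below.
print(train_set.shape, test_set.shape)
print(train_set.isna().sum())
print(train_set['stroke'].value_counts(normalize=True))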
# Data Preprocessing
# Separating the target variable and features in the training set
X = train_set.drop('stroke', axis=1)
y = train_set['stroke']

# Identifying numerical and categorical columns
numerical_cols = X.select_dtypes(include=['float64', 'int64']).columns
categorical_cols = X.select_dtypes(include=['object']).columns
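
# Optional: print the detected columns to confirm the dtype-based split is
# what you expect (the actual names depend entirely on the CSV contents)
print('numerical:', list(numerical_cols))
print('categorical:', list(categorical_cols))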
# Creating a preprocessor with transformations for different column types
preprocessor = ColumnTransformer(
    transformers=[
        ('num', SimpleImputer(strategy='median'), numerical_cols),
        # handle_unknown='ignore' prevents transform from failing on
        # categories that appear only in the test set
        ('cat', OneHotEncoder(handle_unknown='ignore'), categorical_cols)
    ])
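
# Optional: inspect the expanded feature names the preprocessor will produce
# (assumes scikit-learn >= 1.1, where every step exposes get_feature_names_out)
print(preprocessor.fit(X).get_feature_names_out()[:10])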
# Creating a pipeline with preprocessing and a classifier
model = Pipeline(steps=[('preprocessor', preprocessor),
                        ('classifier', RandomForestClassifier(random_state=42))])
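
# Note: tree ensembles do not need feature scaling, so no StandardScaler is
# used here; for a target this imbalanced, class_weight='balanced' is one
# RandomForestClassifier option worth trying (defaults are kept in this script)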
# Splitting the dataset into training and validation sets
# Stratify on y so the rare stroke-positive class is proportionally
# represented in both splits
X_train, X_val, y_train, y_val = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)

# Fit the model
model.fit(X_train, y_train)
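
# Optional: a cross-validated F1 estimate for the random-forest pipeline (a
# sketch; cross_val_score clones and refits the full pipeline on each fold)
from sklearn.model_selection import cross_val_score
cv_f1 = cross_val_score(model, X_train, y_train, cv=5, scoring='f1')
print(f"CV F1: {cv_f1.mean():.3f} +/- {cv_f1.std():.3f}")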
# Predict on validation set and evaluate
y_pred = model.predict(X_val)
f1 = f1_score(y_val, y_pred)
print(f"F1 Score: {f1}")
print(classification_report(y_val, y_pred))
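
# Optional: the confusion matrix makes the precision/recall trade-off on the
# rare positive class explicit (rows are true labels, columns predictions)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_val, y_pred))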
# Hyperparameter Tuning and Alternative Model
model_gb = Pipeline(steps=[
    ('preprocessor', preprocessor),
    ('classifier', GradientBoostingClassifier(random_state=42))
])

param_grid = {
    'classifier__n_estimators': [100, 200],
    'classifier__learning_rate': [0.01, 0.1],
    'classifier__max_depth': [3, 5]
}
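
# The grid above has 2 * 2 * 2 = 8 candidates, so cv=5 below means 40 pipeline
# fits; n_jobs=-1 spreads them across all available cores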
grid_search = GridSearchCV(model_gb, param_grid, cv=5, scoring='f1', n_jobs=-1)
grid_search.fit(X_train, y_train)
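
# Optional: best_score_ is the mean cross-validated F1 of the winning
# parameter combination, useful to compare against the validation F1 below
print(f"Best CV F1: {grid_search.best_score_:.3f}")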
# Best parameters and model performance
best_params = grid_search.best_params_
y_pred_gb = grid_search.predict(X_val)
f1_gb = f1_score(y_val, y_pred_gb)
print(f"Best Parameters: {best_params}")
print(f"F1 Score (Gradient Boosting): {f1_gb}")
print(classification_report(y_val, y_pred_gb))

# Preparing final predictions on the test set
# Use whichever pipeline scored better on the validation set
best_model = grid_search if f1_gb >= f1 else model
final_predictions = best_model.predict(test_set)
# Note: the row index is used as the ID; if the test CSV ships an explicit
# id column, use that instead
submission_df = pd.DataFrame({'ID': test_set.index, 'stroke': final_predictions})

# Saving the submission file
submission_df.to_csv('/path/to/final_submission.csv', index=False)
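
# Optional: a last sanity check on the saved submission (a minimal sketch)
print(submission_df.head())
print(submission_df['stroke'].value_counts())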