# scripts/collect_shap.py
1
import argparse
2
import inspect
3
import os
4
import sys
5
6
# import warnings
7
from datetime import datetime
8
9
import numpy as np
10
import pandas as pd
11
import shap
12
from joblib import delayed
13
from sklearn.base import clone
14
from sklearn.model_selection import StratifiedKFold
15
from tqdm import tqdm
16
17
from _init_scripts import PredictionTask
18
from _utils import read_yaml, write_yaml, ProgressParallel
19
20
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
21
parentdir = os.path.dirname(currentdir)
22
sys.path.insert(0, parentdir)
23
24
from multipit.multi_model.latefusion import LateFusionClassifier
25
26
27
def main(params):
    """
    Run the cross-validated SHAP-collection experiment described by a config file.

    Reads the YAML config, creates a fresh results directory, runs the
    per-repeat SHAP collection (optionally in parallel), then saves one CSV of
    SHAP values per modality and, for logistic-regression classifiers, one
    ``.npy`` file of linear coefficients per modality.

    Parameters
    ----------
    params: argparse.Namespace
        Must expose ``config`` (path to the YAML config file) and
        ``save_path`` (directory under which the results folder is created).
    """
    # 0. Read config file and save it in the results directory
    config = read_yaml(params.config)
    save_name = config["save_name"]
    if save_name is None:
        run_id = datetime.now().strftime(r"%m%d_%H%M%S")
        save_name = "exp_" + run_id
    save_dir = os.path.join(params.save_path, save_name)
    # makedirs also creates missing parents of save_path; exist_ok=False keeps
    # the original behavior of failing loudly rather than overwriting results.
    os.makedirs(save_dir, exist_ok=False)
    write_yaml(config, os.path.join(save_dir, "config.yaml"))

    # 1. Fix random seed for reproducibility
    seed = config["latefusion"]["seed"]
    np.random.seed(seed)

    # 2. Load data and define pipelines for each modality
    ptask = PredictionTask(config, survival=False, integration="late")
    ptask.load_data()
    X, y = ptask.data_concat.values, ptask.labels.loc[ptask.data_concat.index].values
    ptask.init_pipelines_latefusion()

    # 3. Collect SHAP values across repeats (parallelized over repeats)
    n_jobs_repeats = config["parallelization"]["n_jobs_repeats"]
    parallel = ProgressParallel(
        n_jobs=n_jobs_repeats,
        total=config["latefusion"]["n_repeats"],
    )
    list_shap = parallel(
        delayed(_fun_parallel)(
            ptask,
            X,
            y,
            r,
            # Disable the per-fold progress bar when repeats run in parallel,
            # otherwise the bars interleave and become unreadable.
            disable_infos=(n_jobs_repeats is not None) and (n_jobs_repeats > 1),
        )
        for r in range(config["latefusion"]["n_repeats"])
    )

    shap_explain = {"clinical": [], "radiomics": [], "pathomics": [], "RNA": []}
    coefs_LR = {"clinical": [], "radiomics": [], "pathomics": [], "RNA": []}

    # 4. Aggregate the per-repeat SHAP dataframes per modality and save them
    for results in list_shap:
        for moda, shapley in results[0].items():
            shap_explain[moda].append(shapley)

    for key, val in shap_explain.items():
        if not val:
            # Modality absent from this run: pd.concat([]) would raise.
            continue
        df_shap = pd.concat(val, axis=0, join="outer")
        df_shap.to_csv(os.path.join(save_dir, "Shap_" + key + ".csv"))

    # 5. For logistic regression also aggregate and save the coefficients
    if config["classifier"]["type"] == "LR":
        for results in list_shap:
            for moda, coefs in results[1].items():
                coefs_LR[moda].append(coefs)

        for key, val in coefs_LR.items():
            if not val:
                # Modality absent from this run: np.stack([]) would raise.
                continue
            coefficients = np.stack(val, axis=-1)
            np.save(os.path.join(save_dir, "coef_LR_" + key + ".npy"), coefficients)
87
def _fun_parallel(prediction_task, X, y, r, disable_infos):
    """
    Collect SHAP values for several unimodal classifiers with cross-validation.

    Parameters
    ----------
    prediction_task: PredictionTask object

    X: 2D array of shape (n_samples, n_features)
        Concatenation of the different modalities

    y: 1D array of shape (n_samples,)
        Binary outcome

    r: int
        Repeat number, also used to seed the cross-validation splits.

    disable_infos: bool
        Whether to disable the per-fold tqdm progress bar.

    Returns
    -------
    shap_dict: dictionary
        Dictionary whose keys correspond to the different modalities (e.g., "RNA", "clinical") and the items correspond
        to pandas dataframe of size (n_samples, n_features) that contain the SHAP values collected across the test sets
        of the cross-validation scheme.

    coefs_dict: dictionary or None
        Dictionary whose keys correspond to the different modalities (e.g., "RNA", "clinical") and the items correspond
        to arrays of size (n_folds, n_features) that contain the linear coefficients collected across the different
        folds of the cross-validation scheme. None if the classifier type is not linear.
    """
    # Seed the global RNG once per repeat so any estimator relying on the
    # global numpy state stays reproducible (the original code did this as a
    # hidden side effect of np.random.seed(r) inside the constructor call).
    np.random.seed(r)

    # BUG FIX: the original passed random_state=np.random.seed(r), but
    # np.random.seed() returns None, so the splitter received random_state=None
    # and was effectively unseeded. Pass the repeat index explicitly, and seed
    # the outer CV the same way for per-repeat reproducibility.
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=r)
    late_clf = LateFusionClassifier(
        estimators=prediction_task.late_estimators,
        cv=StratifiedKFold(n_splits=10, shuffle=True, random_state=r),
        **prediction_task.config["latefusion"]["args"]
    )

    collect_coefs = prediction_task.config["classifier"]["type"] == "LR"

    shap_dict = {name: [] for name, *_ in late_clf.estimators}
    coef_dict = {name: [] for name, *_ in late_clf.estimators} if collect_coefs else None

    for fold_index, (train_index, test_index) in tqdm(
        enumerate(cv.split(np.zeros(len(y)), y)),
        leave=False,
        total=cv.get_n_splits(np.zeros(len(y))),
        disable=disable_infos,
    ):
        X_train, y_train, X_test, y_test = (
            X[train_index, :],
            y[train_index],
            X[test_index, :],
            y[test_index],
        )
        # Fit late fusion on the training set of the fold
        clf = clone(late_clf)
        clf.fit(X_train, y_train)
        # Collect SHAP values on the test set of the fold for each unimodal classifier
        for ind, (name, estim, features) in enumerate(clf.fitted_estimators_):
            # Keep only samples whose fraction of missing features stays below
            # the classifier's missing threshold (both background and explain sets).
            X_background = X_train[:, features]
            bool_mask = ~(
                np.sum(np.isnan(X_background), axis=1)
                > clf.missing_threshold * len(features)
            )
            X_explain = X_test[:, features]
            bool_mask_explain = ~(
                np.sum(np.isnan(X_explain), axis=1)
                > clf.missing_threshold * len(features)
            )
            if clf.calibration is not None:
                # Explain the calibrated probabilities. Bind ind/estim as
                # lambda defaults to avoid the late-binding closure pitfall
                # should the callable outlive this loop iteration.
                explainer = shap.Explainer(
                    lambda x, _ind=ind, _estim=estim: (
                        clf.fitted_meta_estimators_[(_ind,)].predict_proba(
                            _estim.predict_proba(x)[:, 1].reshape(-1, 1)
                        )
                    ),
                    X_background[bool_mask, :],
                )
            else:
                explainer = shap.Explainer(
                    lambda x, _estim=estim: _estim.predict_proba(x),
                    X_background[bool_mask, :],
                )
            shap_values = explainer(X_explain[bool_mask_explain, :])
            # Keep only the SHAP values for the positive class (index 1)
            shap_df = pd.DataFrame(
                shap_values.values[:, :, 1],
                columns=prediction_task.data_concat.columns[features],
                index=prediction_task.data_concat.index.values[
                    test_index[bool_mask_explain]
                ],
            )
            shap_df["fold_index"] = fold_index
            shap_df["repeat"] = r
            shap_dict[name].append(shap_df)
            # Also collect coefficients for logistic regression
            if collect_coefs:
                if name == "RNA":
                    # NOTE(review): RNA coefficients are zero-padded to a fixed
                    # width of 40 — presumably the maximum number of RNA
                    # features selected across folds; confirm against the
                    # RNA pipeline configuration.
                    temp = np.zeros((1, 40))
                    temp[:, : estim[-1].coef_.shape[1]] = estim[-1].coef_
                    coef_dict[name].append(temp)
                else:
                    coef_dict[name].append(estim[-1].coef_)

    if collect_coefs:
        coefs_dict = {name: np.vstack(value) for name, value in coef_dict.items()}
    else:
        coefs_dict = None

    shap_dict = {
        name: pd.concat(value, axis=0, join="outer")
        for name, value in shap_dict.items()
    }

    return shap_dict, coefs_dict
204
if __name__ == "__main__":
    # CLI entry point: both arguments are mandatory — without required=True a
    # missing flag would silently pass None to main() and fail later with an
    # obscure os.path error instead of a clear argparse message.
    args = argparse.ArgumentParser(description="Collect Shap")
    args.add_argument(
        "-c",
        "--config",
        type=str,
        required=True,
        help="config file path",
    )
    args.add_argument(
        "-s",
        "--save_path",
        type=str,
        required=True,
        help="save path",
    )
    main(params=args.parse_args())