Diff of /AICare-baselines/train.py [000000] .. [0f1df3]


b/AICare-baselines/train.py
import lightning as L
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
from lightning.pytorch.loggers import CSVLogger

# from configs.experiments_mimic import hparams
from configs.exp import hparams
from datasets.loader.datamodule import EhrDataModule
from datasets.loader.load_los_info import get_los_info
from pipelines import DlPipeline, MlPipeline

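# Classical ML baselines (RF, XGBoost, etc.) are fit in a single pass on CPU,
# so no early-stopping or checkpointing callbacks are needed here.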
def run_ml_experiment(config):
    los_config = get_los_info(f'datasets/{config["dataset"]}/processed/fold_{config["fold"]}')
    config.update({"los_info": los_config})

    # data
    dm = EhrDataModule(f'datasets/{config["dataset"]}/processed/fold_{config["fold"]}', batch_size=config["batch_size"])
    # logger
    checkpoint_filename = f'{config["model"]}-fold{config["fold"]}-seed{config["seed"]}'
    logger = CSVLogger(save_dir="logs", name=f'train/{config["dataset"]}/{config["task"]}', version=checkpoint_filename)
    L.seed_everything(config["seed"])  # seed for reproducibility

    # train/val/test
    pipeline = MlPipeline(config)
    trainer = L.Trainer(accelerator="cpu", max_epochs=1, logger=logger, num_sanity_val_steps=0)
    trainer.fit(pipeline, dm)
    perf = pipeline.cur_best_performance
    return perf

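# Deep learning baselines train on GPU with early stopping and checkpointing;
# the monitored metric depends on the task (AUPRC for outcome/multitask,
# MAE for the length-of-stay "los" task).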
def run_dl_experiment(config):
    los_config = get_los_info(f'datasets/{config["dataset"]}/processed/fold_{config["fold"]}')
    config.update({"los_info": los_config})

    # data
    dm = EhrDataModule(f'datasets/{config["dataset"]}/processed/fold_{config["fold"]}', batch_size=config["batch_size"])
    # logger
    checkpoint_filename = f'{config["model"]}-fold{config["fold"]}-seed{config["seed"]}'
    if config.get("time_aware", False):
        checkpoint_filename += "-ta"  # time-aware loss applied
    logger = CSVLogger(save_dir="logs", name=f'train/{config["dataset"]}/{config["task"]}', version=checkpoint_filename)

    # early-stopping and checkpoint callbacks
    if config["task"] in ["outcome", "multitask"]:
        early_stopping_callback = EarlyStopping(monitor="auprc", patience=config["patience"], mode="max")
        checkpoint_callback = ModelCheckpoint(filename="best", monitor="auprc", mode="max")
    elif config["task"] == "los":
        early_stopping_callback = EarlyStopping(monitor="mae", patience=config["patience"], mode="min")
        checkpoint_callback = ModelCheckpoint(filename="best", monitor="mae", mode="min")
    else:
        raise ValueError(f'Unsupported task: {config["task"]}')

    L.seed_everything(config["seed"])  # seed for reproducibility

    # train/val/test
    pipeline = DlPipeline(config)
    # devices=[1] pins training to GPU index 1; adjust for the local machine
    trainer = L.Trainer(accelerator="gpu", devices=[1], max_epochs=config["epochs"], logger=logger, callbacks=[early_stopping_callback, checkpoint_callback], num_sanity_val_steps=0)
    trainer.fit(pipeline, dm)
    perf = pipeline.cur_best_performance
    return perf

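# Entry point: run every hyperparameter config across the selected folds and
# seeds, printing the best validation performance for each run.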
if __name__ == "__main__":
    best_hparams = hparams  # [TO-SPECIFY]
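    # Hypothetical shape of one `hparams` entry (the real values live in
    # configs/exp.py); the keys below are the ones this script reads:
    #   {"model": "GRU", "dataset": "mimic-iv", "task": "outcome",
    #    "batch_size": 64, "epochs": 50, "patience": 10, "time_aware": False}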
    for config in best_hparams:
        run_func = run_ml_experiment if config["model"] in ["RF", "DT", "GBDT", "XGBoost", "CatBoost", "LR", "LightGBM"] else run_dl_experiment
        seeds = [0]  # [0,1,2,3,4]
        folds = ['nshot']
        for fold in folds:
            config["fold"] = fold
            for seed in seeds:
                config["seed"] = seed
                perf = run_func(config)
                print(f"{config}, Val Performance: {perf}")