
diff --git a/AICare-baselines/dl_tune.py b/AICare-baselines/dl_tune.py
import hydra
import lightning as L
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
from lightning.pytorch.loggers import CSVLogger, WandbLogger
from omegaconf import DictConfig, OmegaConf

import wandb
from datasets.loader.datamodule import EhrDataModule
from datasets.loader.load_los_info import get_los_info
from pipelines import DlPipeline

# Uncomment to run W&B offline or with debug logging:
# import os
# os.environ['WANDB_MODE'] = 'offline'
# os.environ['WANDB_LOG_LEVEL'] = 'debug'

project_name = "aicare"

# Compose the static Hydra config once; each sweep run below starts from these defaults.
hydra.initialize(config_path="configs", version_base=None)
cfg = OmegaConf.to_container(hydra.compose(config_name="config"))

# Per-dataset feature dimensions: demographic features and lab-test features.
dataset_config = {
    'tjh': {'demo_dim': 2, 'lab_dim': 73},
    'cdsl': {'demo_dim': 2, 'lab_dim': 97},
    'mimic-iii': {'demo_dim': 2, 'lab_dim': 59},
    'mimic-iv': {'demo_dim': 2, 'lab_dim': 59},
}

# Grid search over models and hyperparameters; W&B selects runs by minimizing validation loss.
sweep_configuration = {
    'method': 'grid',
    'name': 'sweep_dl_mimic',
    'metric': {'goal': 'minimize', 'name': 'val_loss'},
    'parameters': {
        'task': {'values': ['outcome']},
        'dataset': {'values': ['mimic-iii', 'mimic-iv']},
        'model': {'values': ['MLP', 'GRU', 'RNN', 'LSTM', 'TCN', 'Transformer', 'AdaCare', 'Agent', 'GRASP', 'RETAIN', 'StageNet', 'MCGRU']},
        'batch_size': {'values': [1024]},
        'hidden_dim': {'values': [64, 128]},
        'learning_rate': {'values': [1e-2, 1e-3, 1e-4]},
        'fold': {'values': [0]},
        'seed': {'values': [0]},
    }
}

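# The grid above yields 2 datasets x 12 models x 2 hidden dims x 3 learning rates = 144 runs.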
sweep_id = wandb.sweep(sweep_configuration, project=project_name)

def run_experiment():
    # Each sweep trial: the sweep parameters override matching keys in the static Hydra config.
    run = wandb.init(project=project_name, config=cfg)
    wandb_logger = WandbLogger(project=project_name, log_model=True)  # log only the last (best) checkpoint
    config = wandb.config
    config.update(dataset_config[config['dataset']], allow_val_change=True)
    los_config = get_los_info(f'datasets/{config["dataset"]}/processed/fold_{config["fold"]}')
    main_metric = "mae" if config["task"] == "los" else "auprc"
    config.update({"los_info": los_config, "main_metric": main_metric})

    # data: load the preprocessed fold for the chosen dataset
    dm = EhrDataModule(f'datasets/{config["dataset"]}/processed/fold_{config["fold"]}', batch_size=config["batch_size"])

    # Early-stopping and checkpoint callbacks, keyed to the task's monitored metric
    if config["task"] in ["outcome", "multitask"]:
        early_stopping_callback = EarlyStopping(monitor="auprc", patience=config["patience"], mode="max")
        checkpoint_callback = ModelCheckpoint(monitor="auprc", mode="max")
    elif config["task"] == "los":
        early_stopping_callback = EarlyStopping(monitor="mae", patience=config["patience"], mode="min")
        checkpoint_callback = ModelCheckpoint(monitor="mae", mode="min")
    else:
        raise ValueError(f'Unsupported task: {config["task"]}')

    L.seed_everything(config["seed"])  # seed for reproducibility

    # train/val/test
    pipeline = DlPipeline(config.as_dict())
    # devices=[1] pins training to the second GPU; num_sanity_val_steps=0 skips the pre-fit sanity check
    trainer = L.Trainer(accelerator="gpu", devices=[1], max_epochs=config["epochs"], logger=wandb_logger, callbacks=[early_stopping_callback, checkpoint_callback], num_sanity_val_steps=0)
    trainer.fit(pipeline, dm)
    print("Best Score:", checkpoint_callback.best_model_score)

if __name__ == "__main__":
    wandb.agent(sweep_id, function=run_experiment, project=project_name)
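
Note: because `wandb.sweep` runs at module level, every launch of this script registers a brand-new sweep. A minimal sketch of one way to let additional machines join an existing sweep instead (the `SWEEP_ID` environment variable is this sketch's assumption, not something the repo defines):

# Hypothetical variant of the script's tail: move sweep creation under __main__ and
# reuse an existing sweep when SWEEP_ID is set (exported from the machine that created it).
import os

if __name__ == "__main__":
    sweep_id = os.environ.get("SWEEP_ID") or wandb.sweep(sweep_configuration, project=project_name)
    wandb.agent(sweep_id, function=run_experiment, project=project_name)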