# bert_mixup/early_mixup/eval.py
import torch
from utils import flat_auroc_score
def evaluate_model(args, model_mlp, test_dataloader, criterion, set_device):
    """Run one evaluation pass of ``model_mlp`` over ``test_dataloader``.

    Puts the model in eval mode, computes the mean test loss and the AUROC
    over the *entire* test set, prints the AUROC, and appends a CSV-style
    result line to ``args.out_file``.

    Args:
        args: namespace providing ``out_file``, ``dataset_name``,
            ``samples_per_class`` and ``n_augment`` (used for the log line).
        model_mlp: classifier called as ``model_mlp(batch)`` returning logits.
        test_dataloader: iterable yielding ``(batch, targets)`` pairs.
        criterion: loss callable taking ``(logits, targets)``.
        set_device: torch device inputs are moved to before the forward pass.

    Returns:
        None. Results are printed and appended to ``args.out_file``.

    Raises:
        ValueError: if ``test_dataloader`` yields no batches (the original
            code would crash with ZeroDivisionError / an undefined score).
    """
    test_loss_history, auroc_test_history = list(), list()
    model_mlp.eval()
    batch_logits = []  # logits from EVERY batch, not just the last one
    with torch.no_grad():
        test_loss_scores = list()
        y_true_val, y_pred_val = list(), list()

        ## perform test pass
        for batch, targets in test_dataloader:
            ## perform forward pass
            batch = batch.type(torch.FloatTensor).to(set_device)
            pred = model_mlp(batch)
            # BUG FIX: the original kept only the last batch's logits
            # (``predictions = pred``) while ``y_true_val`` accumulated labels
            # from all batches, so the AUROC below was computed on mismatched
            # predictions/labels for any multi-batch test set. Accumulate
            # every batch's logits instead and concatenate them afterwards.
            batch_logits.append(pred.detach())
            preds = torch.max(pred, 1)[1]  # predicted class indices

            ## accumulate predictions per batch for the epoch
            y_pred_val += [x.item() for x in preds.detach().cpu().numpy()]
            targets = torch.LongTensor([x.item() for x in list(targets)])
            y_true_val += [x.item() for x in targets.detach().cpu().numpy()]

            ## computing validate loss
            loss = criterion(
                pred.to(set_device), targets.to(set_device)
            )  ## compute loss

            ## accumulate validate loss
            test_loss_scores.append(loss.item())

        if not test_loss_scores:
            raise ValueError("test_dataloader yielded no batches")

        ## accumulate loss and auroc for this evaluation pass
        test_loss_history.append(sum(test_loss_scores) / len(test_loss_scores))
        predictions = torch.cat(batch_logits, dim=0)  # (n_samples, n_classes)
        auroc = flat_auroc_score(predictions, y_true_val)
        auroc_test_history.append(auroc)

        print(f"Test => AUROC score: {auroc_test_history[-1]} ")
        with open(args.out_file, "a+") as f:
            f.write(
                f"{args.dataset_name}, {args.samples_per_class}, {args.n_augment}, {auroc_test_history[-1]}\n"
            )