a b/tests/test_eval_hook.py
1
# Copyright (c) OpenMMLab. All rights reserved.
2
import logging
3
import tempfile
4
from unittest.mock import MagicMock, patch
5
6
import mmcv.runner
7
import pytest
8
import torch
9
import torch.nn as nn
10
from mmcv.runner import obj_from_dict
11
from torch.utils.data import DataLoader, Dataset
12
13
from mmseg.apis import single_gpu_test
14
from mmseg.core import DistEvalHook, EvalHook
15
16
17
class ExampleDataset(Dataset):
    """Minimal single-item dataset used as a stand-in in the hook tests."""

    def __getitem__(self, idx):
        # Every index yields the same dummy sample.
        return {'img': torch.tensor([1]), 'img_metas': {}}

    def __len__(self):
        return 1
25
26
27
class ExampleModel(nn.Module):
    """Tiny identity model exposing the interface the mmcv runners expect."""

    def __init__(self):
        super().__init__()
        # Present so code probing for a test config finds the attribute.
        self.test_cfg = None
        self.conv = nn.Conv2d(3, 3, 3)

    def forward(self, img, img_metas, test_mode=False, **kwargs):
        # Identity forward: the input image doubles as the "prediction".
        return img

    def train_step(self, data_batch, optimizer):
        # Report the forward output directly as the loss.
        return dict(loss=self.forward(**data_batch))
40
41
42
def test_iter_eval_hook():
    """Check EvalHook input validation and the iter-based evaluation flow."""
    # EvalHook must reject anything that is not a DataLoader (here: a list).
    # Fixed: `num_worker` -> `num_workers` (the typo made DataLoader itself
    # raise TypeError, so EvalHook's own type check was never exercised), and
    # use a Dataset, not a Module, as the DataLoader's dataset.
    with pytest.raises(TypeError):
        test_dataset = ExampleDataset()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_workers=0,
                shuffle=False)
        ]
        EvalHook(data_loader)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test EvalHook
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = EvalHook(data_loader, by_epoch=False, efficient_test=True)
        runner = mmcv.runner.IterBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        # The hook must hand the pre_eval results to dataset.evaluate.
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)
78
79
80
def test_epoch_eval_hook():
    """Check EvalHook input validation and the epoch-based interval flow."""
    # EvalHook must reject anything that is not a DataLoader (here: a list).
    # Fixed: `num_worker` -> `num_workers` (the typo made DataLoader itself
    # raise TypeError, so EvalHook's own type check was never exercised), and
    # use a Dataset, not a Module, as the DataLoader's dataset.
    with pytest.raises(TypeError):
        test_dataset = ExampleDataset()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_workers=0,
                shuffle=False)
        ]
        EvalHook(data_loader, by_epoch=True)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test EvalHook with interval
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = EvalHook(data_loader, by_epoch=True, interval=2)
        runner = mmcv.runner.EpochBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        # interval=2 over 2 epochs -> evaluate must run exactly once.
        test_dataset.evaluate.assert_called_once_with([torch.tensor([1])],
                                                      logger=runner.logger)
116
117
118
def multi_gpu_test(model,
                   data_loader,
                   tmpdir=None,
                   gpu_collect=False,
                   pre_eval=False):
    """Stand-in for ``mmseg.apis.multi_gpu_test`` installed via ``@patch``.

    Ignores the distributed-only arguments and delegates to
    ``single_gpu_test`` with ``pre_eval=True`` (pre-eval is set by default
    when training).
    """
    return single_gpu_test(model, data_loader, pre_eval=True)
126
127
128
@patch('mmseg.apis.multi_gpu_test', multi_gpu_test)
def test_dist_eval_hook():
    """Check DistEvalHook input validation and the iter-based flow."""
    # DistEvalHook must reject anything that is not a DataLoader (a list).
    # Fixed: `num_worker` -> `num_workers` (the typo made DataLoader itself
    # raise TypeError, so DistEvalHook's own type check was never exercised),
    # and use a Dataset, not a Module, as the DataLoader's dataset.
    with pytest.raises(TypeError):
        test_dataset = ExampleDataset()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_workers=0,
                shuffle=False)
        ]
        DistEvalHook(data_loader)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test DistEvalHook
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = DistEvalHook(
            data_loader, by_epoch=False, efficient_test=True)
        runner = mmcv.runner.IterBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        # The hook must hand the pre_eval results to dataset.evaluate.
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)
166
167
168
@patch('mmseg.apis.multi_gpu_test', multi_gpu_test)
def test_dist_eval_hook_epoch():
    """Check DistEvalHook input validation and the epoch-based flow."""
    # DistEvalHook must reject anything that is not a DataLoader (a list).
    # Fixed: `num_worker` -> `num_workers` (the typo made DataLoader itself
    # raise TypeError, so DistEvalHook's own type check was never exercised);
    # use a Dataset, not a Module, as the DataLoader's dataset; and pass
    # by_epoch=True to mirror the epoch-based variant under test (matches
    # test_epoch_eval_hook).
    with pytest.raises(TypeError):
        test_dataset = ExampleDataset()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_workers=0,
                shuffle=False)
        ]
        DistEvalHook(data_loader, by_epoch=True)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test DistEvalHook
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = DistEvalHook(data_loader, by_epoch=True, interval=2)
        runner = mmcv.runner.EpochBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        # The hook must hand the pre_eval results to dataset.evaluate.
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)