tests/test_runtime/test_apis_test.py

# Copyright (c) OpenMMLab. All rights reserved.
import sys
import warnings
from unittest.mock import MagicMock, Mock, patch

import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset

# TODO import test functions from mmcv and delete them from mmaction2
try:
    from mmcv.engine import (collect_results_cpu, multi_gpu_test,
                             single_gpu_test)
    pytest.skip(
        'Test functions are supported in MMCV', allow_module_level=True)
except (ImportError, ModuleNotFoundError):
    warnings.warn(
        'DeprecationWarning: single_gpu_test, multi_gpu_test, '
        'collect_results_cpu, collect_results_gpu from mmaction2 will be '
        'deprecated. Please install mmcv through master branch.')
    from mmaction.apis.test import (collect_results_cpu, multi_gpu_test,
                                    single_gpu_test)
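

# Dummy model: forward() ignores its inputs and returns the current value of
# an internal counter, so every batch yields one deterministic result.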
class OldStyleModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 1)
        self.cnt = 0

    def forward(self, *args, **kwargs):
        result = [self.cnt]
        self.cnt += 1
        return result
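

# Same dummy model, with the train_step/val_step hooks stubbed out.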
class Model(OldStyleModel):

    def train_step(self):
        pass

    def val_step(self):
        pass
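

# Eight-sample dummy dataset; every item is a dict holding a fake image tensor.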
class ExampleDataset(Dataset):

    def __init__(self):
        self.index = 0
        self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]

    def __getitem__(self, idx):
        results = dict(imgs=torch.tensor([1]))
        return results

    def __len__(self):
        return len(self.eval_result)
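

# single_gpu_test should return one result per sample, in dataset order.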
def test_single_gpu_test():
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset, batch_size=1)
    model = Model()

    results = single_gpu_test(model, loader)
    assert results == list(range(8))
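

# Replacement for torch.tensor on CUDA-less machines: forces device='cpu'
# whenever a device keyword is passed.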
def mock_tensor_without_cuda(*args, **kwargs):
    if 'device' not in kwargs:
        return torch.Tensor(*args)
    return torch.IntTensor(*args, device='cpu')
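

# Both collection helpers are patched to return the expected list, so only the
# multi_gpu_test driver is exercised (gpu_collect=True and False paths).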
@patch('mmaction.apis.test.collect_results_gpu',
       Mock(return_value=list(range(8))))
@patch('mmaction.apis.test.collect_results_cpu',
       Mock(return_value=list(range(8))))
def test_multi_gpu_test():
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset, batch_size=1)
    model = Model()

    results = multi_gpu_test(model, loader)
    assert results == list(range(8))

    results = multi_gpu_test(model, loader, gpu_collect=False)
    assert results == list(range(8))
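

# get_dist_info is patched to a single-process setup (rank 0, world size 1) and
# the distributed primitives are mocked, so collect_results_cpu runs without
# initializing torch.distributed; on CPU-only machines tensor creation is also
# patched to avoid CUDA calls.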
@patch('mmcv.runner.get_dist_info', Mock(return_value=(0, 1)))
@patch('torch.distributed.broadcast', MagicMock)
@patch('torch.distributed.barrier', Mock)
@pytest.mark.skipif(
    sys.version_info[:2] == (3, 8), reason='Not for python 3.8')
def test_collect_results_cpu():

    def content_for_unittest():
        results_part = list(range(8))
        size = 8

        results = collect_results_cpu(results_part, size)
        assert results == list(range(8))

        results = collect_results_cpu(results_part, size, 'unittest')
        assert results == list(range(8))

    if not torch.cuda.is_available():
        with patch(
                'torch.full',
                Mock(
                    return_value=torch.full(
                        (512, ), 32, dtype=torch.uint8, device='cpu'))):
            with patch('torch.tensor', mock_tensor_without_cuda):
                content_for_unittest()
    else:
        content_for_unittest()