|
.dev/gather_benchmark_evaluation_results.py
|
# Copyright (c) OpenMMLab. All rights reserved.
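"""Gather benchmarked model evaluation results.

Loads the latest eval json of each benchmarked model under the given
root directory, compares its metrics with those recorded in the
benchmark config, and dumps the comparison to a json file.

Usage (paths are illustrative):
    python .dev/gather_benchmark_evaluation_results.py \
        benchmark_config.py work_dirs/benchmark --out results.json
"""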
import argparse
import glob
import os.path as osp

import mmcv
from mmcv import Config


def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather benchmarked model evaluation results')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        '--out',
        type=str,
        default='benchmark_evaluation_info.json',
        help='output path of gathered metrics and compared '
        'results to be stored')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    root_path = args.root
    metrics_out = args.out
    result_dict = {}

    cfg = Config.fromfile(args.config)
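
    # The benchmark config is assumed to map each model name to a model info
    # dict (or a list of such dicts) holding the test config path and the
    # previously recorded metrics in percent, e.g. a hypothetical entry:
    #   model_a = dict(
    #       config='configs/model_a/model_a_512x512_80k.py',
    #       metric=dict(mIoU=76.54))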
    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            previous_metrics = model_info['metric']
            config = model_info['config'].strip()
            fname, _ = osp.splitext(osp.basename(config))

            # Load benchmark evaluation json
            metric_json_dir = osp.join(root_path, fname)
            if not osp.exists(metric_json_dir):
                print(f'{metric_json_dir} does not exist.')
                continue

            json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
            if len(json_list) == 0:
                print(f'There is no eval json in {metric_json_dir}.')
                continue
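
            # Pick the newest eval json, assuming filenames are timestamped
            # so that lexicographic order matches chronological order.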
            log_json_path = sorted(json_list)[-1]
            metric = mmcv.load(log_json_path)
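            # Sanity check: the config path recorded in the eval json should
            # contain this config (substring match; a missing 'config' field
            # falls back to {}, which also triggers the skip).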
            if config not in metric.get('config', {}):
                print(f'{config} not included in {log_json_path}')
                continue

            # Compare between new benchmark results and previous metrics
            differential_results = dict()
            new_metrics = dict()
            for record_metric_key in previous_metrics:
                if record_metric_key not in metric['metric']:
                    raise KeyError(f'{record_metric_key} does not exist, '
                                   'please check your config')
                old_metric = previous_metrics[record_metric_key]
                new_metric = round(metric['metric'][record_metric_key] * 100,
                                   2)
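
                # Report the signed difference with an explicit +/- prefix;
                # a zero difference is formatted as +0.00.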
                differential = new_metric - old_metric
                flag = '+' if differential >= 0 else '-'
                differential_results[
                    record_metric_key] = f'{flag}{abs(differential):.2f}'
                new_metrics[record_metric_key] = new_metric
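
            # Store previous and new metrics plus their differences, keyed
            # by the model's config path.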
            result_dict[config] = dict(
                differential=differential_results,
                previous=previous_metrics,
                new=new_metrics)
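
    # Dump the gathered comparison to `--out` and echo a summary.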
    if metrics_out:
        mmcv.dump(result_dict, metrics_out, indent=4)
    print('===================================')
    for config_name, metrics in result_dict.items():
        print(config_name, metrics)
    print('===================================')