tools/model_converters/mit2mmseg.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict

import mmcv
import torch
from mmcv.runner import CheckpointLoader


def convert_mit(ckpt):
    """Convert keys in an official pretrained SegFormer (MiT) checkpoint to
    MMSegmentation style."""
    new_ckpt = OrderedDict()
    # Process the concat between q linear weights and kv linear weights
    for k, v in ckpt.items():
        if k.startswith('head'):
            # the classification head is not needed for segmentation
            continue
        # patch embedding conversion
        elif k.startswith('patch_embed'):
            stage_i = int(k.split('.')[0].replace('patch_embed', ''))
            new_k = k.replace(f'patch_embed{stage_i}', f'layers.{stage_i-1}.0')
            new_v = v
            if 'proj.' in new_k:
                new_k = new_k.replace('proj.', 'projection.')
        # transformer encoder layer conversion
        elif k.startswith('block'):
            stage_i = int(k.split('.')[0].replace('block', ''))
            new_k = k.replace(f'block{stage_i}', f'layers.{stage_i-1}.1')
            new_v = v
            if 'attn.q.' in new_k:
                # merge the separate q and kv projections into a single
                # in_proj weight/bias
                sub_item_k = k.replace('q.', 'kv.')
                new_k = new_k.replace('q.', 'attn.in_proj_')
                new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)
            elif 'attn.kv.' in new_k:
                # kv has already been merged into in_proj when handling q
                continue
            elif 'attn.proj.' in new_k:
                new_k = new_k.replace('proj.', 'attn.out_proj.')
            elif 'attn.sr.' in new_k:
                # the spatial-reduction conv keeps its original key name
                new_k = new_k.replace('sr.', 'sr.')
            elif 'mlp.' in new_k:
                # `string` only records the old/new key and shapes and is not
                # used further
                string = f'{new_k}-'
                new_k = new_k.replace('mlp.', 'ffn.layers.')
                if 'fc1.weight' in new_k or 'fc2.weight' in new_k:
                    # fully-connected weights become 1x1 conv weights
                    new_v = v.reshape((*v.shape, 1, 1))
                new_k = new_k.replace('fc1.', '0.')
                new_k = new_k.replace('dwconv.dwconv.', '1.')
                new_k = new_k.replace('fc2.', '4.')
                string += f'{new_k} {v.shape}-{new_v.shape}'
        # norm layer conversion
        elif k.startswith('norm'):
            stage_i = int(k.split('.')[0].replace('norm', ''))
            new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i-1}.2')
            new_v = v
        else:
            new_k = k
            new_v = v
        new_ckpt[new_k] = new_v
    return new_ckpt
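# Illustrative examples of the renaming performed by convert_mit (hypothetical
# MiT-style keys, listed only to document the mapping):
#   patch_embed1.proj.weight -> layers.0.0.projection.weight
#   block1.0.attn.q.weight   -> layers.0.1.0.attn.attn.in_proj_weight
#   block1.0.mlp.fc1.weight  -> layers.0.1.0.ffn.layers.0.weight
#   norm1.weight             -> layers.0.2.weight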


def main():
    parser = argparse.ArgumentParser(
        description='Convert keys in official pretrained SegFormer to '
        'MMSegmentation style.')
    parser.add_argument('src', help='src model path or url')
    # The dst path must be a full path of the new checkpoint.
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()

    checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    elif 'model' in checkpoint:
        state_dict = checkpoint['model']
    else:
        state_dict = checkpoint
    weight = convert_mit(state_dict)
    mmcv.mkdir_or_exist(osp.dirname(args.dst))
    torch.save(weight, args.dst)


if __name__ == '__main__':
    main()
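A minimal sketch of how the converter can be exercised, assuming convert_mit is importable from the script above; the import path, file names, and toy keys and shapes below are illustrative and do not come from a real MiT checkpoint. In normal use the script is run from the command line, e.g. python tools/model_converters/mit2mmseg.py ${SRC} ${DST}.

from collections import OrderedDict

import torch

# Assumed import path; adjust to wherever the script sits on PYTHONPATH.
from tools.model_converters.mit2mmseg import convert_mit

# Toy state dict with made-up shapes, only to show the key renaming.
toy = OrderedDict([
    ('patch_embed1.proj.weight', torch.zeros(32, 3, 7, 7)),
    ('block1.0.attn.q.weight', torch.zeros(32, 32)),
    ('block1.0.attn.kv.weight', torch.zeros(64, 32)),
    ('block1.0.mlp.fc1.weight', torch.zeros(128, 32)),
    ('norm1.weight', torch.zeros(32)),
    ('head.weight', torch.zeros(1000, 256)),  # dropped by the converter
])

converted = convert_mit(toy)
for new_k, new_v in converted.items():
    print(new_k, tuple(new_v.shape))
# Expected output (shapes follow from the toy tensors above):
#   layers.0.0.projection.weight (32, 3, 7, 7)
#   layers.0.1.0.attn.attn.in_proj_weight (96, 32)
#   layers.0.1.0.ffn.layers.0.weight (128, 32, 1, 1)
#   layers.0.2.weight (32,)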