Diff of /segment/val.py [000000] .. [190ca4]

# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Validate a trained YOLOv5 segment model on a segment dataset

Usage:
    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments

Usage - formats:
    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
                                      yolov5s-seg.torchscript        # TorchScript
                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s-seg_openvino_model     # OpenVINO
                                      yolov5s-seg.engine             # TensorRT
                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
                                      yolov5s-seg.pb                 # TensorFlow GraphDef
                                      yolov5s-seg.tflite             # TensorFlow Lite
                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                      yolov5s-seg_paddle_model       # PaddlePaddle
"""

import argparse
import json
import os
import subprocess
import sys
from multiprocessing.pool import ThreadPool
from pathlib import Path

import numpy as np
import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import torch.nn.functional as F

from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')


def save_one_json(predn, jdict, path, class_map, pred_masks):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    from pycocotools.mask import encode

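    # pycocotools RLE encoding expects a Fortran-ordered uint8 HxWx1 array per mask;
    # the returned 'counts' bytes are decoded to utf-8 so the dict is JSON-serializable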
    def single_encode(x):
        rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
        rle['counts'] = rle['counts'].decode('utf-8')
        return rle

    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    pred_masks = np.transpose(pred_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(p[5])],
            'bbox': [round(x, 3) for x in b],
            'score': round(p[4], 5),
            'segmentation': rles[i]})


def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    if masks:
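        # overlap-encoded GT: all instances of an image share one mask, with instance i
        # stored as pixel value i + 1; expand to one binary mask per instance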
        if overlap:
            nl = len(labels)
            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
        if gt_masks.shape[1:] != pred_masks.shape[1:]:
            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
            gt_masks = gt_masks.gt_(0.5)
        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
    else:  # boxes
        iou = box_iou(labels[:, 1:], detections[:, :4])

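    # Greedy matching per IoU threshold: candidate (label, detection) pairs are sorted by IoU
    # (descending), then deduplicated so each detection matches at most one label and vice versa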
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)


@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val-seg',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        overlap=False,
        mask_downsample_ratio=1,
        compute_loss=None,
        callbacks=Callbacks(),
):
    if save_json:
        check_requirements('pycocotools>=2.0.6')
        process = process_mask_native  # more accurate
    else:
        process = process_mask  # faster
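    # process_mask_native upsamples mask prototypes to native image resolution before cropping
    # to each box; process_mask crops at prototype resolution and upsamples afterwards
    # (see utils/segment/general), trading mask-boundary accuracy for speed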

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
        nm = de_parallel(model).model[-1].nm  # number of masks
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f'{task}: '),
                                       overlap_mask=overlap,
                                       mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
                                  'mAP50', 'mAP50-95)')
    dt = Profile(device=device), Profile(device=device), Profile(device=device)
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, seg, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det,
                                        nm=nm)

        # Metrics
        plot_masks = []  # masks for plotting
        for si, (pred, proto) in enumerate(zip(preds, protos)):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Masks
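            # with overlap_mask the whole image shares one index-encoded mask, so select [si];
            # otherwise select the per-instance masks belonging to image si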
            midx = [si] if overlap else targets[:, 0] == si
            gt_masks = masks[midx]
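            # predicted masks = sigmoid(per-detection coefficients pred[:, 6:] @ prototypes),
            # cropped to each predicted box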
            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct_bboxes = process_batch(predn, labelsn, iouv)
                correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # (conf, pcls, tcls)

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if plots and batch_i < 3:
                plot_masks.append(pred_masks[:15])  # filter top 15 to plot

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                pred_masks = scale_image(im[si].shape[1:],
                                         pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
                save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
            # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            if len(plot_masks):
                plot_masks = torch.cat(plot_masks, dim=0)
            plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
            plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
                                  save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        # callbacks.run('on_val_batch_end')

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
        metrics.update(results)
    nt = np.bincount(stats[4].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 8  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results()))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(metrics.ap_class_index):
            LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
    # callbacks.run('on_val_end')

    mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json'))  # annotations
        pred_json = str(save_dir / f'{w}_predictions.json')  # predictions
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            results = []
            for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'):
                if is_coco:
                    eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # img ID to evaluate
                eval.evaluate()
                eval.accumulate()
                eval.summarize()
                results.extend(eval.stats[:2])  # update results (mAP@0.5:0.95, mAP@0.5)
            map_bbox, map50_bbox, map_mask, map50_mask = results
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
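    # returns (box P, R, mAP50, mAP50-95, mask P, R, mAP50, mAP50-95, mean losses), per-class mAPs, speeds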
    return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    # opt.save_json |= opt.data.endswith('coco.yaml')
    opt.save_txt |= opt.save_hybrid
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
            for opt.weights in weights:
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt'])
            plot_val_study(x=x)  # plot
        else:
            raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)