--- /dev/null
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -0,0 +1,193 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# WARNING ⚠️ wandb is deprecated and will be removed in a future release.
+# See supported integrations at https://github.com/ultralytics/yolov5#integrations
+
+import logging
+import os
+import sys
+from contextlib import contextmanager
+from pathlib import Path
+
+from utils.general import LOGGER, colorstr
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[3]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+RANK = int(os.getenv('RANK', -1))
+DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \
+                      f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.'
+
+try:
+    import wandb
+
+    assert hasattr(wandb, '__version__')  # verify package import not local dir
+    LOGGER.warning(DEPRECATION_WARNING)
+except (ImportError, AssertionError):
+    wandb = None
+
+
+class WandbLogger:
+    """Log training runs, datasets, models, and predictions to Weights & Biases.
+
+    This logger sends information to W&B at wandb.ai. By default, this includes
+    hyperparameters, system configuration and metrics, model metrics, and basic
+    dataset metrics and analyses.
+
+    By providing additional command line arguments to train.py, datasets,
+    models and predictions can also be logged.
+
+    For more on how this logger is used, see the Weights & Biases documentation:
+    https://docs.wandb.com/guides/integrations/yolov5
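+
+    Example (hypothetical standalone construction; train.py normally creates
+    this logger itself, so the opt fields below are assumptions):
+        >>> from types import SimpleNamespace
+        >>> opt = SimpleNamespace(project='runs/train', name='exp', entity=None,
+        ...                       resume=False, epochs=3, bbox_interval=-1,
+        ...                       evolve=False, noplots=False, data='data/coco128.yaml')
+        >>> logger = WandbLogger(opt)  # silently no-ops if wandb is not installed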
+    """
+
+    def __init__(self, opt, run_id=None, job_type='Training'):
+        """
+        - Initialize WandbLogger instance
+        - Upload dataset if opt.upload_dataset is True
+        - Setup training processes if job_type is 'Training'
+
+        arguments:
+        opt (namespace) -- Commandline arguments for this run
+        run_id (str) -- Run ID of W&B run to be resumed
+        job_type (str) -- To set the job_type for this run
+
+       """
+        # Pre-training routine --
+        self.job_type = job_type
+        self.wandb, self.wandb_run = wandb, wandb.run if wandb else None
+        self.val_artifact, self.train_artifact = None, None
+        self.train_artifact_path, self.val_artifact_path = None, None
+        self.result_artifact = None
+        self.val_table, self.result_table = None, None
+        self.max_imgs_to_log = 16
+        self.data_dict = None
+        if self.wandb:
+            self.wandb_run = wandb.init(config=opt,
+                                        resume='allow',
+                                        project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
+                                        entity=opt.entity,
+                                        name=opt.name if opt.name != 'exp' else None,
+                                        job_type=job_type,
+                                        id=run_id,
+                                        allow_val_change=True) if not wandb.run else wandb.run
+
+        if self.wandb_run:
+            if self.job_type == 'Training':
+                if isinstance(opt.data, dict):
+                    # This means another dataset manager has already processed the dataset info (e.g. ClearML)
+                    # and they will have stored the already processed dict in opt.data
+                    self.data_dict = opt.data
+                self.setup_training(opt)
+
+    def setup_training(self, opt):
+        """
+        Setup the necessary processes for training YOLO models:
+          - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX
+          - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
+          - Setup log_dict, initialize bbox_interval
+
+        arguments:
+        opt (namespace) -- commandline arguments for this run
+
+        """
+        self.log_dict, self.current_epoch = {}, 0
+        self.bbox_interval = opt.bbox_interval
+        if isinstance(opt.resume, str):
+            model_dir, _ = self.download_model_artifact(opt)
+            if model_dir:
+                self.weights = Path(model_dir) / 'last.pt'
+                config = self.wandb_run.config
+                opt.weights = str(self.weights)
+                opt.save_period = config.save_period
+                opt.batch_size = config.batch_size
+                opt.bbox_interval = config.bbox_interval
+                opt.epochs = config.epochs
+                opt.hyp = config.hyp
+                opt.imgsz = config.imgsz
+
+        if opt.bbox_interval == -1:
+            self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
+            if opt.evolve or opt.noplots:
+                self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval
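+        # e.g. epochs=300 -> bbox_interval=30, epochs=8 -> bbox_interval=1;
+        # with --evolve or --noplots the interval becomes epochs + 1, so
+        # bounding-box logging never triggers.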
+
+    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+        """
+        Log the model checkpoint as a W&B artifact
+
+        arguments:
+        path (Path)   -- Path of directory containing the checkpoints
+        opt (namespace) -- Command line arguments for this run
+        epoch (int)  -- Current epoch number
+        fitness_score (float) -- fitness score for current epoch
+        best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
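+
+        usage (sketch; the variable names save_dir, fitness and is_best are assumptions):
+        logger.log_model(Path(save_dir) / 'weights', opt, epoch, fitness, best_model=is_best)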
+        """
+        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model',
+                                        type='model',
+                                        metadata={
+                                            'original_url': str(path),
+                                            'epochs_trained': epoch + 1,
+                                            'save_period': opt.save_period,
+                                            'project': opt.project,
+                                            'total_epochs': opt.epochs,
+                                            'fitness_score': fitness_score})
+        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
+        wandb.log_artifact(model_artifact,
+                           aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
+        LOGGER.info(f'Saving model artifact on epoch {epoch + 1}')
+
+    def val_one_image(self, pred, predn, path, names, im):
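+        """Deprecated no-op retained so callers can invoke this hook unconditionally."""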
+        pass
+
+    def log(self, log_dict):
+        """
+        Save the metrics to the logging dictionary
+
+        arguments:
+        log_dict (Dict) -- metrics/media to be logged in current step
+        """
+        if self.wandb_run:
+            self.log_dict.update(log_dict)
+
+    def end_epoch(self):
+        """
+        commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
+
+        arguments:
+        best_result (boolean): Boolean representing if the result of this evaluation is best or not
+        """
+        if self.wandb_run:
+            with all_logging_disabled():
+                try:
+                    wandb.log(self.log_dict)
+                except BaseException as e:
+                    LOGGER.info(
+                        f'An error occurred in the wandb logger; training will proceed without interruption. More info:\n{e}'
+                    )
+                    self.wandb_run.finish()
+                    self.wandb_run = None
+                self.log_dict = {}
+
+    def finish_run(self):
+        """
+        Log any remaining metrics and finish the current W&B run
+        """
+        if self.wandb_run:
+            if self.log_dict:
+                with all_logging_disabled():
+                    wandb.log(self.log_dict)
+            wandb.run.finish()
+            LOGGER.warning(DEPRECATION_WARNING)
+
+
+@contextmanager
+def all_logging_disabled(highest_level=logging.CRITICAL):
+    """ source - https://gist.github.com/simon-weber/7853144
+    A context manager that will prevent any logging messages triggered during the body from being processed.
+    :param highest_level: the maximum logging level in use.
+      This would only need to be changed if a custom level greater than CRITICAL is defined.
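+
+    Example:
+        >>> with all_logging_disabled():
+        ...     logging.warning('this message is suppressed')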
+    """
+    previous_level = logging.root.manager.disable
+    logging.disable(highest_level)
+    try:
+        yield
+    finally:
+        logging.disable(previous_level)