utils/loggers/clearml/hpo.py

from clearml import Task
from clearml.automation import HyperParameterOptimizer, UniformParameterRange
from clearml.automation.optuna import OptimizerOptuna

# Connecting ClearML with the current process,
# from here on everything is logged automatically
task = Task.init(project_name='Hyper-Parameter Optimization',
                 task_name='YOLOv5',
                 task_type=Task.TaskTypes.optimizer,
                 reuse_last_task_id=False)

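# (Background note, assuming standard ClearML behavior: the optimizer below clones
# the template task once per trial and launches each clone with the sampled
# hyperparameter values; the template is a previously executed training task whose
# id you paste as base_task_id below.)
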
# Example use case:
optimizer = HyperParameterOptimizer(
    # This is the experiment we want to optimize
    base_task_id='<your_template_task_id>',
    # Here we define the hyperparameters to optimize.
    # Notice: the parameter name should exactly match what you see in the UI: <section_name>/<parameter>
    # For example, if the base experiment has a section named "General" with a
    # parameter named "batch_size" under it, this becomes "General/batch_size".
    # If you use `argparse`, arguments will appear under the "Args" section instead,
    # and you should pass "Args/batch_size".
    hyper_parameters=[
        UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1),
        UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0),
        UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98),
        UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001),
        UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0),
        UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95),
        UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2),
        UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2),
        UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0),
        UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0),
        UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0),
        UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0),
        UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7),
        UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0),
        UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0),
        UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1),
        UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9),
        UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9),
        UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0),
        UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9),
        UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9),
        UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0),
        UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001),
        UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0),
        UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0),
        UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0),
        UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0),
        UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)],
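    # (Illustrative aside, assuming the standard clearml.automation API: the list
    # above could also mix in DiscreteParameterRange or UniformIntegerParameterRange
    # for categorical / integer hyperparameters, e.g.
    # DiscreteParameterRange('Args/batch_size', values=[16, 32, 64]).)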
    # this is the objective metric we want to maximize/minimize
    objective_metric_title='metrics',
    objective_metric_series='mAP_0.5',
    # now we decide if we want to maximize it or minimize it (mAP we maximize)
    objective_metric_sign='max',
    # let us limit the number of concurrent experiments;
    # this in turn will make sure we don't bombard the scheduler with experiments.
    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
    max_number_of_concurrent_tasks=1,
    # this is the optimizer class (actually doing the optimization)
    # Currently, we can choose from GridSearch, RandomSearch, OptimizerBOHB (Bayesian optimization Hyper-Band)
    # or OptimizerOptuna, which we use here
    optimizer_class=OptimizerOptuna,
    # If specified, only the top K performing Tasks will be kept; the others will be automatically archived
    save_top_k_tasks_only=5,
    compute_time_limit=None,
    total_max_jobs=20,
    min_iteration_per_job=None,
    max_iteration_per_job=None,
)
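
# (Deployment note, assuming the standard ClearML API: to run the trials through a
# clearml-agent instead of on this machine, pass execution_queue='<your_queue_name>'
# to HyperParameterOptimizer above and use optimizer.start() instead of
# start_locally() below.)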

# report every 10 seconds (the argument is in minutes); this is way too often, but we are testing here
optimizer.set_report_period(10 / 60)
# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agents
# optimizer.start_locally(job_complete_callback=job_complete_callback)
# set the time limit for the optimization process (2 hours)
optimizer.set_time_limit(in_minutes=120.0)
# Start the optimization process in the local environment
optimizer.start_locally()
# wait until the process is done (notice we are controlling the optimization process in the background)
optimizer.wait()
# make sure the background optimization has stopped
optimizer.stop()
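
# (Optional follow-up, a minimal sketch assuming the standard clearml.automation API:
# retrieve the best-performing sub-experiments found during the run; the top_k value
# of 3 is an arbitrary example.)
top_exp = optimizer.get_top_experiments(top_k=3)
print('Top experiments:', [t.id for t in top_exp])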

print('We are done, goodbye')