# configs/config_toan_resume.yml
---
# Catalyst experiment config: resume fine-tuning DenseNet-169 on the RSNA
# intracranial hemorrhage dataset (6 output classes; the "any" class is
# weighted double in the loss).
model_params:
  model: &model CNNFinetuneModels
  model_name: &model_name densenet169
  num_classes: 6

args:
  expdir: "src"
  logdir: &logdir "./logs/rsna"
  baselogdir: "./logs/rsna"

distributed_params:
  opt_level: O1  # NVIDIA Apex mixed-precision optimization level

stages:
  # Shared stage settings: checkpointing/early stopping track the loss.
  state_params:
    main_metric: &reduce_metric loss
    minimize_metric: true

  criterion_params:
    criterion: &criterion LogLoss
    # Per-class loss weights; the last ("any") class counts twice.
    weight: [1, 1, 1, 1, 1, 2]

  data_params:
    batch_size: 32
    num_workers: 4
    drop_last: false
    image_size: &image_size [512, 512]
    train_csv: "./csv/stratified_kfold/train_0.csv.gz"
    valid_csv: "./csv/stratified_kfold/valid_0.csv.gz"
    # dataset_type: "RSNAMultiWindowsDataset"
    with_any: true
    root: "../stage_1_train_images_jpg_preprocessing/"
    image_type: "jpg"

  # Warmup stage kept for reference; uncomment to train with a higher LR
  # before the fine-tuning stage below.
  # warmup:
  #   optimizer_params:
  #     optimizer: AdamW
  #     lr: 0.001
  #   scheduler_params:
  #     scheduler: MultiStepLR
  #     milestones: [10]
  #     gamma: 0.3
  #   state_params:
  #     num_epochs: 3
  #   callbacks_params: &callbacks_params
  #     loss:
  #       callback: CriterionCallback
  #     optimizer:
  #       callback: OptimizerCallback
  #       accumulation_steps: 1
  #     scheduler:
  #       callback: SchedulerCallback
  #       reduce_metric: *reduce_metric
  #     saver:
  #       callback: CheckpointCallback
  #       save_n_best: 5

  stage1:
    optimizer_params:
      optimizer: AdamW
      lr: 0.00001  # low LR: resuming from a pretrained checkpoint
    scheduler_params:
      scheduler: MultiStepLR
      milestones: [10]
      gamma: 0.3
    state_params:
      num_epochs: 5
    callbacks_params:
      loss:
        callback: CriterionCallback
      optimizer:
        callback: OptimizerCallback
        accumulation_steps: 1
      scheduler:
        callback: SchedulerCallback
        reduce_metric: *reduce_metric
      saver:
        callback: CheckpointCallback
        save_n_best: 5
      # NOTE: "early_stoping" spelling kept as-is — it is the key the
      # experiment code looks up; do not "fix" it here.
      early_stoping:
        callback: EarlyStoppingCallback
        patience: 2

monitoring_params:
  project: "Kaggle-RSNA"
  tags: [*model, *model_name, *criterion]