#!/bin/bash
# drugclip.sh: train DrugCLIP with unicore-train, starting from UniMol pretrained encoders.
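
# Input data directory and output locations (checkpoints, temporary saves, TensorBoard logs).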
data_path="data"
save_dir="savedir"
tmp_save_dir="tmp_save_dir"
tsb_dir="tsb_dir"
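
# Distributed-launch settings and the UniMol pretrained checkpoints used to
# initialize the molecule and pocket encoders.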
n_gpu=1
MASTER_PORT=10055
finetune_mol_model="mol_pre_no_h_220816.pt" # unimol pretrained mol model
finetune_pocket_model="pocket_pre_220816.pt" # unimol pretrained pocket model
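
# Training hyperparameters. Note that dropout, dist_threshold, and recycling are
# defined here but are not passed to unicore-train in the command below.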
batch_size=48
batch_size_valid=128
epoch=200
dropout=0.0
warmup=0.06
update_freq=1
dist_threshold=8.0
recycling=3
lr=1e-3
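
# Runtime environment settings, then a single-node torch.distributed launch of unicore-train.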
export NCCL_ASYNC_ERROR_HANDLING=1
export OMP_NUM_THREADS=1
CUDA_VISIBLE_DEVICES="1" python -m torch.distributed.launch --nproc_per_node=$n_gpu --master_port=$MASTER_PORT $(which unicore-train) $data_path --user-dir ./unimol --train-subset train --valid-subset valid \
    --num-workers 8 --ddp-backend=c10d \
    --task drugclip --loss in_batch_softmax --arch drugclip \
    --max-pocket-atoms 256 \
    --optimizer adam --adam-betas "(0.9, 0.999)" --adam-eps 1e-8 --clip-norm 1.0 \
    --lr-scheduler polynomial_decay --lr $lr --warmup-ratio $warmup --max-epoch $epoch --batch-size $batch_size --batch-size-valid $batch_size_valid \
    --fp16 --fp16-init-scale 4 --fp16-scale-window 256 --update-freq $update_freq --seed 1 \
    --tensorboard-logdir $tsb_dir \
    --log-interval 100 --log-format simple \
    --validate-interval 1 \
    --best-checkpoint-metric valid_bedroc --patience 2000 --all-gather-list-size 2048000 \
    --save-dir $save_dir --tmp-save-dir $tmp_save_dir --keep-last-epochs 5 \
    --find-unused-parameters \
    --maximize-best-checkpoint-metric \
    --finetune-pocket-model $finetune_pocket_model \
    --finetune-mol-model $finetune_mol_model
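
# Usage: bash drugclip.sh
# Checkpoints are written to $save_dir, with the best checkpoint selected by
# maximizing valid_bedroc; view training curves with: tensorboard --logdir $tsb_dir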