options.py
import argparse
import os

import torch


### Parser

def parse_args():
    parser = argparse.ArgumentParser()
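    # Dataset, experiment, and modality options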
    parser.add_argument('--dataroot', default='./data/TCGA_GBMLGG', help='path to the dataset')
    parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints/TCGA_GBMLGG', help='models are saved here')
    parser.add_argument('--exp_name', type=str, default='exp_name', help='name of the experiment; determines where samples and models are stored')
    parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids, e.g. 0 or 0,1,2 or 0,2; use -1 for CPU')
    parser.add_argument('--mode', type=str, default='omic', help='mode')
    parser.add_argument('--model_name', type=str, default='omic', help='model name')
    parser.add_argument('--use_vgg_features', type=int, default=0, help='use pretrained VGG embeddings')
    parser.add_argument('--use_rnaseq', type=int, default=0, help='use RNAseq data')

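    # Task, model input, and logging options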
    parser.add_argument('--task', type=str, default='surv', help='surv | grad')
    parser.add_argument('--useRNA', type=int, default=0)  # Not functional at the moment
    parser.add_argument('--useSN', type=int, default=1)
    parser.add_argument('--act_type', type=str, default='Sigmoid', help='activation function')
    parser.add_argument('--input_size_omic', type=int, default=80, help='input size for the omic vector')
    parser.add_argument('--input_size_path', type=int, default=512, help='input size for path images')
    parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal initialization')
    parser.add_argument('--save_at', type=int, default=20, help='epoch at which to save the model')
    parser.add_argument('--label_dim', type=int, default=1, help='size of the output')
    parser.add_argument('--measure', default=1, type=int, help='compute evaluation metrics during training; set to 0 to speed up training')
    parser.add_argument('--verbose', default=1, type=int)
    parser.add_argument('--print_every', default=0, type=int)

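    # Optimizer and learning-rate schedule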
    parser.add_argument('--optimizer_type', type=str, default='adam')
    parser.add_argument('--beta1', type=float, default=0.9, help='0.9, 0.5 | 0.25 | 0')
    parser.add_argument('--beta2', type=float, default=0.999, help='0.9, 0.5 | 0.25 | 0')
    parser.add_argument('--lr_policy', default='linear', type=str, help='learning rate policy')
    parser.add_argument('--finetune', default=1, type=int, help='enable fine-tuning')
    parser.add_argument('--final_lr', default=0.1, type=float, help='used for AdaBound')
    parser.add_argument('--reg_type', default='omic', type=str, help='regularization type')
    parser.add_argument('--niter', type=int, default=0, help='# of iterations at the starting learning rate')
    parser.add_argument('--niter_decay', type=int, default=25, help='# of iterations over which to linearly decay the learning rate to zero')
    parser.add_argument('--epoch_count', type=int, default=1, help='starting epoch')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size for training/testing')

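    # Loss weights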
    parser.add_argument('--lambda_cox', type=float, default=1)
    parser.add_argument('--lambda_reg', type=float, default=3e-4)
    parser.add_argument('--lambda_nll', type=float, default=1)

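    # Fusion options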
    parser.add_argument('--fusion_type', type=str, default='pofusion', help='concat | pofusion')
    parser.add_argument('--skip', type=int, default=0)
    parser.add_argument('--use_bilinear', type=int, default=1)
    parser.add_argument('--path_gate', type=int, default=1)
    parser.add_argument('--grph_gate', type=int, default=1)
    parser.add_argument('--omic_gate', type=int, default=1)
    parser.add_argument('--path_dim', type=int, default=32)
    parser.add_argument('--grph_dim', type=int, default=32)
    parser.add_argument('--omic_dim', type=int, default=32)
    parser.add_argument('--path_scale', type=int, default=1)
    parser.add_argument('--grph_scale', type=int, default=1)
    parser.add_argument('--omic_scale', type=int, default=1)
    parser.add_argument('--mmhid', type=int, default=64)

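    # Network architecture and GNN options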
    parser.add_argument('--init_type', type=str, default='none', help='network initialization [normal | xavier | kaiming | orthogonal | max]; max seems to work well')
    parser.add_argument('--dropout_rate', default=0.25, type=float, help='dropout rate (0-0.5); increasing it helps against overfitting')
    parser.add_argument('--use_edges', default=1, type=float, help='use edge_attr')
    parser.add_argument('--pooling_ratio', default=0.2, type=float, help='pooling ratio for SAGPool')
    parser.add_argument('--lr', default=2e-3, type=float, help='5e-4 for Adam | 1e-3 for AdaBound')
    parser.add_argument('--weight_decay', default=4e-4, type=float, help='L2 regularization on weights, used for Adam; typically turned off when L1 regularization is used')
    parser.add_argument('--GNN', default='GCN', type=str, help='GCN | GAT | SAG; graph conv mode for pooling')
    parser.add_argument('--patience', default=0.005, type=float)
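
    # parse the known arguments, log them, and resolve GPU ids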
    opt = parser.parse_known_args()[0]
    print_options(parser, opt)
    opt = parse_gpuids(opt)
    return opt

def print_options(parser, opt):
    """Print and save options.

    Prints the current options together with their default values (when different),
    and saves them to a text file under
    [checkpoints_dir]/[exp_name]/[model_name]/train_opt.txt.
    """
    message = ''
    message += '----------------- Options ---------------\n'
    for k, v in sorted(vars(opt).items()):
        comment = ''
        default = parser.get_default(k)
        if v != default:
            comment = '\t[default: %s]' % str(default)
        message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
    message += '----------------- End -------------------'
    print(message)

    # save to disk
    expr_dir = os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name)
    mkdirs(expr_dir)
    file_name = os.path.join(expr_dir, '{}_opt.txt'.format('train'))
    with open(file_name, 'wt') as opt_file:
        opt_file.write(message)
        opt_file.write('\n')

def parse_gpuids(opt):
    # parse the comma-separated gpu id string into a list of ints,
    # keeping only non-negative ids (-1 means CPU)
    str_ids = opt.gpu_ids.split(',')
    opt.gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)
        if id >= 0:
            opt.gpu_ids.append(id)
    if len(opt.gpu_ids) > 0:
        torch.cuda.set_device(opt.gpu_ids[0])

    return opt

def mkdirs(paths):
    """Create empty directories if they don't exist.

    Parameters:
        paths (str list) -- a list of directory paths
    """
    if isinstance(paths, list) and not isinstance(paths, str):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)

def mkdir(path):
    """Create a single empty directory if it doesn't exist.

    Parameters:
        path (str) -- a single directory path
    """
    if not os.path.exists(path):
        os.makedirs(path)
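

# Minimal usage sketch (illustrative, not part of the original module): running
# the file directly would parse the options, print/save them via print_options,
# and resolve GPU ids via parse_gpuids. Assumes CUDA is available whenever
# --gpu_ids is not -1.
if __name__ == '__main__':
    opt = parse_args()
    print('task: %s | mode: %s | gpu_ids: %s' % (opt.task, opt.mode, opt.gpu_ids))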