
# HTNet/multi-modality/utils.py
from collections import defaultdict, deque
import datetime
import time
import torch
import torch.utils.data.dataset
import torch.distributed as dist

from sklearn.preprocessing import MultiLabelBinarizer, LabelEncoder
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import pandas as pd
import numpy as np

import errno
import os


class LNMLocationDataset(torch.utils.data.dataset.Dataset):
    """Loads (image, tag) pairs from a csv file with 'image_name' and 'tags' columns.

    Tag strings are encoded to integer class indices with a LabelEncoder.
    """
    def __init__(self, infile=None, transform=None, df=None):
        self.transform = transform
        self.lbe = LabelEncoder()

        # Read the csv file unless a DataFrame is passed in directly.
        if df is None:
            df = pd.read_csv(infile)

        # Convert to plain lists/arrays so positional indexing in __getitem__
        # works even when a filtered DataFrame (non-contiguous index) is given.
        self.images = df['image_name'].to_list()
        self.labels = self.lbe.fit_transform(df['tags'])

        self.classes = sorted(np.unique(df['tags']))
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))

        self.imgs = list(zip(self.images, self.labels))
        # Image paths in the csv are relative to this data root.
        self.root = '/media/storage1/project/deep_learning/ultrasound_tjmuch/ultrasound_tjmuch_data_20180105'

    def __getitem__(self, i):
        path = os.path.join(self.root, self.images[i])

        with open(path, 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')

        if self.transform is not None:
            img = self.transform(img)

        label = torch.tensor(self.labels[i], dtype=torch.long)

        return img, label

    def __len__(self):
        return len(self.images)


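# Minimal usage sketch (assumption-laden, never called here): wraps
# LNMLocationDataset in a DataLoader. The csv name and transform pipeline are
# hypothetical placeholders, not files or settings from this repository.
def _example_lnm_location_loader():
    from torchvision import transforms
    from torch.utils.data import DataLoader

    tfs = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    ds = LNMLocationDataset(infile='lnm_location_train.csv', transform=tfs)  # hypothetical csv
    # Encoded integer labels can be mapped back to tag strings with
    # ds.lbe.inverse_transform(...) or via ds.class_to_idx.
    return DataLoader(ds, batch_size=32, shuffle=True, num_workers=4)
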
class CSVDataset(torch.utils.data.dataset.Dataset):
    """Loads (image, label) pairs from a csv file with 'image_name' and 'label' columns.

    'image_name' must hold full image paths and 'label' integer class indices.
    """
    def __init__(self, infile=None, transform=None, df=None):
        self.transform = transform
        self.lbe = LabelEncoder()

        # Read the csv file unless a DataFrame is passed in directly.
        if df is None:
            df = pd.read_csv(infile)

        # Convert to plain lists so positional indexing in __getitem__ works
        # even when a filtered DataFrame (non-contiguous index) is given.
        self.images = df['image_name'].to_list()
        #self.labels = self.lbe.fit_transform(df['label'])
        self.labels = df['label'].to_list()

        self.classes = sorted(np.unique(df['label']))
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))

        self.imgs = list(zip(self.images, self.labels))

    def __getitem__(self, i):
        path = self.images[i]

        with open(path, 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')

        if self.transform is not None:
            img = self.transform(img)

        label = torch.tensor(self.labels[i], dtype=torch.long)

        return img, label

    def __len__(self):
        return len(self.images)


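# Minimal usage sketch (assumption-laden, never called here): CSVDataset expects
# 'image_name' to hold full paths and 'label' to already be integer class
# indices. The csv name and the stratified split are hypothetical illustrations.
def _example_csv_train_val_loaders():
    from sklearn.model_selection import train_test_split
    from torch.utils.data import DataLoader

    df = pd.read_csv('images_train.csv')  # hypothetical csv with image_name,label columns
    train_df, val_df = train_test_split(df, test_size=0.2,
                                        stratify=df['label'], random_state=0)
    train_loader = DataLoader(CSVDataset(df=train_df), batch_size=32, shuffle=True)
    val_loader = DataLoader(CSVDataset(df=val_df), batch_size=32, shuffle=False)
    return train_loader, val_loader
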
class HTDataset(torch.utils.data.dataset.Dataset):
    """Multi-modal Hashimoto's thyroiditis dataset.

    Pairs each image (from image_file) with a serum-marker vector
    (Tg, Anti-TG, Anti-TPO, T3, T4, TSH) sampled from antibody_file that
    carries the same binary hashimoto_thyroiditis label.
    """
    def __init__(self, image_file, antibody_file, image_tfs=None, expression_tfs=None):
        self.image_tfs = image_tfs
        self.expression_tfs = expression_tfs

        df = pd.read_csv(image_file)
        self.images = df['image_name'].to_list()
        self.labels = df['label'].to_list()

        df = pd.read_csv(antibody_file)
        self.x = np.asarray(df.loc[:, ["Tg", "Anti-TG", "Anti-TPO", "T3", "T4", "TSH"]])
        self.x = torch.as_tensor(self.x, dtype=torch.float32)
        self.y = np.asarray(df['hashimoto_thyroiditis'])

        # Both modalities must be binary-labelled (positive/negative).
        assert len(np.unique(self.labels)) == 2
        assert len(np.unique(self.y)) == 2

        # Split the serum-marker table by label so a matching-label vector can
        # be sampled for each image at __getitem__ time.
        self.x_pos = self.x[self.y == 1]
        self.x_neg = self.x[self.y == 0]

        self.x_pos_k = len(self.x_pos)
        self.x_neg_k = len(self.x_neg)

    def __getitem__(self, i):
        path = self.images[i]

        with open(path, 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')

        if self.image_tfs is not None:
            img = self.image_tfs(img)

        # Draw a serum-marker vector from the pool that matches the image label.
        if self.labels[i] == 1:
            #x = self.x_pos[np.random.choice(self.x_pos_k)]
            x = self.get_permuted_sample(self.x_pos)
        else:
            #x = self.x_neg[np.random.choice(self.x_neg_k)]
            x = self.get_permuted_sample(self.x_neg)

        label = torch.tensor(self.labels[i], dtype=torch.long)
        if self.expression_tfs is not None:
            x = self.expression_tfs(x)

        return img, x, label

    def __len__(self):
        return len(self.images)

    def get_permuted_sample(self, x):
        # With 5% probability return one real row; otherwise build a synthetic
        # vector by sampling each marker (column) independently across rows.
        if np.random.random_sample() < 0.05:
            return torch.as_tensor(x[np.random.choice(len(x))], dtype=torch.float32)
        else:
            return torch.as_tensor(np.apply_along_axis(np.random.choice, 0, x), dtype=torch.float32)


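# Minimal usage sketch (assumption-laden, never called here): each HTDataset item
# is (image, serum-marker vector, label), so a batch yields two input modalities.
# File names and the transform are hypothetical placeholders.
def _example_ht_loader():
    from torchvision import transforms
    from torch.utils.data import DataLoader

    image_tfs = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])
    ds = HTDataset('ht_images_train.csv', 'ht_antibody_train.csv',  # hypothetical csv files
                   image_tfs=image_tfs)
    loader = DataLoader(ds, batch_size=16, shuffle=True)
    for img, x, label in loader:
        # img: (16, 3, 224, 224) float tensor, x: (16, 6) serum markers, label: (16,) long
        break
    return loader
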
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)


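# Minimal usage sketch (illustrative, never called here): SmoothedValue reports
# statistics over a sliding window plus a running global average.
def _example_smoothed_value():
    loss_meter = SmoothedValue(window_size=20)
    for loss in [0.9, 0.7, 0.65, 0.5]:
        loss_meter.update(loss)
    # str(loss_meter) -> "0.6500 (0.6875)": window median and global average.
    return loss_meter.global_avg
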
class MetricLogger(object):
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {}'.format(header, total_time_str))


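# Minimal training-loop sketch (assumption-laden, never called here): log_every
# wraps the loader and prints smoothed meters, ETA and timing every print_freq
# batches. model/criterion/optimizer/data_loader/device are placeholders.
def _example_train_one_epoch(model, criterion, optimizer, data_loader, device, epoch=0):
    metric_logger = MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
    header = 'Epoch: [{}]'.format(epoch)
    for img, label in metric_logger.log_every(data_loader, print_freq=10, header=header):
        img, label = img.to(device), label.to(device)
        output = model(img)
        loss = criterion(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        acc1, = accuracy(output, label, topk=(1,))
        metric_logger.update(loss=loss.item(), acc1=acc1.item(),
                             lr=optimizer.param_groups[0]['lr'])
    return metric_logger
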
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target[None])

        res = []
        for k in topk:
            correct_k = correct[:k].flatten().sum(dtype=torch.float32)
            res.append(correct_k * (100.0 / batch_size))
        return res


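# Worked example for accuracy() (illustrative, never called here): top-1 counts a
# sample as correct only if the argmax matches the target, top-2 if the target is
# among the two highest logits.
def _example_accuracy():
    logits = torch.tensor([[2.0, 1.0, 0.1],
                           [0.2, 3.0, 0.5],
                           [1.5, 0.3, 0.9]])
    target = torch.tensor([0, 2, 0])
    acc1, acc2 = accuracy(logits, target, topk=(1, 2))
    # acc1 ~ 66.67 (rows 0 and 2 correct), acc2 == 100.0 (row 1's target class 2
    # is its second-highest logit); both are returned as percentages.
    return acc1, acc2
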
def mkdir(path):
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


def setup_for_distributed(is_master):
    """
    Disables printing when this is not the master process.
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


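# Minimal checkpoint sketch (assumption-laden, never called here): writing only
# from the main process avoids concurrent writes in multi-GPU runs. The output
# directory and checkpoint keys are placeholders.
def _example_save_checkpoint(model, optimizer, epoch, output_dir='checkpoints'):
    mkdir(output_dir)
    checkpoint = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
    }
    save_on_master(checkpoint, os.path.join(output_dir, 'model_{}.pth'.format(epoch)))
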
def init_distributed_mode(args):
    # Pick up rank/world size from torchrun/torch.distributed.launch env vars,
    # fall back to SLURM, then to a pre-set args.rank, else run non-distributed.
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    elif hasattr(args, "rank"):
        pass
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    setup_for_distributed(args.rank == 0)
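

# Minimal setup sketch (assumption-laden, never called here): init_distributed_mode
# expects an argparse-style namespace carrying dist_url; rank/world_size/gpu are read
# from torchrun or SLURM environment variables when present. The flag name mirrors the
# torchvision reference scripts and is an assumption here. Hypothetical launch:
#   torchrun --nproc_per_node=4 train.py --dist-url env://
def _example_init_distributed():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--dist-url', default='env://',
                        help='url used to set up distributed training')
    args = parser.parse_args([])
    init_distributed_mode(args)  # falls back to single-process mode without env vars
    return args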