|
a |
|
b/(1) PyTorch_HistoNet/util/computeMeanStd.py |
|
|
1 |
import numpy as np |
|
|
2 |
import torch |
|
|
3 |
from util import pause |
|
|
4 |
|
|
|
5 |
def computeMeanStd_RGB(dataloader, dataset_sizes, batch_sizeP, cuda):
    """Estimate per-channel (R, G, B) mean and std over a dataset.

    Makes one pass over ``dataloader``, computing each batch's per-channel
    mean and std, then averages the per-batch statistics.  NOTE: averaging
    batch stds is only an approximation of the pooled dataset std, but it
    matches the original implementation's behavior.

    Args:
        dataloader: iterable yielding ``(data, label)`` pairs, where ``data``
            is a tensor whose second dimension indexes channels (R=0, G=1,
            B=2) — presumably ``(N, 3, H, W)``; not verifiable from here.
        dataset_sizes: total number of samples (used only for the progress
            display).
        batch_sizeP: batch size (used only for the progress display).
        cuda: if True, move each batch to the GPU before computing stats.

    Returns:
        ``(mean_R, mean_G, mean_B, std_R, std_G, std_B)`` as numpy scalars.
    """
    # ceil, not round: a final partial batch still counts toward the total.
    numBatches = int(np.ceil(dataset_sizes / batch_sizeP))

    pop_mean_R = []
    pop_mean_G = []
    pop_mean_B = []
    pop_std0_R = []
    pop_std0_G = []
    pop_std0_B = []

    for i, (data, y) in enumerate(dataloader):
        # progress display
        if i % 100 == 0:
            print("\tBatch n. {0} / {1}".format(i, int(numBatches)))

        if cuda:
            data = data.to('cuda')

        # data[:, c, :] selects channel c across all samples/spatial dims;
        # each statistic is a scalar tensor.  .item() returns a detached
        # Python float on any device, replacing the original cuda-only
        # detach()/to('cpu') branch.
        pop_mean_R.append(torch.mean(data[:, 0, :]).item())
        pop_mean_G.append(torch.mean(data[:, 1, :]).item())
        pop_mean_B.append(torch.mean(data[:, 2, :]).item())
        pop_std0_R.append(torch.std(data[:, 0, :]).item())
        pop_std0_G.append(torch.std(data[:, 1, :]).item())
        pop_std0_B.append(torch.std(data[:, 2, :]).item())

    # Collapse the per-batch lists into a single average per statistic.
    pop_mean_R = np.mean(pop_mean_R)
    pop_mean_G = np.mean(pop_mean_G)
    pop_mean_B = np.mean(pop_mean_B)
    pop_std0_R = np.mean(pop_std0_R)
    pop_std0_G = np.mean(pop_std0_G)
    pop_std0_B = np.mean(pop_std0_B)

    return pop_mean_R, pop_mean_G, pop_mean_B, pop_std0_R, pop_std0_G, pop_std0_B
|
|
58 |
|
|
|
59 |
|
|
|
60 |
def computeMeanStd(dataloader_all, dataset_sizes, batch_sizeP, cuda):
    """Estimate a global mean and std over one or more dataloaders.

    Makes one pass over every dataloader in ``dataloader_all``, computing
    each batch's overall mean and std (across all channels/pixels), then
    averages the per-batch statistics.  NOTE: averaging batch stds is only
    an approximation of the pooled dataset std, but it matches the original
    implementation's behavior.

    Args:
        dataloader_all: iterable of dataloaders, each yielding
            ``(data, label)`` pairs where ``data`` is a tensor.
        dataset_sizes: unused; kept for interface compatibility with
            ``computeMeanStd_RGB``.
        batch_sizeP: unused; kept for interface compatibility with
            ``computeMeanStd_RGB``.
        cuda: if True, move each batch to the GPU before computing stats.

    Returns:
        ``(pop_mean, pop_std0)`` as numpy scalars.
    """
    # Dead commented-out debug code and the unused numBatches computation
    # from the original were removed; the public interface is unchanged.
    pop_mean = []
    pop_std0 = []

    for dataloader in dataloader_all:
        for data, _ in dataloader:
            if cuda:
                data = data.to('cuda')

            # .item() yields a detached Python float on any device,
            # replacing the original cuda-only detach()/to('cpu') branch.
            pop_mean.append(torch.mean(data).item())
            pop_std0.append(torch.std(data).item())

    # Collapse the per-batch lists into a single average per statistic.
    pop_mean = np.mean(pop_mean)
    pop_std0 = np.mean(pop_std0)

    return pop_mean, pop_std0