darkflow/dark/connected.py
|
import numpy as np

from .layer import Layer
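
# Three dense-layer variants. extract_layer and select_layer appear to
# be used when a pretrained fully connected layer is re-wired to new
# input/output dimensions: both present themselves as a plain
# connected_layer and slice checkpoint weights down in recollect.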
|
|
class extract_layer(Layer):
    def setup(self, old_inp, old_out,
              activation, inp, out):
        # with no explicit subset, keep every input of the old layer
        if inp is None:
            inp = range(old_inp)
        self.activation = activation
        self.old_inp = old_inp
        self.old_out = old_out
        self.inp = inp
        self.out = out
        self.wshape = {
            'biases': [len(self.out)],
            'weights': [len(self.inp), len(self.out)]
        }
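
    # signature/present/recollect let this layer stand in for a plain
    # connected layer when the graph is rebuilt: signature reports the
    # constructor args of an equivalent connected layer, present builds
    # it, and recollect slices the saved weights down to fit.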
    @property
    def signature(self):
        sig = ['connected']
        # strip the type token at the front and the trailing (inp, out)
        # pair, leaving the args an equivalent connected layer expects
        sig += self._signature[1:-2]
        return sig

    def present(self):
        args = self.signature
        self.presenter = connected_layer(*args)

    def recollect(self, val):
        w = val['weights']
        b = val['biases']
        if w is None:
            self.w = val
            return
        # keep only the selected input rows and output columns
        w = np.take(w, self.inp, 0)
        w = np.take(w, self.out, 1)
        b = np.take(b, self.out)
        assert1 = w.shape == tuple(self.wshape['weights'])
        assert2 = b.shape == tuple(self.wshape['biases'])
        assert assert1 and assert2, \
            'Dimensions do not match in {} recollect'.format(
                self._signature)
        self.w['weights'] = w
        self.w['biases'] = b
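

# select_layer keeps the pretrained output units listed in `keep` and
# appends the units from index `train` onward, presumably those left
# open for further training (see recollect below).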
class select_layer(Layer):
    def setup(self, inp, old,
              activation, inp_idx,
              out, keep, train):
        self.old = old
        self.keep = keep
        self.train = train
        self.inp_idx = inp_idx
        self.activation = activation
        # an explicit input-index subset overrides the input size
        inp_dim = inp
        if inp_idx is not None:
            inp_dim = len(inp_idx)
        self.inp = inp_dim
        self.out = out
        self.wshape = {
            'biases': [out],
            'weights': [inp_dim, out]
        }

    @property
    def signature(self):
        sig = ['connected']
        # strip the type token and the trailing
        # (inp_idx, out, keep, train) arguments
        sig += self._signature[1:-4]
        return sig

    def present(self):
        args = self.signature
        self.presenter = connected_layer(*args)

    def recollect(self, val):
        w = val['weights']
        b = val['biases']
        if w is None:
            self.w = val
            return
        if self.inp_idx is not None:
            w = np.take(w, self.inp_idx, 0)

        # carry over the `keep` output units, then append the units
        # from index `train` onward
        keep_b = np.take(b, self.keep)
        keep_w = np.take(w, self.keep, 1)
        train_b = b[self.train:]
        train_w = w[:, self.train:]
        self.w['biases'] = np.concatenate(
            (keep_b, train_b), axis=0)
        self.w['weights'] = np.concatenate(
            (keep_w, train_w), axis=1)
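

# The plain fully connected layer that the two classes above present
# themselves as.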
class connected_layer(Layer):
    def setup(self, input_size,
              output_size, activation):
        self.activation = activation
        self.inp = input_size
        self.out = output_size
        self.wshape = {
            'biases': [self.out],
            'weights': [self.inp, self.out]
        }
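
    # finalize reshapes the flat weight vector loaded into self.w;
    # when `transpose` is false the checkpoint apparently stores
    # weights as (out, in), so they are reshaped that way and then
    # swapped into (in, out).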
    def finalize(self, transpose):
        weights = self.w['weights']
        if weights is None:
            return
        shp = self.wshape['weights']
        if not transpose:
            weights = weights.reshape(shp[::-1])
            weights = weights.transpose([1, 0])
        else:
            weights = weights.reshape(shp)
        self.w['weights'] = weights
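

if __name__ == '__main__':
    # Minimal sketch (not part of darkflow) of the slicing that
    # extract_layer.recollect performs on checkpoint weights: keep
    # input rows `inp` and output columns `out` of a dense layer.
    # Shapes and index lists are made up for illustration.
    w = np.arange(12.).reshape(4, 3)   # old_inp = 4, old_out = 3
    b = np.arange(3.)
    inp, out = [0, 2, 3], [1, 2]
    w = np.take(w, inp, 0)             # keep chosen input rows
    w = np.take(w, out, 1)             # keep chosen output columns
    b = np.take(b, out)
    assert w.shape == (len(inp), len(out))
    assert b.shape == (len(out),)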