|
a |
|
b/python/features_ECG.py |
|
|
1 |
#!/usr/bin/env python |
|
|
2 |
|
|
|
3 |
""" |
|
|
4 |
features_ECG.py |
|
|
5 |
|
|
|
6 |
VARPA, University of Coruna |
|
|
7 |
Mondejar Guerra, Victor M. |
|
|
8 |
23 Oct 2017 |
|
|
9 |
""" |
|
|
10 |
|
|
|
11 |
import numpy as np |
|
|
12 |
from scipy.signal import medfilt |
|
|
13 |
import scipy.stats |
|
|
14 |
import pywt |
|
|
15 |
import operator |
|
|
16 |
|
|
|
17 |
from mit_db import * |
|
|
18 |
|
|
|
19 |
# Input: the R-peaks from a signal |
|
|
20 |
# Return: the features RR intervals |
|
|
21 |
# (pre_RR, post_RR, local_RR, global_RR) |
|
|
22 |
# for each beat |
|
|
23 |
def compute_RR_intervals(R_poses):
    """Compute the four RR-interval features for every beat of a record.

    Given the R-peak sample positions, build per-beat:
      pre_R    - distance (samples) to the previous R-peak (first beat
                 mirrors the second beat's value)
      post_R   - distance (samples) to the next R-peak (last beat mirrors
                 the previous one's value)
      local_R  - mean of the last (up to) 10 pre_R values
      global_R - mean of the pre_R values of all earlier beats closer than
                 108000 samples (per the original TODO: 5 minutes)

    Returns an RR_intervals instance (from mit_db) with one entry per beat.
    """
    features_RR = RR_intervals()

    n = len(R_poses)
    if n < 2:
        # Fewer than two peaks: no interval can be measured.
        # (The original code raised IndexError in this case.)
        pre_R = np.zeros(n)
        post_R = np.zeros(n)
    else:
        diffs = np.diff(R_poses)  # diffs[k] = R_poses[k+1] - R_poses[k]
        # pre_R[0] mirrors pre_R[1]; post_R[-1] mirrors post_R[-2]
        pre_R = np.concatenate(([diffs[0]], diffs))
        post_R = np.concatenate((diffs, [diffs[-1]]))

    # local_R: average of the (up to) 10 most recent pre_R values,
    # including the current beat's own pre_R
    local_R = np.array([pre_R[max(0, i - 9):i + 1].mean() for i in range(n)])

    # global_R: average pre_R over the past beats within 108000 samples
    # TODO (from original): window should be the past 5 minutes = 108000 samples
    global_R = np.zeros(n)
    if n > 0:
        global_R[0] = pre_R[0]
    for i in range(1, n):
        recent = [pre_R[j] for j in range(i) if (R_poses[i] - R_poses[j]) < 108000]
        # Guard the empty window (the original divided by zero here)
        global_R[i] = np.mean(recent) if recent else 0.0

    # Bulk-append instead of the original per-element append loop
    features_RR.pre_R = np.append(features_RR.pre_R, pre_R)
    features_RR.post_R = np.append(features_RR.post_R, post_R)
    features_RR.local_R = np.append(features_RR.local_R, local_R)
    features_RR.global_R = np.append(features_RR.global_R, global_R)

    return features_RR
|
|
77 |
|
|
|
78 |
# Compute the wavelet descriptor for a beat |
|
|
79 |
def compute_wavelet_descriptor(beat, family, level):
    """Wavelet descriptor of a beat: the approximation coefficients of a
    `level`-deep discrete wavelet decomposition with the given family."""
    decomposition = pywt.wavedec(beat, pywt.Wavelet(family), level=level)
    return decomposition[0]
|
|
83 |
|
|
|
84 |
# Compute my descriptor based on amplitudes of several intervals |
|
|
85 |
def compute_my_own_descriptor(beat, winL, winR): |
|
|
86 |
R_pos = int((winL + winR) / 2) |
|
|
87 |
|
|
|
88 |
R_value = beat[R_pos] |
|
|
89 |
my_morph = np.zeros((4)) |
|
|
90 |
y_values = np.zeros(4) |
|
|
91 |
x_values = np.zeros(4) |
|
|
92 |
# Obtain (max/min) values and index from the intervals |
|
|
93 |
[x_values[0], y_values[0]] = max(enumerate(beat[0:40]), key=operator.itemgetter(1)) |
|
|
94 |
[x_values[1], y_values[1]] = min(enumerate(beat[75:85]), key=operator.itemgetter(1)) |
|
|
95 |
[x_values[2], y_values[2]] = min(enumerate(beat[95:105]), key=operator.itemgetter(1)) |
|
|
96 |
[x_values[3], y_values[3]] = max(enumerate(beat[150:180]), key=operator.itemgetter(1)) |
|
|
97 |
|
|
|
98 |
x_values[1] = x_values[1] + 75 |
|
|
99 |
x_values[2] = x_values[2] + 95 |
|
|
100 |
x_values[3] = x_values[3] + 150 |
|
|
101 |
|
|
|
102 |
# Norm data before compute distance |
|
|
103 |
x_max = max(x_values) |
|
|
104 |
y_max = max(np.append(y_values, R_value)) |
|
|
105 |
x_min = min(x_values) |
|
|
106 |
y_min = min(np.append(y_values, R_value)) |
|
|
107 |
|
|
|
108 |
R_pos = (R_pos - x_min) / (x_max - x_min) |
|
|
109 |
R_value = (R_value - y_min) / (y_max - y_min) |
|
|
110 |
|
|
|
111 |
for n in range(0,4): |
|
|
112 |
x_values[n] = (x_values[n] - x_min) / (x_max - x_min) |
|
|
113 |
y_values[n] = (y_values[n] - y_min) / (y_max - y_min) |
|
|
114 |
x_diff = (R_pos - x_values[n]) |
|
|
115 |
y_diff = R_value - y_values[n] |
|
|
116 |
my_morph[n] = np.linalg.norm([x_diff, y_diff]) |
|
|
117 |
# TODO test with np.sqrt(np.dot(x_diff, y_diff)) |
|
|
118 |
|
|
|
119 |
if np.isnan(my_morph[n]): |
|
|
120 |
my_morph[n] = 0.0 |
|
|
121 |
|
|
|
122 |
return my_morph |
|
|
123 |
|
|
|
124 |
|
|
|
125 |
# Compute the HOS descriptor for a beat |
|
|
126 |
# Skewness (3 cumulant) and kurtosis (4 cumulant) |
|
|
127 |
# Compute the HOS descriptor for a beat
# Skewness (3 cumulant) and kurtosis (4 cumulant)
def compute_hos_descriptor(beat, n_intervals, lag):
    """Higher-order-statistics descriptor of a beat.

    Splits the beat into (n_intervals - 1) windows of width `lag` centred at
    lag, 2*lag, ... and returns, for each window, its biased skewness
    followed (in the second half of the vector) by its biased Pearson
    kurtosis.  NaNs (e.g. constant windows) are replaced by 0.
    """
    half = lag // 2  # integer half-window: lag/2 is a float in Python 3 and breaks slicing
    hos_b = np.zeros((n_intervals - 1) * 2)
    for i in range(0, n_intervals - 1):
        pose = lag * (i + 1)
        interval = beat[pose - half:pose + half]

        # Skewness (3rd-order cumulant), biased estimator
        hos_b[i] = scipy.stats.skew(interval, 0, True)
        if np.isnan(hos_b[i]):
            hos_b[i] = 0.0

        # Kurtosis (4th-order cumulant), Pearson definition, biased estimator
        hos_b[(n_intervals - 1) + i] = scipy.stats.kurtosis(interval, 0, False, True)
        if np.isnan(hos_b[(n_intervals - 1) + i]):
            hos_b[(n_intervals - 1) + i] = 0.0
    return hos_b
|
|
144 |
|
|
|
145 |
|
|
|
146 |
# The "uniform" 8-bit LBP codes (patterns with few 0/1 transitions in the
# circular byte); the index of a code in this array is its histogram bin
# in compute_Uniform_LBP.
uniform_pattern_list = np.array([0, 1, 2, 3, 4, 6, 7, 8, 12, 14, 15, 16, 24, 28, 30, 31, 32, 48, 56, 60, 62, 63, 64, 96, 112, 120, 124, 126, 127, 128,
                                 129, 131, 135, 143, 159, 191, 192, 193, 195, 199, 207, 223, 224, 225, 227, 231, 239, 240, 241, 243, 247, 248, 249, 251, 252, 253, 254, 255])
|
|
148 |
# Compute the uniform LBP 1D from signal with neigh equal to number of neighbours |
|
|
149 |
# and return the 59 histogram: |
|
|
150 |
# 0-57: uniform patterns |
|
|
151 |
# 58: the non uniform pattern |
|
|
152 |
# NOTE: this method only works with neigh = 8 |
|
|
153 |
# Compute the uniform LBP 1D from signal with neigh equal to number of neighbours
# and return the 59 histogram:
# 0-57: uniform patterns
# 58: the non uniform pattern
# NOTE: this method only works with neigh = 8
def compute_Uniform_LBP(signal, neigh=8):
    """59-bin histogram of uniform 1-D LBP codes over the signal.

    For each sample, compare it against `neigh` neighbours (neigh/2 on each
    side); uniform codes map to bins 0-57 via `uniform_pattern_list`, all
    non-uniform codes share bin 58.
    """
    hist_u_lbp = np.zeros(59, dtype=float)

    half = neigh // 2  # integer offsets: neigh/2 is a float in Python 3
    # Py3 fix: range objects cannot be added, so materialise the offsets once
    neighbour_offsets = list(range(-half, 0)) + list(range(1, half + 1))

    # NOTE (from original, disabled): reduce sampling by half first, e.g.
    # scipy.signal.resample(signal, len(signal) // 2)

    for i in range(half, len(signal) - half):
        pattern = np.zeros(neigh)
        ind = 0
        for n in neighbour_offsets:
            if signal[i] > signal[i + n]:
                pattern[ind] = 1
            ind += 1
        # Convert pattern to id-int 0-255 (for neigh == 8)
        pattern_id = int("".join(str(c) for c in pattern.astype(int)), 2)

        # Convert id to uniform LBP id 0-57 (uniform LBP) 58: (non uniform LBP)
        if pattern_id in uniform_pattern_list:
            pattern_uniform_id = int(np.argwhere(uniform_pattern_list == pattern_id))
        else:
            pattern_uniform_id = 58  # all non-uniform patterns share one bin

        hist_u_lbp[pattern_uniform_id] += 1.0

    return hist_u_lbp
|
|
179 |
|
|
|
180 |
|
|
|
181 |
def compute_LBP(signal, neigh=4):
    """Histogram (2**neigh bins) of raw 1-D LBP codes over the signal.

    Each sample is compared against its `neigh` neighbours (neigh/2 on each
    side); the resulting bit pattern is the histogram bin index.
    """
    hist_u_lbp = np.zeros(np.power(2, neigh), dtype=float)

    # TODO (from original): run the LBP over an averaged/resampled signal.
    # The previous code resampled into `signal_avg` (with a float count that
    # raises TypeError on Python 3) but never used it, so it was dropped.

    half = neigh // 2  # integer offsets: neigh/2 is a float in Python 3
    # Py3 fix: range objects cannot be added, so materialise the offsets once
    neighbour_offsets = list(range(-half, 0)) + list(range(1, half + 1))

    for i in range(half, len(signal) - half):
        pattern = np.zeros(neigh)
        ind = 0
        for n in neighbour_offsets:
            if signal[i] > signal[i + n]:
                pattern[ind] = 1
            ind += 1
        # Convert pattern to id-int (0-255 for neigh == 8)
        pattern_id = int("".join(str(c) for c in pattern.astype(int)), 2)

        hist_u_lbp[pattern_id] += 1.0

    return hist_u_lbp
|
|
203 |
|
|
|
204 |
|
|
|
205 |
|
|
|
206 |
# https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.polynomials.hermite.html |
|
|
207 |
# Support Vector Machine-Based Expert System for Reliable Heartbeat Recognition |
|
|
208 |
# 15 hermite coefficients! |
|
|
209 |
# https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.polynomials.hermite.html
# Support Vector Machine-Based Expert System for Reliable Heartbeat Recognition
# 15 hermite coefficients!
def compute_HBF(beat):
    """Hermite basis function descriptor of a beat.

    Least-squares fits Hermite series of degree 3, 4 and 5 to the beat
    (sampled at x = 0..len(beat)-1) and concatenates their coefficients:
    4 + 5 + 6 = 15 values.
    """
    xs = np.arange(len(beat))
    # Bug fix: `hermfit` was an undefined bare name (never imported);
    # it lives in numpy.polynomial.hermite.
    coeffs_HBF_3 = np.polynomial.hermite.hermfit(xs, beat, 3)  # 3, 4, 5, 6?
    coeffs_HBF_4 = np.polynomial.hermite.hermfit(xs, beat, 4)
    coeffs_HBF_5 = np.polynomial.hermite.hermfit(xs, beat, 5)
    #coeffs_HBF_6 = np.polynomial.hermite.hermfit(xs, beat, 6)

    return np.concatenate((coeffs_HBF_3, coeffs_HBF_4, coeffs_HBF_5))