Inference/utils.py
import numpy as np
import scipy.signal
import scipy.io
from sklearn.preprocessing import StandardScaler
import pickle
from glob import glob
import os
import pyedflib
from tqdm import tqdm
from network import IncUNet_HR, IncUNet_BR

import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader
from torch.autograd import Variable

def custom_resample(ECG, fs):
    # Nearest-neighbour resampling of the ECG from its native rate `fs` to 500 Hz,
    # the sampling rate both networks expect.
    modified_ECG = []
    for i in range(int(len(ECG) * 500 / fs)):
        modified_ECG.append(ECG[int(fs / 500 * i)].astype(float))
    return modified_ECG
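
# Hedged usage sketch (the 360 Hz input rate and the name `ecg_raw` are illustrative,
# not fixed by this module):
#   ecg_500 = np.asarray(custom_resample(ecg_raw, fs=360))   # ecg_raw: 1-D ECG at 360 Hz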

def peak_correction(peak_locs, ecg_records):
    # Snap each predicted peak to the local maximum of the raw ECG within a
    # +/- 37 sample neighbourhood; peaks too close to the window edges are dropped.
    mod_peak_locs = []
    ecg_records = ecg_records.cpu().numpy()
    for j in range(len(peak_locs)):
        corrected = [
            loc - 37 + np.argmax(ecg_records[j, 0, loc - 37:loc + 37])
            for loc in peak_locs[j]
            if 37 < loc < 5000 - 37
        ]
        mod_peak_locs.append(np.asarray(corrected))
    return mod_peak_locs

def obtain_ecg_record(path_records, fs, window_size=5000):
    # Load each .mat record, resample it to 500 Hz and standardise it in
    # 5000-sample (10 s) windows. Two window sets are produced:
    #   * ecg_records        -- windows with a 500-sample overlap (stride 4500), fed to the networks
    #   * actual_ecg_windows -- non-overlapping windows
    scaler = StandardScaler()
    ecg_records = []
    actual_ecg_windows = []
    for path in path_records:
        ECG = scipy.io.loadmat(path)['ecg']
        if ECG.shape[0] != 1:
            ECG = ECG[:, 0]
        else:
            ECG = ECG[0, :]
        ECG = np.asarray(custom_resample(ECG, fs))
        ### Standardise the ECG one 5000-sample window at a time (speed vs. accuracy trade-off)
        ecg_windows = []
        for record_no in range(len(ECG) // (window_size - 500)):
            ecg_windows.append(scaler.fit_transform(ECG[4500 * record_no : 4500 * record_no + window_size].reshape(-1, 1)))
        for record_no in range(len(ECG) // window_size):
            actual_ecg_windows.append(scaler.fit_transform(ECG[5000 * record_no : 5000 * record_no + window_size].reshape(-1, 1)))
        ecg_records.append(np.asarray(ecg_windows))

    # Stack every window into a single (n_windows, 5000) array, zero-padding a
    # final window that is shorter than 5000 samples.
    initial = np.zeros((1, 5000))
    for ecg_record in ecg_records:
        if ecg_record[-1].shape[0] != 5000:
            ecg_record[-1] = np.vstack((ecg_record[-1], np.zeros(5000 - ecg_record[-1].shape[0]).reshape(-1, 1)))
        for window_no in range(len(ecg_record)):
            initial = np.vstack((initial, ecg_record[window_no][:, 0].reshape(1, -1)))

    ecg_records = initial[1:, :]  # drop the zero row used for initialisation
    actual_ecg_windows = np.asarray(actual_ecg_windows)[:, :, 0]
    return ecg_records, actual_ecg_windows
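
# Hedged sketch of wiring the windows into the test loaders used below; the glob pattern,
# sampling rate and batch size are assumptions, not fixed by this module:
#   paths = sorted(glob("records/*.mat"))
#   ecg_records, actual_windows = obtain_ecg_record(paths, fs=500)
#   windows = torch.from_numpy(ecg_records).unsqueeze(1).float()          # (N, 1, 5000)
#   test_loader = DataLoader(TensorDataset(windows), batch_size=32, shuffle=False)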

def load_model_HR(SAVED_MODEL_PATH, test_loader, device, batch_len, window_size):
    # Load the trained R-peak (heart-rate) network and run it over every ECG window,
    # returning the detected R-peak locations as sample indices on the global 500 Hz axis.
    C, H, W = 1, 1, 5000
    loaded_model = IncUNet_HR(in_shape=(C, H, W))
    loaded_model.load_state_dict(torch.load(SAVED_MODEL_PATH, map_location=lambda storage, loc: storage, pickle_module=pickle))
    loaded_model.to(device)
    loaded_model.eval()
    print("-------- Evaluation --------")

    pred_peaks = []
    peak_array = np.array([1])
    with torch.no_grad():
        for step, x in tqdm(enumerate(test_loader)):
            x = Variable(x[0].to(device))
            y_predict_test = loaded_model(x)
            y_predict_test = y_predict_test.cpu().numpy()[:, 0, :]  # works for both CPU and CUDA tensors
            predicted_peak_locs = np.asarray(peak_finder(y_predict_test, x))
            for i in range(len(predicted_peak_locs)):
                # Keep peaks inside the 0.5 s - 9.5 s core of each window, then shift them
                # onto the global time axis (consecutive windows overlap by 500 samples).
                predicted_peak_locs_new = predicted_peak_locs[i][(predicted_peak_locs[i] >= 0.5 * 500) & (predicted_peak_locs[i] <= 9.5 * 500)]
                pred_peaks.append(np.asarray(predicted_peak_locs_new + i * (window_size - 500) + step * batch_len * (window_size - 500)).astype(int))
        for i in range(len(pred_peaks)):
            peak_array = np.hstack((peak_array, pred_peaks[i]))
        actual_peak_locs = peak_array[1:]  # drop the placeholder the array was initialised with
    return actual_peak_locs
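
# Hedged usage sketch (checkpoint path and batch size are assumptions):
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   peaks = load_model_HR("saved_model_HR.pt", test_loader, device, batch_len=32, window_size=5000)
#   # `peaks` holds R-peak sample indices on the global 500 Hz time axis.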

def load_model_BR(SAVED_MODEL_PATH, test_loader, device, batch_len, window_size):
    # Load the trained breathing-rate network and collect breath (valley) positions
    # across all windows, expressed on the 125 Hz output grid.
    C, H, W = 1, 1, 5000
    loadedModel = IncUNet_BR(in_shape=(C, H, W))
    # Load onto CPU storage; the model is never moved to a GPU in this function.
    loadedModel.load_state_dict(torch.load(SAVED_MODEL_PATH, map_location=lambda storage, loc: storage)["model"])
    loadedModel.eval()
    print("-------- Evaluation --------")

    fs_upsample = 500   # input ECG sampling rate
    fs_BR = 125         # sampling rate of the network's respiratory output
    no_sec = 10
    breathsPosition = np.array([])
    # Only valleys from the middle half of each window are trusted, since windows overlap.
    middle_start, middle_end = no_sec * fs_BR // 4, 3 * no_sec * fs_BR // 4
    start = 0
    for step, x in enumerate(test_loader):
        valleys, predicted = getBR(x[0], loadedModel)
        current = valleys[(valleys >= middle_start) & (valleys <= middle_end)]
        startBR = int(start / (fs_upsample / fs_BR))
        if start == 0:
            breathsPosition = np.append(breathsPosition, valleys[valleys < middle_end])
        else:
            breathsPosition = np.append(breathsPosition, current + startBR)
        start += 2500
    return breathsPosition
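
# Hedged sketch (checkpoint path and batch size are assumptions): breath positions are valley
# indices on the 125 Hz output grid, so an average breathing rate can be estimated as
#   breaths = load_model_BR("saved_model_BR.pt", test_loader, device, batch_len=32, window_size=5000)
#   br_per_min = 60 * 125 / np.mean(np.diff(breaths))   # needs at least two detected breaths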

def peak_finder(y_pred_array, x_test):
    # Candidate R-peaks are taken at minima of the model output (peaks of its negation),
    # then refined against the raw ECG by peak_correction.
    fs_ = 500
    peak_locs = []
    for i in range(y_pred_array.shape[0]):
        peak_locs.append(scipy.signal.find_peaks(-y_pred_array[i, :], distance=120, height=-0.4, prominence=0.035)[0])
        # Discard detections in the first/last 0.5 s of the window.
        peak_locs[i] = peak_locs[i][(peak_locs[i] >= 0.5 * fs_) & (peak_locs[i] <= 9.5 * fs_)]
    modified_peak_locs = peak_correction(peak_locs, x_test)
    return modified_peak_locs

def compute_heart_rate(r_peaks):
    # Average heart rate (bpm) from the mean R-R interval within a single 10 s window.
    fs_ = 500
    r_peaks = r_peaks[(r_peaks >= 0.5 * fs_) & (r_peaks <= 9.5 * fs_)]
    return round(60 * fs_ / np.mean(np.diff(r_peaks)))
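
# Worked example with hypothetical R-peak locations inside one 10 s window at 500 Hz:
#   r_peaks = np.asarray([610, 1020, 1428, 1840, 2255])
#   compute_heart_rate(r_peaks)   # mean R-R interval ~411 samples -> round(60 * 500 / 411.25) == 73 bpm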

def smooth(signal, window_len=50):
    # Centred moving-average smoothing via a pandas rolling window.
    y = pd.DataFrame(signal).rolling(window_len, center=True, min_periods=1).mean().values.reshape((-1,))
    return y

def findValleys(signal, prominence=10, is_smooth=True, distance=10):
    """Return valley locations of `signal`, found via scipy's find_peaks on the negated, smoothed signal."""
    # Note: the prominence/distance arguments are currently not used; the hard-coded
    # values below (prominence=0.05, distance=125) are applied instead.
    smoothened = smooth(-1 * signal)
    valley_loc = scipy.signal.find_peaks(smoothened, prominence=0.05, distance=125)[0]
    return valley_loc

def getBR(signal, model):
    # Run the breathing-rate network on a batch of ECG windows and return the valley
    # (breath) locations of its respiratory output together with the raw output itself.
    model.eval()
    with torch.no_grad():
        transformPredicted = model(signal)
    transformPredicted = transformPredicted.cpu().numpy().reshape((-1,))
    valleys = findValleys(transformPredicted)
    return valleys, transformPredicted
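
# Hedged sketch: `getBR` expects a float tensor shaped like the loader batches above
# (e.g. (B, 1, 5000)) and an already-loaded IncUNet_BR instance; both names below are illustrative:
#   valleys, transform = getBR(window_batch, loaded_br_model)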