Diff of /algorithms.py [000000] .. [225570]

import cv2
import logging
import base64
import time
import numpy as np
import matplotlib.pyplot as plt
from vis.visual import write_on_image, visualise, activity_dict, visualise_tracking
from vis.processor import Processor
from helpers import pop_and_add, last_ip, dist, move_figure, get_hist
from default_params import *
from vis.inv_pendulum import *
import re
import pandas as pd
from scipy.signal import savgol_filter, lfilter
from model.model import LSTMModel
import torch
import math

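# Note: names such as HIST_THRESH, DEFAULT_CONSEC_FRAMES, FEATURE_LIST,
# FEATURE_SCALAR, FRAME_FEATURES, EMA_FRAMES, EMA_BETA, get_kp, match_ip,
# last_valid_hist, get_rot_energy, get_gf, get_height_bbox, get_ratio_bbox,
# get_angle_vertical and get_ratio_derivative are assumed to be provided by
# the star imports from default_params and vis.inv_pendulum above.
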
def get_source(args):
    tagged_df = None
    if args.video is None:
        cam = cv2.VideoCapture(0)
    else:
        logging.debug(f'Video source: {args.video}')
        cam = cv2.VideoCapture(args.video)
        if isinstance(args.video, str):
            # If the filename embeds exactly five integers, treat them as
            # labels into the tagged dataset (activity, subject, trial, ...).
            vid = [int(s) for s in re.findall(r'\d+', args.video)]
            if len(vid) == 5:
                tagged_df = pd.read_csv("dataset/CompleteDataSet.csv", usecols=[
                                        "TimeStamps", "Subject", "Activity", "Trial", "Tag"], skipinitialspace=True)
                tagged_df = tagged_df.query(
                    f'Subject == {vid[1]} & Activity == {vid[0]} & Trial == {vid[2]}')
    img = cam.read()[1]
    logging.debug(f'Image shape: {img.shape}')
    return cam, tagged_df


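# Example (hypothetical filename, assuming the dataset's naming convention):
# 'dataset/Activity5Subject1Trial2Camera1_30fps.mp4' embeds five integers,
# so vid == [5, 1, 2, 1, 30] and the query above selects Activity 5,
# Subject 1, Trial 2 from CompleteDataSet.csv.
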
def resize(img, resize_str, resolution):
    # Determine the display size of the video frames.
    if resize_str is None:
        height, width = img.shape[:2]
    else:
        width, height = [int(dim) for dim in resize_str.split('x')]
    # Scale by `resolution` and round down to multiples of 16 for the
    # pose processor's input size.
    width_height = (int(width * resolution // 16) * 16,
                    int(height * resolution // 16) * 16)
    return width, height, width_height


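# Worked example (values are illustrative): for a 640x480 frame with
# resolution 0.5, width_height == (int(640 * 0.5 // 16) * 16,
# int(480 * 0.5 // 16) * 16) == (320, 240), while the display size stays
# (width, height) == (640, 480).
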
def extract_keypoints_parallel(queue, args, self_counter, other_counter, consecutive_frames, event):
    try:
        cam, tagged_df = get_source(args)
        ret_val, img = cam.read()
    except Exception as e:
        queue.put(None)
        event.set()
        print('Exception occurred:', e)
        print('Most likely the video/camera doesn\'t exist')
        return

    width, height, width_height = resize(img, args.resize, args.resolution)
    logging.debug(f'Target width and height = {width_height}')
    processor_singleton = Processor(width_height, args)

    output_video = None

    frame = 0
    t0 = time.time()
    while not event.is_set():
        # Keep the two camera feeds in lockstep: wait while this feed is ahead.
        if args.num_cams == 2 and (self_counter.value > other_counter.value):
            continue

        ret_val, img = cam.read()
        frame += 1
        self_counter.value += 1
        if tagged_df is None:
            curr_time = time.time()
        else:
            # Convert the "HH:MM:SS" part of the timestamp to seconds.
            curr_time = tagged_df.iloc[frame-1]['TimeStamps'][11:]
            curr_time = sum(x * float(t) for x, t in zip([3600, 60, 1], curr_time.split(":")))

        if img is None:
            print('no more images captured')
            print(args.video, curr_time, sep=" ")
            if not event.is_set():
                event.set()
            break

        img = cv2.resize(img, (width, height))
        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        keypoint_sets, bb_list, width_height = processor_singleton.single_image(img)
        assert bb_list is None or isinstance(bb_list, list)
        if bb_list:
            assert isinstance(bb_list[0], tuple)
            assert isinstance(bb_list[0][0], tuple)
        # bb_list is assumed to be of the form [((x1, y1), (x2, y2)), ...]

        if args.coco_points:
            keypoint_sets = [keypoints.tolist() for keypoints in keypoint_sets]
        else:
            anns = [get_kp(keypoints.tolist()) for keypoints in keypoint_sets]
            ubboxes = [(np.asarray([width, height])*np.asarray(ann[1])).astype('int32')
                       for ann in anns]
            lbboxes = [(np.asarray([width, height])*np.asarray(ann[2])).astype('int32')
                       for ann in anns]
            bbox_list = [(np.asarray([width, height])*np.asarray(box)).astype('int32') for box in bb_list]
            uhist_list = [get_hist(hsv_img, bbox) for bbox in ubboxes]
            # Note: the upper-body histogram is computed on the HSV image but
            # the lower-body one on the BGR image; this asymmetry may be
            # intentional, but it is worth double-checking.
            lhist_list = [get_hist(img, bbox) for bbox in lbboxes]
            keypoint_sets = [{"keypoints": keyp[0], "up_hist": uh, "lo_hist": lh, "time": curr_time, "box": box}
                             for keyp, uh, lh, box in zip(anns, uhist_list, lhist_list, bbox_list)]

            cv2.polylines(img, ubboxes, True, (255, 0, 0), 2)
            cv2.polylines(img, lbboxes, True, (0, 255, 0), 2)
            for box in bbox_list:
                cv2.rectangle(img, tuple(box[0]), tuple(box[1]), (0, 0, 255), 2)

        dict_vis = {"img": img, "keypoint_sets": keypoint_sets, "width": width, "height": height, "vis_keypoints": args.joints,
                    "vis_skeleton": args.skeleton, "CocoPointsOn": args.coco_points,
                    "tagged_df": {"text": f"Avg FPS: {frame//(time.time()-t0)}, Frame: {frame}", "color": [0, 0, 0]}}
        queue.put(dict_vis)

    queue.put(None)
    return


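# Minimal usage sketch (not from the original file; the argparse fields are
# assumptions inferred from how `args` is read above). Each camera gets its
# own producer process feeding a queue that alg2_sequential consumes:
#
#     import multiprocessing as mp
#
#     queue = mp.Queue()
#     event = mp.Event()
#     counter1 = mp.Value('i', 0)
#     counter2 = mp.Value('i', 0)
#     p = mp.Process(target=extract_keypoints_parallel,
#                    args=(queue, args, counter1, counter2,
#                          DEFAULT_CONSEC_FRAMES, event))
#     p.start()
#     alg2_sequential([queue], [args], DEFAULT_CONSEC_FRAMES, event)
#     p.join()
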
###################################################### Post human-pose estimation ######################################################


def show_tracked_img(img_dict, ip_set, num_matched, output_video, args):
    img = img_dict["img"]
    tagged_df = img_dict["tagged_df"]
    keypoints_frame = [person[-1] for person in ip_set]
    img = visualise_tracking(img=img, keypoint_sets=keypoints_frame, width=img_dict["width"], height=img_dict["height"],
                             num_matched=num_matched, vis_keypoints=img_dict["vis_keypoints"], vis_skeleton=img_dict["vis_skeleton"],
                             CocoPointsOn=False)

    img = write_on_image(img=img, text=tagged_df["text"],
                         color=tagged_df["color"])

    if output_video is None:
        if args.save_output:
            if isinstance(args.video, int):
                vidname = [str(args.video) + '.avi']
            else:
                vidname = args.video.split('/')
            filename = '/'.join(vidname[:-1])
            if filename:
                filename += '/'
            # Assumes a three-character extension, e.g. 'clip.mp4' -> 'outclip.avi'.
            filename += 'out' + vidname[-1][:-3] + 'avi'
            output_video = cv2.VideoWriter(filename=filename, fourcc=cv2.VideoWriter_fourcc(*'MP42'),
                                           fps=args.fps, frameSize=img.shape[:2][::-1])
            logging.debug(
                f'Saving the output video at {filename} with {args.fps} frames per second')
            # Also write the current frame, so the first frame is not lost.
            output_video.write(img)
        else:
            output_video = None
            logging.debug('Not saving the output video')
    else:
        output_video.write(img)
    return img, output_video


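# Example (illustrative): with args.video == 'dataset/fall1.mp4' and
# --save_output set, vidname == ['dataset', 'fall1.mp4'] and the writer is
# opened at 'dataset/outfall1.avi'; an integer camera index 0 becomes
# 'out0.avi' in the working directory.
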
def remove_wrongly_matched(matched_1, matched_2):

    unmatched_idxs = []

    for i, (ip1, ip2) in enumerate(zip(matched_1, matched_2)):
        # Each element holds the last t frames of one matched person.
        # HISTCMP_CORREL lies in [-1, 1]; a low correlation between the two
        # views' upper-body histograms suggests the match is wrong.
        correlation = cv2.compareHist(last_valid_hist(ip1)["up_hist"], last_valid_hist(ip2)["up_hist"], cv2.HISTCMP_CORREL)
        if correlation < 0.5*HIST_THRESH:
            unmatched_idxs.append(i)

    return unmatched_idxs


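# Illustrative use of the same comparison (hypothetical inputs; get_hist is
# assumed to return normalised cv2 histograms compatible with compareHist):
#
#     h1 = get_hist(hsv_img, bbox_a)
#     h2 = get_hist(hsv_img, bbox_b)
#     cv2.compareHist(h1, h2, cv2.HISTCMP_CORREL)  # 1.0 identical, ~0 unrelated
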
def match_unmatched(unmatched_1, unmatched_2, lstm_set1, lstm_set2, num_matched):

    new_matched_1 = []
    new_matched_2 = []
    new_lstm1 = []
    new_lstm2 = []
    final_pairs = [[], []]

    if not unmatched_1 or not unmatched_2:
        return final_pairs, new_matched_1, new_matched_2, new_lstm1, new_lstm2

    correlation_matrix = -np.ones((len(unmatched_1), len(unmatched_2)))
    dist_matrix = np.zeros((len(unmatched_1), len(unmatched_2)))  # computed but not currently used
    for i in range(len(unmatched_1)):
        for j in range(len(unmatched_2)):
            correlation_matrix[i][j] = cv2.compareHist(last_valid_hist(unmatched_1[i])["up_hist"],
                                                       last_valid_hist(unmatched_2[j])["up_hist"], cv2.HISTCMP_CORREL)
            dist_matrix[i][j] = np.sum(np.absolute(last_valid_hist(unmatched_1[i])["up_hist"]-last_valid_hist(unmatched_2[j])["up_hist"]))

    # Gale-Shapley-style proposals: every person in view 1 proposes to the
    # people in view 2 in decreasing order of histogram correlation.
    freelist_1 = [i for i in range(len(unmatched_1))]
    pair_21 = [-1]*len(unmatched_2)
    unmatched_1_preferences = np.argsort(-correlation_matrix, axis=1)
    # print("cor", correlation_matrix, sep="\n")
    # print("unmatched_1", unmatched_1_preferences, sep="\n")
    unmatched_indexes1 = [0]*len(unmatched_1)
    finish_array = [False]*len(unmatched_1)
    while freelist_1:
        um1_idx = freelist_1[-1]
        if finish_array[um1_idx]:
            # This proposer has already asked everyone in view 2.
            freelist_1.pop()
            continue
        next_unasked_2 = unmatched_1_preferences[um1_idx][unmatched_indexes1[um1_idx]]
        if pair_21[next_unasked_2] == -1:
            pair_21[next_unasked_2] = um1_idx
            freelist_1.pop()
        else:
            curr_paired_2 = pair_21[next_unasked_2]
            if correlation_matrix[curr_paired_2][next_unasked_2] < correlation_matrix[um1_idx][next_unasked_2]:
                # The new proposer is a better match; displace the old one.
                pair_21[next_unasked_2] = um1_idx
                freelist_1.pop()
                if not finish_array[curr_paired_2]:
                    freelist_1.append(curr_paired_2)

        unmatched_indexes1[um1_idx] += 1
        if unmatched_indexes1[um1_idx] == len(unmatched_2):
            finish_array[um1_idx] = True

    for j, i in enumerate(pair_21):
        if i == -1:
            # No one in view 1 was paired with this person in view 2.
            continue
        if correlation_matrix[i][j] > HIST_THRESH:
            final_pairs[0].append(i+num_matched)
            final_pairs[1].append(j+num_matched)
            new_matched_1.append(unmatched_1[i])
            new_matched_2.append(unmatched_2[j])
            # Offset by num_matched so the LSTM state follows the same
            # unmatched person whose index final_pairs records.
            new_lstm1.append(lstm_set1[i+num_matched])
            new_lstm2.append(lstm_set2[j+num_matched])

    # print("finalpairs", final_pairs, sep="\n")

    return final_pairs, new_matched_1, new_matched_2, new_lstm1, new_lstm2


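# Worked example (hypothetical numbers): with
#
#     correlation_matrix = [[0.9, 0.2],
#                           [0.8, 0.7]]
#
# person 1 of view 1 proposes first and is provisionally paired with person 0
# of view 2 (0.8); person 0 of view 1 then proposes to the same person with a
# higher correlation (0.9) and displaces it, so person 1 falls back to person
# 1 of view 2 (0.7). With HIST_THRESH below 0.7 both pairs pass the final
# threshold and are returned in final_pairs, offset by num_matched.
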
def alg2_sequential(queues, argss, consecutive_frames, event):
    model = LSTMModel(h_RNN=48, h_RNN_layers=2, drop_p=0.1, num_classes=7)
    model.load_state_dict(torch.load('model/lstm_weights.sav', map_location=argss[0].device))
    model.eval()
    output_videos = [None for _ in range(argss[0].num_cams)]
    t0 = time.time()
    feature_plotters = [[[], [], [], [], []] for _ in range(argss[0].num_cams)]
    ip_sets = [[] for _ in range(argss[0].num_cams)]
    lstm_sets = [[] for _ in range(argss[0].num_cams)]
    max_length_mat = 300
    num_matched = 0
    if not argss[0].plot_graph:
        max_length_mat = consecutive_frames
    else:
        f, ax = plt.subplots()
        move_figure(f, 800, 100)
    window_names = [args.video if isinstance(args.video, str) else 'Cam '+str(args.video) for args in argss]
    for window_name in window_names:
        cv2.namedWindow(window_name)
    while True:

        # if not queue1.empty() and not queue2.empty():
        if not any(q.empty() for q in queues):
            dict_frames = [q.get() for q in queues]

            if any(dict_frame is None for dict_frame in dict_frames):
                if not event.is_set():
                    event.set()
                break

            # Stop on ESC or when any display window has been closed.
            if cv2.waitKey(1) == 27 or any(cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1 for window_name in window_names):
                if not event.is_set():
                    event.set()

            kp_frames = [dict_frame["keypoint_sets"] for dict_frame in dict_frames]
            if argss[0].num_cams == 1:
                num_matched, new_num, indxs_unmatched = match_ip(ip_sets[0], kp_frames[0], lstm_sets[0], num_matched, max_length_mat)
                valid1_idxs, prediction = get_all_features(ip_sets[0], lstm_sets[0], model)
                dict_frames[0]["tagged_df"]["text"] += f" Pred: {activity_dict[prediction+5]}"
                img, output_videos[0] = show_tracked_img(dict_frames[0], ip_sets[0], num_matched, output_videos[0], argss[0])
                # print(img1.shape)
                cv2.imshow(window_names[0], img)

            elif argss[0].num_cams == 2:
                num_matched, new_num, indxs_unmatched1 = match_ip(ip_sets[0], kp_frames[0], lstm_sets[0], num_matched, max_length_mat)
                assert new_num == len(ip_sets[0])
                # Move camera 2's counterparts of camera 1's unmatched people
                # to the back, so the matched prefixes stay aligned.
                for i in sorted(indxs_unmatched1, reverse=True):
                    elem = ip_sets[1][i]
                    ip_sets[1].pop(i)
                    ip_sets[1].append(elem)
                    elem_lstm = lstm_sets[1][i]
                    lstm_sets[1].pop(i)
                    lstm_sets[1].append(elem_lstm)
                num_matched, new_num, indxs_unmatched2 = match_ip(ip_sets[1], kp_frames[1], lstm_sets[1], num_matched, max_length_mat)

                for i in sorted(indxs_unmatched2, reverse=True):
                    elem = ip_sets[0][i]
                    ip_sets[0].pop(i)
                    ip_sets[0].append(elem)
                    elem_lstm = lstm_sets[0][i]
                    lstm_sets[0].pop(i)
                    lstm_sets[0].append(elem_lstm)

                matched_1 = ip_sets[0][:num_matched]
                matched_2 = ip_sets[1][:num_matched]

                unmatch_previous = remove_wrongly_matched(matched_1, matched_2)
                if unmatch_previous:
                    print(unmatch_previous)

                # Demote wrongly matched pairs to the unmatched tail of both sets.
                for i in sorted(unmatch_previous, reverse=True):
                    elem1 = ip_sets[0][i]
                    elem2 = ip_sets[1][i]
                    ip_sets[0].pop(i)
                    ip_sets[1].pop(i)
                    ip_sets[0].append(elem1)
                    ip_sets[1].append(elem2)
                    elem_lstm1 = lstm_sets[0][i]
                    lstm_sets[0].pop(i)
                    lstm_sets[0].append(elem_lstm1)
                    elem_lstm2 = lstm_sets[1][i]
                    lstm_sets[1].pop(i)
                    lstm_sets[1].append(elem_lstm2)
                    num_matched -= 1

                unmatched_1 = ip_sets[0][num_matched:]
                unmatched_2 = ip_sets[1][num_matched:]

                new_pairs, new_matched1, new_matched2, new_lstm1, new_lstm2 = match_unmatched(
                    unmatched_1, unmatched_2, lstm_sets[0], lstm_sets[1], num_matched)

                new_p1 = new_pairs[0]
                new_p2 = new_pairs[1]

                for i in sorted(new_p1, reverse=True):
                    ip_sets[0].pop(i)
                    lstm_sets[0].pop(i)
                for i in sorted(new_p2, reverse=True):
                    ip_sets[1].pop(i)
                    lstm_sets[1].pop(i)

                # Re-insert the newly matched people right after the existing
                # matched prefix of each set.
                ip_sets[0] = ip_sets[0][:num_matched] + new_matched1 + ip_sets[0][num_matched:]
                ip_sets[1] = ip_sets[1][:num_matched] + new_matched2 + ip_sets[1][num_matched:]
                lstm_sets[0] = lstm_sets[0][:num_matched] + new_lstm1 + lstm_sets[0][num_matched:]
                lstm_sets[1] = lstm_sets[1][:num_matched] + new_lstm2 + lstm_sets[1][num_matched:]
                # remember to match the energy matrices also

                num_matched = num_matched + len(new_matched1)

                # get features now

                valid1_idxs, prediction1 = get_all_features(ip_sets[0], lstm_sets[0], model)
                valid2_idxs, prediction2 = get_all_features(ip_sets[1], lstm_sets[1], model)
                dict_frames[0]["tagged_df"]["text"] += f" Pred: {activity_dict[prediction1+5]}"
                dict_frames[1]["tagged_df"]["text"] += f" Pred: {activity_dict[prediction2+5]}"
                img1, output_videos[0] = show_tracked_img(dict_frames[0], ip_sets[0], num_matched, output_videos[0], argss[0])
                img2, output_videos[1] = show_tracked_img(dict_frames[1], ip_sets[1], num_matched, output_videos[1], argss[1])
                # print(img1.shape)
                cv2.imshow(window_names[0], img1)
                cv2.imshow(window_names[1], img2)

                assert len(lstm_sets[0]) == len(ip_sets[0])
                assert len(lstm_sets[1]) == len(ip_sets[1])

            DEBUG = False
            # for ip_set, feature_plotter in zip(ip_sets, feature_plotters):
            #     for cnt in range(len(FEATURE_LIST)):
            #         plt_f = FEATURE_LIST[cnt]
            #         if ip_set and ip_set[0] is not None and ip_set[0][-1] is not None and plt_f in ip_set[0][-1]["features"]:
            #             # print(ip_set[0][-1]["features"])
            #             feature_plotter[cnt].append(ip_set[0][-1]["features"][plt_f])
            #
            #         else:
            #             # print("None")
            #             feature_plotter[cnt].append(0)
            # DEBUG = True

    cv2.destroyAllWindows()
    # for feature_plotter in feature_plotters:
    #     for i, feature_arr in enumerate(feature_plotter):
    #         plt.clf()
    #         x = np.linspace(1, len(feature_arr), len(feature_arr))
    #         axes = plt.gca()
    #         filter_array = feature_arr
    #         line, = axes.plot(x, filter_array, 'r-')
    #         plt.ylabel(FEATURE_LIST[i])
    #         # plt.savefig(f'{args1.video}_{FEATURE_LIST[i]}_filter.png')
    #         plt.pause(1e-7)

    # for i, feature_arr in enumerate(feature_plotter2):
    #     plt.clf()
    #     x = np.linspace(1, len(feature_arr), len(feature_arr))
    #     axes = plt.gca()
    #     filter_array = feature_arr
    #     line, = axes.plot(x, filter_array, 'r-')
    #     plt.ylabel(FEATURE_LIST[i])
    #     # plt.savefig(f'{args2.video}_{FEATURE_LIST[i]}_filter.png')
    #     plt.pause(1e-7)
    #     # if len(re_matrix1[0]) > 0:
    #     #     print(np.linalg.norm(ip_sets[0][0][-1][0]['B']-ip_sets[0][0][-1][0]['H']))

    # print("P2 Over")
    del model
    return


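# Inferred layout of each lstm_set entry, based on how get_all_features uses
# it below (an assumption, not documented in the original source):
#   lstm_set[i][0] - the model's recurrent hidden state, updated every frame
#   lstm_set[i][1] - EMA of the person's bounding-box height
#   lstm_set[i][2] - number of frames used to warm up that EMA
#   lstm_set[i][3] - counter that smooths consecutive "fall" predictions
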
def get_all_features(ip_set, lstm_set, model):
    valid_idxs = []
    invalid_idxs = []
    predictions = [15]*len(ip_set)  # 15 is the tag for None

    for i, ips in enumerate(ip_set):
        # ip set for a particular person
        last1 = None
        last2 = None
        # Find the two most recent non-empty frames before the current one.
        for j in range(-2, -1*DEFAULT_CONSEC_FRAMES - 1, -1):
            if ips[j] is not None:
                if last1 is None:
                    last1 = j
                elif last2 is None:
                    last2 = j
        if ips[-1] is None:
            invalid_idxs.append(i)
            # continue
        else:
            ips[-1]["features"] = {}
            # get re, gf, angle, bounding box ratio, ratio derivative
            ips[-1]["features"]["height_bbox"] = get_height_bbox(ips[-1])
            ips[-1]["features"]["ratio_bbox"] = FEATURE_SCALAR["ratio_bbox"]*get_ratio_bbox(ips[-1])

            body_vector = ips[-1]["keypoints"]["N"] - ips[-1]["keypoints"]["B"]
            ips[-1]["features"]["angle_vertical"] = FEATURE_SCALAR["angle_vertical"]*get_angle_vertical(body_vector)
            # print(ips[-1]["features"]["angle_vertical"])
            ips[-1]["features"]["log_angle"] = FEATURE_SCALAR["log_angle"]*np.log(1 + np.abs(ips[-1]["features"]["angle_vertical"]))

            if last1 is None:
                invalid_idxs.append(i)
                # continue
            else:
                ips[-1]["features"]["re"] = FEATURE_SCALAR["re"]*get_rot_energy(ips[last1], ips[-1])
                ips[-1]["features"]["ratio_derivative"] = FEATURE_SCALAR["ratio_derivative"]*get_ratio_derivative(ips[last1], ips[-1])
                if last2 is None:
                    invalid_idxs.append(i)
                    # continue
                else:
                    ips[-1]["features"]["gf"] = get_gf(ips[last2], ips[last1], ips[-1])
                    valid_idxs.append(i)

        # Assemble the feature vector, falling back to the last valid frame
        # (or zeros) when the current frame is missing.
        xdata = []
        if ips[-1] is None:
            if last1 is None:
                xdata = [0]*len(FEATURE_LIST)
            else:
                for feat in FEATURE_LIST[:FRAME_FEATURES]:
                    xdata.append(ips[last1]["features"][feat])
                xdata += [0]*(len(FEATURE_LIST)-FRAME_FEATURES)
        else:
            for feat in FEATURE_LIST:
                if feat in ips[-1]["features"]:
                    xdata.append(ips[-1]["features"][feat])
                else:
                    xdata.append(0)

        xdata = torch.Tensor(xdata).view(-1, 1, 5)  # 5 is assumed to equal len(FEATURE_LIST)
        # what if ips[-2] is None?
        outputs, lstm_set[i][0] = model(xdata, lstm_set[i][0])
        if i == 0:
            prediction = torch.max(outputs.data, 1)[1][0].item()
            confidence = torch.max(outputs.data, 1)[0][0].item()
            # fpd toggles the post-processing of raw LSTM predictions below.
            fpd = True
            # fpd = False
            if fpd:
                if prediction in [1, 2, 3, 5]:
                    lstm_set[i][3] -= 1
                    lstm_set[i][3] = max(lstm_set[i][3], 0)

                    # Track the person's typical bounding-box height with an EMA.
                    if lstm_set[i][2] < EMA_FRAMES:
                        if ips[-1] is not None:
                            lstm_set[i][2] += 1
                            lstm_set[i][1] = (lstm_set[i][1]*(lstm_set[i][2]-1) + get_height_bbox(ips[-1]))/lstm_set[i][2]
                    else:
                        if ips[-1] is not None:
                            lstm_set[i][1] = (1-EMA_BETA)*get_height_bbox(ips[-1]) + EMA_BETA*lstm_set[i][1]

                elif prediction == 0:
                    # A class-0 (presumably "fall") prediction is overridden
                    # with tag 7 while the person still looks near-vertical,
                    # the model is not confident, or too few consecutive
                    # class-0 frames have accumulated.
                    if (ips[-1] is not None and lstm_set[i][1] != 0 and
                            abs(ips[-1]["features"]["angle_vertical"]) < math.pi/4) or confidence < 0.4:
                        # (get_height_bbox(ips[-1]) > 2*lstm_set[i][1]/3 or abs(ips[-1]["features"]["angle_vertical"]) < math.pi/4):
                        prediction = 7
                    else:
                        lstm_set[i][3] += 1
                        if lstm_set[i][3] < DEFAULT_CONSEC_FRAMES//4:
                            prediction = 7
                else:
                    lstm_set[i][3] -= 1
                    lstm_set[i][3] = max(lstm_set[i][3], 0)
            predictions[i] = prediction

    return valid_idxs, predictions[0] if len(predictions) > 0 else 15


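# Shape sketch (len(FEATURE_LIST) is assumed to be 5, which the
# .view(-1, 1, 5) above hardcodes; the feature names are illustrative):
#
#     xdata = torch.Tensor([re, gf, angle, ratio, ratio_der]).view(-1, 1, 5)
#     # -> shape (1, 1, 5), i.e. one (seq_len, batch, features) timestep
#
# The recurrent state returned by the model is stored back into
# lstm_set[i][0], so the sequence effectively persists across frames.
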
def get_frame_features(ip_set, new_frame, re_matrix, gf_matrix, num_matched, max_length_mat=DEFAULT_CONSEC_FRAMES):

    match_ip(ip_set, new_frame, re_matrix, gf_matrix, max_length_mat)
    return
    # NOTE: everything below is unreachable because of the early return above;
    # it is apparently kept as reference for the older rotation-energy/GF
    # feature pipeline.
    for i in range(len(ip_set)):
        if ip_set[i][-1] is not None:
            if ip_set[i][-2] is not None:
                pop_and_add(re_matrix[i], get_rot_energy(
                            ip_set[i][-2], ip_set[i][-1]), max_length_mat)
            elif ip_set[i][-3] is not None:
                pop_and_add(re_matrix[i], get_rot_energy(
                            ip_set[i][-3], ip_set[i][-1]), max_length_mat)
            elif ip_set[i][-4] is not None:
                pop_and_add(re_matrix[i], get_rot_energy(
                            ip_set[i][-4], ip_set[i][-1]), max_length_mat)
            else:
                pop_and_add(re_matrix[i], 0, max_length_mat)
        else:
            pop_and_add(re_matrix[i], 0, max_length_mat)

    for i in range(len(ip_set)):
        if ip_set[i][-1] is not None:
            last1 = None
            last2 = None
            for j in [-2, -3, -4, -5]:
                if ip_set[i][j] is not None:
                    if last1 is None:
                        last1 = j
                    elif last2 is None:
                        last2 = j

            if last2 is None:
                pop_and_add(gf_matrix[i], 0, max_length_mat)
                continue

            pop_and_add(gf_matrix[i], get_gf(ip_set[i][last2], ip_set[i][last1],
                                             ip_set[i][-1]), max_length_mat)

        else:

            pop_and_add(gf_matrix[i], 0, max_length_mat)

    return