vis/visual.py

from enum import IntEnum, unique
from typing import List

import cv2
import numpy as np
|
|
@unique
class CocoPart(IntEnum):
    """Body part locations in the 'coordinates' list."""
    Nose = 0
    LEye = 1
    REye = 2
    LEar = 3
    REar = 4
    LShoulder = 5
    RShoulder = 6
    LElbow = 7
    RElbow = 8
    LWrist = 9
    RWrist = 10
    LHip = 11
    RHip = 12
    LKnee = 13
    RKnee = 14
    LAnkle = 15
    RAnkle = 16
|
|
# Connections of the 17-keypoint COCO skeleton: (part index, part index, colour)
SKELETON_CONNECTIONS_COCO = [(0, 1, (210, 182, 247)), (0, 2, (127, 127, 127)), (1, 2, (194, 119, 227)),
                             (1, 3, (199, 199, 199)), (2, 4, (34, 189, 188)), (3, 5, (141, 219, 219)),
                             (4, 6, (207, 190, 23)), (5, 6, (150, 152, 255)), (5, 7, (189, 103, 148)),
                             (5, 11, (138, 223, 152)), (6, 8, (213, 176, 197)), (6, 12, (40, 39, 214)),
                             (7, 9, (75, 86, 140)), (8, 10, (148, 156, 196)), (11, 12, (44, 160, 44)),
                             (11, 13, (232, 199, 174)), (12, 14, (120, 187, 255)), (13, 15, (180, 119, 31)),
                             (14, 16, (14, 127, 255))]
|
|
# Reduced 5-point skeleton keyed by string part names ('H', 'N', 'B', 'KL', 'KR')
SKELETON_CONNECTIONS_5P = [('H', 'N', (210, 182, 247)), ('N', 'B', (210, 182, 247)), ('B', 'KL', (210, 182, 247)),
                           ('B', 'KR', (210, 182, 247)), ('KL', 'KR', (210, 182, 247))]

# Per-track colours used for matched poses in visualise_tracking
COLOR_ARRAY = [(210, 182, 247), (127, 127, 127), (194, 119, 227), (199, 199, 199), (34, 189, 188),
               (141, 219, 219), (207, 190, 23), (150, 152, 255), (189, 103, 148), (138, 223, 152)]

UNMATCHED_COLOR = (180, 119, 31)
|
|
# Original activity labels, kept for reference:
# activity_dict = {
#     1.0: "Falling forward using hands",
#     2.0: "Falling forward using knees",
#     3: "Falling backwards",
#     4: "Falling sideward",
#     5: "Falling",
#     6: "Walking",
#     7: "Standing",
#     8: "Sitting",
#     9: "Picking up an object",
#     10: "Jumping",
#     11: "Laying",
#     12: "False Fall",
#     20: "None"
# }

# Display labels: fall classes keep fall-specific text, other activities collapse to "Normal"
activity_dict = {
    1.0: "Falling forward using hands",
    2.0: "Falling forward using knees",
    3: "Falling backwards",
    4: "Falling sideward",
    5: "FALL",
    6: "Normal",
    7: "Normal",
    8: "Normal",
    9: "Normal",
    10: "Normal",
    11: "Normal",
    12: "FALL Warning",
    20: "None"
}
|
|
def write_on_image(img: np.ndarray, text: str, color: List) -> np.ndarray:
    """Write text at the top of the image."""
    # Add a white border to the top of the image to write the text on
    img = cv2.copyMakeBorder(src=img,
                             top=int(0.1 * img.shape[0]),
                             bottom=0,
                             left=0,
                             right=0,
                             borderType=cv2.BORDER_CONSTANT,
                             dst=None,
                             value=[255, 255, 255])
    # Each '\n'-separated line gets its own row of text, 30 px apart
    for i, line in enumerate(text.split('\n')):
        y = 30 + i * 30
        cv2.putText(img=img,
                    text=line,
                    org=(0, y),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.7,
                    color=color,
                    thickness=2)

    return img
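
# Usage sketch (illustrative, not part of the original module): the text may
# contain '\n' to stack several lines inside the white banner; the frame size,
# message, and BGR colour below are made-up values.
#
#   frame = np.zeros((480, 640, 3), dtype=np.uint8)
#   frame = write_on_image(img=frame, text='FALL Warning\nID: 2', color=[0, 0, 255])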
|
|
def visualise(img: np.ndarray, keypoint_sets: List, width: int, height: int, vis_keypoints: bool = False,
              vis_skeleton: bool = False, CocoPointsOn: bool = False) -> np.ndarray:
    """Draw keypoints/skeleton on the output video frame."""
    if CocoPointsOn:
        SKELETON_CONNECTIONS = SKELETON_CONNECTIONS_COCO
    else:
        SKELETON_CONNECTIONS = SKELETON_CONNECTIONS_5P

    if vis_keypoints or vis_skeleton:
        for keypoints in keypoint_sets:
            if not CocoPointsOn:
                # 5-point sets arrive wrapped in a dict under the "keypoints" key
                keypoints = keypoints["keypoints"]

            if vis_skeleton:
                for p1i, p2i, color in SKELETON_CONNECTIONS:
                    if keypoints[p1i] is None or keypoints[p2i] is None:
                        continue

                    # Scale normalised keypoint coordinates to pixel positions
                    p1 = (int(keypoints[p1i][0] * width), int(keypoints[p1i][1] * height))
                    p2 = (int(keypoints[p2i][0] * width), int(keypoints[p2i][1] * height))

                    # Skip segments with an endpoint at the origin (missing keypoint)
                    if p1 == (0, 0) or p2 == (0, 0):
                        continue

                    cv2.line(img=img, pt1=p1, pt2=p2, color=color, thickness=3)

    return img
|
|
def visualise_tracking(img: np.ndarray, keypoint_sets: List, width: int, height: int, num_matched: int,
                       vis_keypoints: bool = False, vis_skeleton: bool = False,
                       CocoPointsOn: bool = False) -> np.ndarray:
    """Draw keypoints/skeleton on the output video frame, colour-coded per tracked person."""
    if CocoPointsOn:
        SKELETON_CONNECTIONS = SKELETON_CONNECTIONS_COCO
    else:
        SKELETON_CONNECTIONS = SKELETON_CONNECTIONS_5P

    if vis_keypoints or vis_skeleton:
        for i, keypoints in enumerate(keypoint_sets):
            if keypoints is None:
                continue
            if not CocoPointsOn:
                keypoints = keypoints["keypoints"]
            if vis_skeleton:
                for p1i, p2i, color in SKELETON_CONNECTIONS:
                    if keypoints[p1i] is None or keypoints[p2i] is None:
                        continue

                    p1 = (int(keypoints[p1i][0] * width), int(keypoints[p1i][1] * height))
                    p2 = (int(keypoints[p2i][0] * width), int(keypoints[p2i][1] * height))

                    if p1 == (0, 0) or p2 == (0, 0):
                        continue

                    # The first num_matched poses cycle through COLOR_ARRAY; the rest share UNMATCHED_COLOR
                    if i < num_matched:
                        color = COLOR_ARRAY[i % 10]
                    else:
                        color = UNMATCHED_COLOR

                    cv2.line(img=img, pt1=p1, pt2=p2, color=color, thickness=3)

    return img
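

if __name__ == '__main__':
    # Minimal smoke test, added here as an illustrative sketch rather than part of
    # the original module. It assumes the 5-point format: normalised (x, y) pairs
    # keyed by 'H', 'N', 'B', 'KL', 'KR' inside a per-person dict under "keypoints";
    # the coordinate values, frame size, and output filename are made up.
    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    person = {'keypoints': {'H': (0.5, 0.2), 'N': (0.5, 0.3), 'B': (0.5, 0.5),
                            'KL': (0.45, 0.7), 'KR': (0.55, 0.7)}}
    frame = visualise_tracking(img=frame, keypoint_sets=[person], width=640, height=480,
                               num_matched=1, vis_skeleton=True)
    frame = write_on_image(img=frame, text=activity_dict[7], color=[255, 255, 0])
    cv2.imwrite('visual_demo.png', frame)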