|
a |
|
b/fall_detector.py |
|
|
1 |
import openpifpaf |
|
|
2 |
import torch |
|
|
3 |
import argparse |
|
|
4 |
import copy |
|
|
5 |
import logging |
|
|
6 |
import torch.multiprocessing as mp |
|
|
7 |
import csv |
|
|
8 |
from default_params import * |
|
|
9 |
from algorithms import * |
|
|
10 |
from helpers import last_ip |
|
|
11 |
import os |
|
|
12 |
import matplotlib.pyplot as plt |
|
|
13 |
|
|
|
14 |
# Use the 'spawn' start method so worker processes get a fresh interpreter
# (required for sharing CUDA tensors across processes with
# torch.multiprocessing). set_start_method raises RuntimeError if the start
# method was already set (e.g. on re-import) — that is harmless, so ignore it.
try:
    mp.set_start_method('spawn')
except RuntimeError:
    pass
|
|
18 |
|
|
|
19 |
|
|
|
20 |
class FallDetector:
    """Multi-process fall detector.

    One keypoint-extraction process per camera pushes pose keypoints into a
    queue; a single sequential process (`alg2_sequential`) consumes all
    queues and runs the fall-detection algorithms.
    """

    def __init__(self, t=DEFAULT_CONSEC_FRAMES):
        # Number of consecutive frames the detection algorithms operate on.
        self.consecutive_frames = t
        self.args = self.cli()

    def cli(self):
        """Parse command-line arguments and configure openpifpaf.

        Returns:
            argparse.Namespace augmented with fixed decoder thresholds
            (`force_complete_pose`, `instance_threshold`, `seed_threshold`)
            and device selection (`device`, `pin_memory`).
        """
        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )

        # Let openpifpaf register its own network/decoder options.
        openpifpaf.network.Factory.cli(parser)
        openpifpaf.decoder.cli(parser)
        parser.add_argument('--resolution', default=0.4, type=float,
                            help=('Resolution prescale factor from 640x480. '
                                  'Will be rounded to multiples of 16.'))
        parser.add_argument('--resize', default=None, type=str,
                            help=('Force input image resize. '
                                  'Example WIDTHxHEIGHT.'))
        parser.add_argument('--num_cams', default=1, type=int,
                            help='Number of Cameras.')
        parser.add_argument('--video', default=None, type=str,
                            help='Path to the video file.\nFor single video fall detection(--num_cams=1), save your videos as abc.xyz and set --video=abc.xyz\nFor 2 video fall detection(--num_cams=2), save your videos as abc1.xyz & abc2.xyz and set --video=abc.xyz')
        parser.add_argument('--debug', default=False, action='store_true',
                            help='debug messages and autoreload')
        # Fixed help text: disabling CUDA runs on the CPU (was "runs from gpu").
        parser.add_argument('--disable_cuda', default=False, action='store_true',
                            help='disables cuda support and runs on cpu')

        vis_args = parser.add_argument_group('Visualisation')
        vis_args.add_argument('--plot_graph', default=False, action='store_true',
                              help='Plot the graph of features extracted from keypoints of pose.')
        # NOTE(review): default=True combined with action='store_true' means
        # --joints/--skeleton can never be turned off from the CLI. Kept as-is
        # for backward compatibility; a BooleanOptionalAction would fix it.
        vis_args.add_argument('--joints', default=True, action='store_true',
                              help='Draw joint\'s keypoints on the output video.')
        vis_args.add_argument('--skeleton', default=True, action='store_true',
                              help='Draw skeleton on the output video.')
        vis_args.add_argument('--coco_points', default=False, action='store_true',
                              help='Visualises the COCO points of the human pose.')
        vis_args.add_argument('--save_output', default=False, action='store_true',
                              help='Save the result in a video file. Output videos are saved in the same directory as input videos with "out" appended at the start of the title')
        vis_args.add_argument('--fps', default=18, type=int,
                              help='FPS for the output video.')
        # vis_args.add_argument('--out-path', default='result.avi', type=str,
        #                       help='Save the output video at the path specified. .avi file format.')

        args = parser.parse_args()

        # Log
        logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)

        # Fixed decoder thresholds (deliberately not exposed on the CLI).
        args.force_complete_pose = True
        args.instance_threshold = 0.2
        args.seed_threshold = 0.5

        # Device selection: pinned memory only pays off when copying to CUDA.
        args.device = torch.device('cpu')
        args.pin_memory = False
        if not args.disable_cuda and torch.cuda.is_available():
            args.device = torch.device('cuda')
            args.pin_memory = True

        if args.checkpoint is None:
            args.checkpoint = 'shufflenetv2k16w'

        openpifpaf.decoder.configure(args)
        openpifpaf.network.Factory.configure(args)

        return args

    def begin(self):
        """Spawn the per-camera extraction process(es) and the detection
        process, then block until they finish.

        With --coco_points only the extraction process(es) run (visualisation
        mode); otherwise `alg2_sequential` consumes every camera queue.
        Supports at most two cameras; prints an error and returns otherwise.
        """
        print('Starting...')
        e = mp.Event()
        queues = [mp.Queue() for _ in range(self.args.num_cams)]
        # Cross-camera frame counters shared between the extraction processes.
        counter1 = mp.Value('i', 0)
        counter2 = mp.Value('i', 0)
        # Each process gets its own copy of args (video source differs per cam).
        argss = [copy.deepcopy(self.args) for _ in range(self.args.num_cams)]
        if self.args.num_cams == 1:
            if self.args.video is None:
                argss[0].video = 0  # default webcam device index
            process1 = mp.Process(target=extract_keypoints_parallel,
                                  args=(queues[0], argss[0], counter1, counter2, self.consecutive_frames, e))
            process1.start()
            if self.args.coco_points:
                process1.join()
            else:
                process2 = mp.Process(target=alg2_sequential, args=(queues, argss,
                                                                    self.consecutive_frames, e))
                process2.start()
                process1.join()
        elif self.args.num_cams == 2:
            if self.args.video is None:
                argss[0].video = 0  # first webcam
                argss[1].video = 1  # second webcam
            else:
                try:
                    # Derive per-camera file names: abc.xyz -> abc1.xyz / abc2.xyz.
                    # Bug fix: the old ''.join(video.split('.')[:-1]) dropped
                    # interior dots ("a.b.mp4" became "ab1.mp4"); splitext keeps
                    # the base name intact.
                    base, ext = os.path.splitext(self.args.video)
                    argss[0].video = base + '1' + ext
                    argss[1].video = base + '2' + ext
                    print('Video 1:', argss[0].video)
                    print('Video 2:', argss[1].video)
                except Exception:
                    print('Error: argument --video not properly set')
                    print('For 2 video fall detection(--num_cams=2), save your videos as abc1.xyz & abc2.xyz and set --video=abc.xyz')
                    return
            # Note the swapped counters: each extractor reads the other's count.
            process1_1 = mp.Process(target=extract_keypoints_parallel,
                                    args=(queues[0], argss[0], counter1, counter2, self.consecutive_frames, e))
            process1_2 = mp.Process(target=extract_keypoints_parallel,
                                    args=(queues[1], argss[1], counter2, counter1, self.consecutive_frames, e))
            process1_1.start()
            process1_2.start()
            if self.args.coco_points:
                process1_1.join()
                process1_2.join()
            else:
                process2 = mp.Process(target=alg2_sequential, args=(queues, argss,
                                                                    self.consecutive_frames, e))
                process2.start()
                process1_1.join()
                process1_2.join()
        else:
            print('More than 2 cameras are currently not supported')
            return

        if not self.args.coco_points:
            process2.join()
        print('Exiting...')
        return
|
|
146 |
|
|
|
147 |
|
|
|
148 |
# Script entry point: build a detector with the default frame window and run it.
if __name__ == "__main__":
    FallDetector().begin()