main_no_gui.py
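"""Run the virtual mouse without the GUI.

A background thread captures webcam frames while the main thread runs
gesture detection and executes the matching mouse actions. Press 'q' in
the preview window to quit.
"""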
import queue
import threading

import cv2

from models.GestureDetectionModel import GestureDetectionModel
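# Note: the calls below assume the GestureDetectionModel interface as used in
# this script: an `on_load` callback and `use_thread` flag in the constructor,
# plus process_frame(), predict(), get_action(), highlight_gesture(),
# execute_action(), reset_kalman_filter(), get_hand_orientation(), and an
# `action_types` mapping.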
capture_thread = None  # created in on_model_loaded once the model is ready


def capture_frames(cap):
    """Read camera frames into frame_queue, keeping only the newest frames.

    When the queue is full, the oldest frame is dropped so detection always
    works on a recent frame instead of falling behind the live feed.
    """
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            if frame_queue.full():
                frame_queue.get_nowait()  # drop the oldest frame
            frame_queue.put(frame)
    except Exception as e:
        print(f'Capture thread error: {e}')
    finally:
        # Window teardown happens on the main thread; releasing the camera
        # here is enough for a clean shutdown.
        cap.release()
        print('Capture thread released the camera')

def on_model_loaded():
    """Start the capture thread once the model has finished loading."""
    global capture_thread
    # daemon=True keeps the process from hanging if the main thread exits
    # before the join at the bottom of the script.
    capture_thread = threading.Thread(target=capture_frames, args=(cap,), daemon=True)
    capture_thread.start()
    print("\nAll dependencies loaded\n")
print("\nLoading dependencies...\nThis might take a few minutes...\nThank you for your patience :)\n\n")

# The frame queue must exist before the model is constructed, because
# on_model_loaded starts the capture thread, which writes into it right away.
frame_queue = queue.Queue(maxsize=3)

cap = cv2.VideoCapture(0)
model = GestureDetectionModel(on_load=on_model_loaded, use_thread=False)
action_types = model.action_types

# Debouncing state: the action only switches (and the Kalman filter only
# resets) when the detected gesture changes.
last_action_index = None
last_prediction = None

def handle_input(prediction, frame):
    """Map a gesture prediction to an action and execute it."""
    global last_action_index, last_prediction
    if prediction is None:
        return frame
    frame = model.highlight_gesture(frame, prediction)
    action_name, action_index = model.get_action(prediction)
    if last_action_index is None:
        last_action_index = action_index
    elif prediction != last_prediction:
        last_prediction = prediction
        print(f'Gesture {prediction} --> Action: {action_name}')
    if action_index != last_action_index:
        # The gesture maps to a new action: restart the cursor smoothing so
        # the Kalman filter does not blend the two motions together.
        last_action_index = action_index
        model.reset_kalman_filter()
    elif action_index == action_types['TOGGLE_RELATIVE_MOUSE']:
        # Toggling should fire once per gesture, not once per frame, so skip
        # re-executing while the toggle gesture is held.
        return frame
    model.execute_action(action_index, frame)
    return frame

# Main loop: consume frames from the capture thread, run detection, and show
# the annotated feed until 'q' is pressed.
while True:
    try:
        frame = frame_queue.get(timeout=0.1)  # block briefly instead of busy-waiting
    except queue.Empty:
        continue
    frame, landmarks = model.process_frame(frame, draw_connections=True)
    if landmarks:
        is_left_hand = model.get_hand_orientation() == "Left"
        prediction = model.predict(landmarks, is_left_hand=is_left_hand)
        frame = handle_input(prediction, frame)
    cv2.imshow('Virtual Mouse', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Shutdown: release the camera first so the capture loop exits, wait for the
# thread to finish, then close the preview window from the main thread.
cap.release()
if capture_thread is not None:
    capture_thread.join()
cv2.destroyAllWindows()