PythonSandbox_takephoto.py
import libjevois as jevois
import cv2
import numpy as np
## Simple example of image processing using OpenCV in Python on JeVois
#
# This module is here for you to experiment with Python OpenCV on JeVois.
#
# By default, we get the next video frame from the camera as an OpenCV BGR (color) image named 'inimg'.
# We then apply some image processing to it to create an output BGR image named 'outimg'.
# We finally add some text drawings to outimg and send it to host over USB.
#
# See http://jevois.org/tutorials for tutorials on getting started with programming JeVois in Python without having
# to install any development software on your host computer.
#
# @author Laurent Itti
#
# @videomapping YUYV 352 288 30.0 YUYV 352 288 30.0 JeVois PythonSandbox
# @email itti\@usc.edu
# @address University of Southern California, HNB-07A, 3641 Watt Way, Los Angeles, CA 90089-2520, USA
# @copyright Copyright (C) 2017 by Laurent Itti, iLab and the University of Southern California
# @mainurl http://jevois.org
# @supporturl http://jevois.org/doc
# @otherurl http://iLab.usc.edu
# @license GPL v3
# @distribution Unrestricted
# @restrictions None
# @ingroup modules
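#
# Assuming the standard JeVois setup, the @videomapping line above typically corresponds to an entry like
# the following in JEVOIS:/config/videomappings.cfg on the microSD (a sketch; adjust resolution and module
# name to your own installation):
#
#   YUYV 352 288 30.0 YUYV 352 288 30.0 JeVois PythonSandbox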
class PythonSandbox:
    # ###################################################################################################
    ## Constructor
    def __init__(self):
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)
        # Frame counter and index used to name the next saved photo:
        self.frame = 0
        self.i = 1
    # ###################################################################################################
    ## Process function with USB output
    def process(self, inframe, outframe):
        # Start measuring image processing time (NOTE: does not account for input conversion time):
        self.timer.start()
        msg = "detection"

        # Get the next camera image (may block until it is captured). getCvBGR() converts it to an OpenCV
        # BGR color image and getCvGRAY() to grayscale; getCvRGB() and getCvRGBA() are also supported:
        test1 = inframe.getCvBGR()
        gray_img = inframe.getCvGRAY()

        # Load the Haar cascade face detector shipped with JeVois and run it on the grayscale image.
        # scaleFactor controls how much the image is shrunk at each pyramid level, and minNeighbors how many
        # overlapping candidate detections are required to keep a face:
        face_cascade = cv2.CascadeClassifier('share/facedetector/haarcascade_frontalface_alt.xml')
        faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5)

        # For each detected face: draw a green box, report it over serial, and save the grayscale face crop
        # to the microSD (the training-data directory must already exist):
        for (x, y, w, h) in faces:
            cv2.rectangle(test1, (x, y), (x + w, y + h), (0, 255, 0), 2)
            jevois.sendSerial("face detected {}".format(self.frame))
            cv2.imwrite('modules/JeVois/PythonSandbox/training-data/' + str(self.i) + ".jpg",
                        gray_img[y:y + h, x:x + w])
            msg = "Click! Photo taken"
            jevois.sendSerial("photo taken")
            self.frame += 1
            self.i += 1

        outimg = test1
        # The PythonSandbox template this module is based on applies a Laplacian edge detector at this point;
        # that line is kept below, commented out, for reference. For other filters to try, see for example:
        # - http://docs.opencv.org/trunk/d4/d13/tutorial_py_filtering.html
        # - http://docs.opencv.org/trunk/d9/d61/tutorial_py_morphological_ops.html
        # - http://docs.opencv.org/trunk/d5/d0f/tutorial_py_gradients.html
        # - http://docs.opencv.org/trunk/d7/d4d/tutorial_py_thresholding.html
        # When these tutorials do "img = cv2.imread('name.jpg', 0)", the final 0 means they want a grayscale
        # image, so use getCvGRAY() above in those cases; when they omit that final 0, they assume a color
        # image and you should use getCvBGR() here. The simplest option, a plain copy of the camera image
        # (outimg = test1), is what this module already does above.
        #outimg = cv2.Laplacian(test1, -1, ksize=5, scale=0.25, delta=127)

        # Write a title:
        cv2.putText(outimg, msg, (3, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

        # Write frames/s info from our timer into the output image (NOTE: does not account for output
        # conversion time):
        fps = self.timer.stop()
        height, width, channels = outimg.shape  # if outimg is grayscale, use: height, width = outimg.shape
        cv2.putText(outimg, fps, (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

        # Convert our BGR output image to the video output format and send it to the host over USB. If your
        # output image is not BGR, use sendCvGRAY(), sendCvRGB(), or sendCvRGBA() as appropriate:
        outframe.sendCvBGR(outimg)
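
# Example messages this module emits with jevois.sendSerial(), one line per detected face:
#
#   face detected 12
#   photo taken
#
# Saved photos accumulate as 1.jpg, 2.jpg, ... under modules/JeVois/PythonSandbox/training-data/ on the
# microSD. Note that cv2.imwrite() returns False rather than raising an error when it cannot write (for
# example, if that directory does not exist), so missing photos fail silently.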