#!/usr/bin/env python
# For Serial
import serial
# For Audio
import time # used only by the commented-out audio code below
#from kivy.core.audio import SoundLoader
# For face tracking
import numpy as np
import cv2
import cv2.cv as cv
from video import create_capture
from common import clock, draw_str
# Configure the serial connection (the parameters differ depending on the device you are connecting to)
# NOTE: these settings must match the serial configuration of the device on the other end
ser = serial.Serial(
    port='/dev/cu.usbmodemfa131',
    baudrate=9600,
    parity=serial.PARITY_ODD,
    stopbits=serial.STOPBITS_TWO,
    bytesize=serial.SEVENBITS
)
ser.isOpen()  # the port opens on construction; this just confirms it
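# The port name above is machine-specific (an Arduino on OS X in this case).
# A sketch for finding yours, assuming pyserial >= 2.6 so that
# serial.tools.list_ports is available:
#
#   from serial.tools import list_ports
#   for port_info in list_ports.comports():
#       print port_info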
help_message = '''
USAGE: facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]
'''
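# Example invocation (assuming the default cascade paths below and camera 0):
#   python facedetect.py --cascade ../../data/haarcascades/haarcascade_frontalface_alt.xml 0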
def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv.CV_HAAR_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]  # convert (x, y, w, h) rows to (x1, y1, x2, y2)
    return rects
def get_center_rect(x1, y1, x2, y2):
    cx = int(round((abs(x2 - x1) / 2.0) + x1))
    cy = int(round((abs(y2 - y1) / 2.0) + y1))
    return (cx, cy)
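# e.g. get_center_rect(10, 20, 110, 220) returns (60, 120), the midpoint of the rectangle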
def draw_rects(img, rects, color):
    center = ()
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
        # Circle data
        center = (cx, cy) = get_center_rect(x1, y1, x2, y2)
        radius = 5
        circle_color = (0, 0, 255)
        #print 'CIRCLE:', center
        cv2.circle(img, center, radius, circle_color, thickness=3, lineType=8, shift=0)
        break  # just draw the first one
    return center
def draw_sub_rects(rects):#, prev_time):
    # NOTE: relies on the module-level gray, vis, nested, and ser defined below.
    # The nested (smile) cascade runs only inside each detected face's region
    # of interest, so the subrects coordinates are relative to that ROI.
    dt = 10  # used only by the commented-out sound throttling below
    for x1, y1, x2, y2 in rects:
        roi = gray[y1:y2, x1:x2]
        vis_roi = vis[y1:y2, x1:x2]
        subrects = detect(roi.copy(), nested)
        if len(subrects) > 0:
            print "THREATENED"
            ser.write("threatened" + '\r\n')  # send a "threatened" signal
        '''
        if (clock() - prev_time > dt):
            if (len(subrects) > 0):
                if sound:
                    #print("Sound found at %s" % sound.source)
                    #print("Sound is %.3f seconds" % sound.length)
                    sound.play()
                    #time.sleep(5) # delays for 5 seconds
                    sound.stop()
                    prev_time = clock()
        '''
        draw_rects(vis_roi, subrects, (255, 0, 0))
if __name__ == '__main__':
    import sys, getopt
    print help_message
    #sound = SoundLoader.load('angry_owls.mp3')
    #prev_time = 0
    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except IndexError:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
    nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_smile.xml")
    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)
    # create_capture falls back to a synthetic test source if no camera is found
    cam = create_capture(video_src, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        t = clock()
        rects = detect(gray, cascade)
        #print rects
        vis = img.copy()
        if len(rects) > 0:
            cx, cy = draw_rects(vis, rects, (0, 255, 0))
            draw_sub_rects(rects)
            #draw_sub_rects(rects, prev_time)
        dt = clock() - t
        # Allow the owl to track the face
        ANGLE = 15      # must be an integer
        X_MARGIN = 100  # the margin of error within which the owl treats the face as centered
        screen_cx = cam.get(3) / 2.0  # property 3 is the frame width, so this is the screen's center x value
        if len(rects) > 0:  # only steer when a face was found; cx is undefined otherwise
            if (screen_cx - X_MARGIN / 2.0) >= cx:
                print "ROTATE camera LEFT"
                ser.write(str(ANGLE) + '\r\n')
            elif cx >= (screen_cx + X_MARGIN / 2.0):
                print "ROTATE camera RIGHT"
                ser.write("-" + str(ANGLE) + '\r\n')
        '''
        To interpret the angle on the Arduino's side, the DIR is set by the
        sign and the quantity of rotation is set by the angle. The Arduino
        takes care of the bounds of the owl's neck rotation, so this code only
        tells the owl's neck how much to turn and in which direction.
        '''
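        # On the wire this is just newline-terminated ASCII: "15\r\n" turns
        # the owl left by 15 degrees, "-15\r\n" turns it right, and
        # "threatened\r\n" (sent from draw_sub_rects) signals a detected smile.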
        draw_str(vis, (20, 20), 'time: %.1f ms' % (dt * 1000))
        cv2.imshow('facedetect', vis)
        if (cv2.waitKey(5) & 0xFF) == 27:  # 27 => ESCAPE key
            break
    cv2.destroyAllWindows()  # clean up the display
    ser.close()              # clean up the serial connection
    exit()