Make the raspimon look at you based on your face location.
Also flips the camera frame so it works like a mirror.
Change-Id: I4a45c5ceb2680686d638caa17abc0ff04ca49950
diff --git a/bestiary.py b/bestiary.py
index fcdb43f..8fccf50 100644
--- a/bestiary.py
+++ b/bestiary.py
@@ -12,6 +12,69 @@
d = (255, 0, 128)
l = (128, 255, 128)
class Volt:
    """8x8 pixel-art poses for the Volt raspimon.

    Each pose is a flat, row-major list of 64 (R, G, B) tuples suitable
    for SenseHat.set_pixels(). The poses differ only in where the white
    "pupil" pixels (w) sit inside the black eye pixels (k), which makes
    the character appear to look in different directions. The single-
    letter color names (o, y, n, k, w) are module-level constants
    defined at the top of this file.
    """

    # Pupils on the upper eye row, toward the center: looking up/ahead.
    LOOK_UP = [
        o, y, y, y, y, y, y, o,
        o, o, n, y, y, n, o, o,
        y, k, w, y, y, w, k, y,
        y, k, k, y, y, k, k, y,
        y, y, y, k, k, y, y, y,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n
    ]
    # Pupils on the upper eye row, shifted right.
    LOOK_UP_RIGHT = [
        o, y, y, y, y, y, y, o,
        o, o, n, y, y, n, o, o,
        y, k, w, y, y, k, w, y,
        y, k, k, y, y, k, k, y,
        y, y, y, k, k, y, y, y,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n
    ]
    # Pupils on the upper eye row, shifted left.
    LOOK_UP_LEFT = [
        o, y, y, y, y, y, y, o,
        o, o, n, y, y, n, o, o,
        y, w, k, y, y, w, k, y,
        y, k, k, y, y, k, k, y,
        y, y, y, k, k, y, y, y,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n
    ]
    # Pupils on the lower eye row, toward the center: looking down.
    LOOK_DOWN = [
        o, y, y, y, y, y, y, o,
        o, o, n, y, y, n, o, o,
        y, k, k, y, y, k, k, y,
        y, k, w, y, y, w, k, y,
        y, y, y, k, k, y, y, y,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n
    ]
    # Pupils on the lower eye row, shifted right.
    LOOK_DOWN_RIGHT = [
        o, y, y, y, y, y, y, o,
        o, o, n, y, y, n, o, o,
        y, k, k, y, y, k, k, y,
        y, k, w, y, y, k, w, y,
        y, y, y, k, k, y, y, y,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n
    ]
    # Pupils on the lower eye row, shifted left.
    LOOK_DOWN_LEFT = [
        o, y, y, y, y, y, y, o,
        o, o, n, y, y, n, o, o,
        y, k, k, y, y, k, k, y,
        y, w, k, y, y, w, k, y,
        y, y, y, k, k, y, y, y,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n,
        n, n, n, y, y, n, n, n
    ]
+
+
# Chirp (bird) poses
class Chirp:
STANDING = [
diff --git a/raspimon_sees_faces.py b/raspimon_sees_faces.py
index 2c25328..e1f0fd4 100644
--- a/raspimon_sees_faces.py
+++ b/raspimon_sees_faces.py
@@ -1,38 +1,73 @@
from sense_hat import SenseHat
from time import sleep
-import threading
-# Our new APIs:
+from math import floor
import vision
-from bestiary import Chirp
+from detect import BBox
+from bestiary import Volt
+
# The raspimon's field of view is the camera frame split into a
# FOV_COLUMNS x FOV_ROWS grid; faces are located by cell index (0-5).
FOV_COLUMNS = 3
FOV_ROWS = 2
# Initialize the SenseHat and draw the default pose on the LED matrix
# (set_pixels overwrites the whole 8x8 matrix, so no explicit clear
# is needed).
sense = SenseHat()
sense.set_pixels(Volt.LOOK_UP)
-# animate the raspimon
-def dance():
- sense.set_pixels(Chirp.WINGS_UP)
- sleep(0.3)
- sense.set_pixels(Chirp.STANDING)
- sleep(0.3)
-# redraw the raspimon in response to detected faces
def get_fov_bboxes(image_size):
    """Return the cells of the raspimon's field of view (FOV).

    The (width, height) frame given by image_size is tiled into
    FOV_ROWS x FOV_COLUMNS equally sized BBox cells, listed in
    row-major order (left-to-right, then top-to-bottom).
    """
    cell_width = image_size[0] / FOV_COLUMNS
    cell_height = image_size[1] / FOV_ROWS
    return [
        BBox(column * cell_width, row * cell_height,
             (column + 1) * cell_width, (row + 1) * cell_height)
        for row in range(FOV_ROWS)
        for column in range(FOV_COLUMNS)
    ]
+
+
def get_location(bbox, image_size):
    """Return the index of the FOV cell containing the center of bbox.

    image_size is the (width, height) of the camera frame. Returns
    None when the center point does not overlap any cell.
    """
    # Build a 1x1 box at the face's top-left corner, then shift it to
    # the face's center.
    center = BBox(bbox.xmin, bbox.ymin, bbox.xmin + 1, bbox.ymin + 1)
    center = center.translate(floor(bbox.width / 2), floor(bbox.height / 2))
    # The first cell that overlaps the center point wins.
    for index, cell in enumerate(get_fov_bboxes(image_size)):
        if BBox.iou(center, cell) > 0:
            return index
    return None
+
+
def react_to_faces(faces, image_size=(640, 480)):
    """Redraw the raspimon so it looks toward the detected face.

    Only reacts when exactly one face is visible; otherwise the current
    pose is left unchanged. image_size is the (width, height) of the
    camera frame (default 640x480 — assumed to match the frames from
    vision.get_frames(); TODO confirm).
    """
    if len(faces) != 1:
        return
    # Cell index -> pose, in the same row-major order that
    # get_fov_bboxes() lists its cells (top row first).
    poses = (
        Volt.LOOK_UP_LEFT, Volt.LOOK_UP, Volt.LOOK_UP_RIGHT,
        Volt.LOOK_DOWN_LEFT, Volt.LOOK_DOWN, Volt.LOOK_DOWN_RIGHT,
    )
    face_loc = get_location(faces[0].bbox, image_size)
    if face_loc is not None:
        sense.set_pixels(poses[face_loc])
+
# Load the face-detection model (vision wraps the TF / Edge TPU setup).
detector = vision.Detector(vision.FACE_DETECTION_MODEL)
# Run the model on camera frames in real time.
for frame in vision.get_frames():
    faces = detector.get_objects(frame)
    # Draw bounding boxes on the frame and display it
    vision.draw_objects(frame, faces)
    # Pass faces to function that controls raspimon
    react_to_faces(faces)
diff --git a/vision.py b/vision.py
index 58c2675..0cdc34b 100644
--- a/vision.py
+++ b/vision.py
@@ -62,6 +62,7 @@
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
while True:
ret, frame = cap.read()
+ frame = cv2.flip(frame,1)
if ret:
yield frame
cv2.imshow(title, frame)