Add threading to animate the Raspimon, add default vision args, and add a bestiary for Raspimon poses/colors
Change-Id: I05d3e24f43b7106d68b05c4f0a7e8d1ea5f8b72a
diff --git a/bestiary.py b/bestiary.py
new file mode 100644
index 0000000..fcdb43f
--- /dev/null
+++ b/bestiary.py
@@ -0,0 +1,36 @@
+# Raspimon colors
+r = (255, 0, 0)
+g = (0, 255, 0)
+b = (0, 0, 255)
+k = (0, 0, 0)
+w = (255, 255, 255)
+c = (0, 255, 255)
+y = (255, 255, 0)
+o = (255, 128, 0)
+n = (255, 128, 128)
+p = (128, 0, 128)
+d = (255, 0, 128)
+l = (128, 255, 128)
+
+# Chirp (bird) poses
+class Chirp:
+  STANDING = [
+    k, k, k, l, r, y, k, k,
+    k, k, k, l, l, k, k, k,
+    k, k, g, w, w, g, k, k,
+    k, k, g, w, w, g, k, k,
+    k, k, l, w, w, l, k, k,
+    k, k, l, w, w, l, k, k,
+    k, k, k, l, l, k, k, k,
+    k, k, o, k, k, o, k, k
+  ]
+  WINGS_UP = [
+    k, k, k, k, k, k, k, k,
+    k, k, k, l, r, y, k, k,
+    k, k, k, l, l, k, k, k,
+    l, g, g, w, w, g, g, l,
+    k, l, g, w, w, l, g, k,
+    k, k, l, w, w, l, k, k,
+    k, k, k, l, l, k, k, k,
+    k, k, o, k, k, o, k, k
+  ]
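
Note: each pose is a flat list of 64 (R, G, B) tuples, one per pixel of the Sense HAT's 8x8 LED matrix, read left to right and top to bottom, which is the shape sense_hat's set_pixels() expects. A minimal sketch of cycling the two poses, assuming a Raspberry Pi with a Sense HAT attached:

from time import sleep
from sense_hat import SenseHat
from bestiary import Chirp

sense = SenseHat()
# Alternate the two poses; set_pixels() takes exactly 64 RGB tuples.
for pose in (Chirp.STANDING, Chirp.WINGS_UP, Chirp.STANDING):
  sense.set_pixels(pose)
  sleep(0.3)
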
diff --git a/raspimon_sees_faces.py b/raspimon_sees_faces.py
index 73fd032..2c25328 100644
--- a/raspimon_sees_faces.py
+++ b/raspimon_sees_faces.py
@@ -1,55 +1,34 @@
from sense_hat import SenseHat
from time import sleep
+import threading
# Our new APIs:
import vision
+from bestiary import Chirp
-# initialize SenseHat instance and clear the LED matrix
+# initialize SenseHat instance and draw the initial Raspimon
sense = SenseHat()
-sense.clear()
+sense.set_pixels(Chirp.STANDING)
-#Raspimon colors
-r = (255, 0, 0)
-g = (0, 255, 0)
-b = (0, 0, 255)
-k = (0, 0, 0)
-w = (255, 255, 255)
-c = (0, 255, 255)
-y = (255, 255, 0)
-o = (255, 128, 0)
-n = (255, 128, 128)
-p = (128, 0, 128)
-d = (255, 0, 128)
-l = (128, 255, 128)
+# animate the raspimon
+def dance():
+  sense.set_pixels(Chirp.WINGS_UP)
+  sleep(0.3)
+  sense.set_pixels(Chirp.STANDING)
+  sleep(0.3)
-# draw the initial Raspimon (before reacting to vision)
-chirp2 = [
-  k, k, k, k, k, k, k, k,
-  k, k, k, l, r, y, k, k,
-  k, k, k, l, l, k, k, k,
-  l, g, g, w, w, g, g, l,
-  k, l, g, w, w, l, g, k,
-  k, k, l, w, w, l, k, k,
-  k, k, k, l, l, k, k, k,
-  k, k, o, k, k, o, k, k
-]
-
-chirp1 = [
-  k, k, k, l, r, y, k, k,
-  k, k, k, l, l, k, k, k,
-  k, k, g, w, w, g, k, k,
-  k, k, g, w, w, g, k, k,
-  k, k, l, w, w, l, k, k,
-  k, k, l, w, w, l, k, k,
-  k, k, k, l, l, k, k, k,
-  k, k, o, k, k, o, k, k
-]
-
-sense.set_pixels(chirp1)
+# redraw the raspimon in response to detected faces
+def react_to_faces(faces):
+  print(len(faces), 'faces visible')
+  if len(faces) > 0 and threading.active_count() == 1:
+    thread = threading.Thread(target=dance)
+    thread.start()
# load the neural network model (obfuscates use of TF and Edge TPU)
detector = vision.Detector(vision.FACE_DETECTION_MODEL)
-def dance():
-  sense.set_pixels(chirp2)
-  sleep(0.3)
-  sense.set_pixels(chirp1)
-  sleep(0.3)
-
-# redraw the raspimon in response to detected faces
-def react_to_faces(faces):
-  print(len(faces), 'faces visible')
-  #your code here
-  if len(faces) == 1:
-    dance()
-
# run a loop to run the model in real-time
-for frame in vision.get_frames('Face Detector', size=(640, 480)):
+for frame in vision.get_frames():
+
   faces = detector.get_objects(frame)
   # Draw bounding boxes on the frame and display it
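
Note: the threading.active_count() == 1 guard in react_to_faces() starts a dance only when no animation thread is still alive, so a burst of detections cannot stack overlapping animations on the LED matrix. A standalone sketch of that non-blocking pattern (animate and on_detection are illustrative names, not part of this change):

import threading
import time

def animate():
  # Stand-in for dance(): blocks ~0.6 s while the LEDs update.
  time.sleep(0.6)

def on_detection():
  # Only spawn a worker if the main thread is the sole live thread.
  if threading.active_count() == 1:
    threading.Thread(target=animate).start()
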
diff --git a/vision.py b/vision.py
index c18b160..58c2675 100644
--- a/vision.py
+++ b/vision.py
@@ -55,7 +55,7 @@
     label = '%s (%.2f)' % (labels.get(index, 'n/a'), score)
     cv2.putText(frame, label, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2.0, color, 2)
 
-def get_frames(title, size):
+def get_frames(title='Raspimon camera', size=(640, 480)):
   width, height = size
   cap = cv2.VideoCapture(0)
   cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
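
Note: with defaults on both parameters, the main loop can call vision.get_frames() with no arguments, while older call sites that pass a title and size keep working. A quick sketch, assuming this vision.py is importable and a camera is attached:

import vision

detector = vision.Detector(vision.FACE_DETECTION_MODEL)
# Equivalent to vision.get_frames(title='Raspimon camera', size=(640, 480)).
for frame in vision.get_frames():
  faces = detector.get_objects(frame)
  print(len(faces), 'faces visible')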