blob: 73fd03246bfa6c6b3fe4866596d4419620e9e403 [file] [log] [blame]
from sense_hat import SenseHat
from time import sleep
# Our new APIs:
import vision
# initialize SenseHat instance and clear the LED matrix
sense = SenseHat()  # requires the Sense HAT board (or its emulator) to be present
sense.clear()  # blank all 64 LEDs before the first sprite is drawn
# Raspimon palette: single-letter (r, g, b) tuples used to paint the 8x8 sprites
r, g, b = (255, 0, 0), (0, 255, 0), (0, 0, 255)        # primaries
k, w = (0, 0, 0), (255, 255, 255)                      # black (LED off) and white
c, y, o = (0, 255, 255), (255, 255, 0), (255, 128, 0)  # cyan, yellow, orange
n, p = (255, 128, 128), (128, 0, 128)                  # pink, purple
d, l = (255, 0, 128), (128, 255, 128)                  # magenta-pink, light green
# Raspimon sprite, wings-spread pose (frame 2 of the two-frame flap animation).
# Laid out one 8-pixel LED row per source line so the bird is visible in the code.
chirp2 = [
    k, k, k, k, k, k, k, k,
    k, k, k, l, r, y, k, k,
    k, k, k, l, l, k, k, k,
    l, g, g, w, w, g, g, l,
    k, l, g, w, w, l, g, k,
    k, k, l, w, w, l, k, k,
    k, k, k, l, l, k, k, k,
    k, k, o, k, k, o, k, k,
]
# Raspimon sprite, wings-tucked idle pose (frame 1), one 8-pixel LED row per line.
# BUG FIX: the original bottom row used `K` (uppercase) eight times, but only
# lowercase `k` (black) is defined, so importing the script raised
# `NameError: name 'K' is not defined`. chirp2's matching feet row uses `k`,
# confirming the intended color.
chirp1 = [
    k, k, k, l, r, y, k, k,
    k, k, k, l, l, k, k, k,
    k, k, g, w, w, g, k, k,
    k, k, g, w, w, g, k, k,
    k, k, l, w, w, l, k, k,
    k, k, l, w, w, l, k, k,
    k, k, k, l, l, k, k, k,
    k, k, o, k, k, o, k, k,  # feet row: `K` -> `k`
]
# show the idle pose before the detector starts reporting faces
sense.set_pixels(chirp1)
# load the neural network model (obfuscates use of TF and Edge TPU)
# NOTE(review): vision.Detector presumably wraps a TFLite face-detection model
# running on the Edge TPU -- confirm against the project's vision module.
detector = vision.Detector(vision.FACE_DETECTION_MODEL)
def dance():
    """Flap once: flash the wings-spread pose, then return to the idle pose."""
    for pose in (chirp2, chirp1):
        sense.set_pixels(pose)
        sleep(0.3)
# redraw the raspimon in response to detected faces
def react_to_faces(faces):
    """Print how many faces are in view and flap the wings when exactly one is."""
    count = len(faces)
    print(count, 'faces visible')
    if count != 1:
        return
    dance()
# run a loop to run the model in real-time
# Each iteration: grab a camera frame, detect faces in it, draw the overlay,
# and update the LED sprite. NOTE(review): vision.get_frames presumably yields
# camera frames indefinitely until the preview is closed -- TODO confirm.
for frame in vision.get_frames('Face Detector', size=(640, 480)):
    faces = detector.get_objects(frame)
    # Draw bounding boxes on the frame and display it
    vision.draw_objects(frame, faces)
    # Pass faces to function that controls raspimon
    react_to_faces(faces)