Add raspimon code for basic classification

Change-Id: Id4ad30df5a8a9f7fdecb6c305027fcb1b3bdab9b
diff --git a/raspimon_sees_things.py b/raspimon_sees_things.py
new file mode 100644
index 0000000..d0d840b
--- /dev/null
+++ b/raspimon_sees_things.py
@@ -0,0 +1,60 @@
+"""Raspimon demo: classify camera frames and react on the Sense HAT LED matrix."""
+from sense_hat import SenseHat
+from threading import Thread
+from queue import Queue
+import vision
+
+# Initialize SenseHat instance and clear the LED matrix
+sense = SenseHat()
+sense.clear()
+
+# Load the neural network model and its labels
+labels = vision.load_labels(vision.CLASSIFICATION_LABELS)
+classifier = vision.Classifier(vision.CLASSIFICATION_MODEL)
+
+
+def react_to_things(queue):
+  """Redraw the raspimon in response to detected things.
+
+  Runs forever on a background thread, blocking on the queue until the
+  main loop delivers new classification results, then animates the
+  Sense HAT for the top-scoring result.
+
+  Args:
+    queue: Queue whose items are lists of (label_id, score) tuples,
+      ordered by descending score.
+  """
+  while True:
+    classes = queue.get()
+    if classes:
+      label_id, score = classes[0]
+      label = labels.get(label_id, 'n/a')
+      if score > 0.5:
+        # Confident result: scroll the label text across the LED matrix.
+        # NOTE: show_message() blocks for several seconds per call.
+        sense.show_message(label, scroll_speed=0.06)
+      else:
+        sense.show_letter('?')
+
+
+# Create thread and queue to update the SenseHat
+classes_queue = Queue()
+sensehat_thread = Thread(target=react_to_things,
+                         args=[classes_queue],
+                         daemon=True)
+sensehat_thread.start()
+
+
+# Run a loop to run the model in real-time
+for frame in vision.get_frames():
+  # Get list of all recognized objects in the frame
+  classes = classifier.get_classes(frame)
+
+  # Draw the label name on the video
+  vision.draw_classes(frame, classes, labels)
+
+  # Pass the latest results to the raspimon, but only once it has drained
+  # the previous ones: show_message() blocks for seconds per item, so
+  # enqueueing every frame unconditionally would grow the queue without
+  # bound and make the display react to ever-staler frames.
+  if classes and classes_queue.empty():
+    classes_queue.put(classes)
diff --git a/vision.py b/vision.py
index 3a243b2..3c042cf 100644
--- a/vision.py
+++ b/vision.py
@@ -21,7 +21,7 @@
 from pycoral.adapters import detect
 
 FACE_DETECTION_MODEL = 'ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite'
-CLASSIFICATION_MODEL = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
+CLASSIFICATION_MODEL = 'tf2_mobilenet_v2_1.0_224_ptq_edgetpu.tflite'
 CLASSIFICATION_LABELS = 'imagenet_labels.txt'
 
 CORAL_COLOR = (86, 104, 237)