Add cv2 drawing helpers to vision module
Change-Id: If88432008a429f8989c14c5a1d3163da936922ae
diff --git a/example.py b/example.py
index cff24f0..6dee6ae 100644
--- a/example.py
+++ b/example.py
@@ -1,22 +1,17 @@
-import cv2
import vision
def run_detector_example():
detector = vision.make_detector('ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite')
for frame in vision.Camera('Face Detector', size=(640, 480)):
faces = detector(frame)
- for face in faces:
- bbox = face.bbox
- cv2.rectangle(frame, (bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax), (255, 0, 255), 5)
+ vision.draw_objects(frame, faces, color=(255, 0, 255), thickness=5)
def run_classifier_example():
labels = vision.load_labels('imagenet_labels.txt')
classifier = vision.make_classifier('mobilenet_v2_1.0_224_quant_edgetpu.tflite')
for frame in vision.Camera('Object Classifier', size=(640, 480)):
classes = classifier(frame)
- for index, score in classes:
- label = '%s (%.2f)' % (labels.get(index, 'n/a'), score)
- cv2.putText(frame, label, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 0, 255), 2)
+ vision.draw_classes(frame, classes, labels, color=(255, 0, 255))
if __name__ == '__main__':
#run_classifier_example()
diff --git a/vision.py b/vision.py
index 3b5141e..957bf1d 100644
--- a/vision.py
+++ b/vision.py
@@ -37,6 +37,16 @@
return classify.get_output(interpreter, top_k, threshold)
return process
+def draw_objects(frame, objs, color, thickness):
+ for obj in objs:
+ bbox = obj.bbox
+ cv2.rectangle(frame, (bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax), color, thickness)
+
+def draw_classes(frame, classes, labels, color):
+ for index, score in classes:
+ label = '%s (%.2f)' % (labels.get(index, 'n/a'), score)
+ cv2.putText(frame, label, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2.0, color, 2)
+
def Camera(title, size):
width, height = size
cap = cv2.VideoCapture(0)