Use python3-pycoral instead of python3-edgetpu

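The python3-edgetpu library is deprecated in favor of python3-pycoral,
which replaces the old engine classes with a plain TF Lite interpreter
plus small adapter modules. Roughly, the mapping looks like this
(sketch only; model_path and the threshold/top_k values are
illustrative):

    # Old API: engine object with built-in post-processing.
    from edgetpu.classification.engine import ClassificationEngine
    engine = ClassificationEngine(model_path)
    classes = engine.classify_with_input_tensor(
        tensor, threshold=0.1, top_k=3)

    # New API: interpreter + adapters from pycoral.
    from pycoral.adapters import classify
    from pycoral.utils import edgetpu
    interpreter = edgetpu.make_interpreter(model_path)
    interpreter.allocate_tensors()
    edgetpu.run_inference(interpreter, tensor)
    classes = classify.get_classes(
        interpreter, top_k=3, score_threshold=0.1)

Unlike the old DetectionEngine, pycoral's detect.get_objects() returns
bounding boxes in inference-image pixel coordinates rather than
normalized [0, 1] coordinates, so detect.py now scales boxes by
1/width and 1/height before applying the area filter and the
window-size scaling.
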
Change-Id: I008dfc6c334dee1963c8fdeaf94a5e25263f0aea
diff --git a/debian/changelog b/debian/changelog
index a8bf97f..2205fbb 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+edgetpuvision (7-1) mendel-eagle; urgency=medium
+
+  * Switching from python3-edgetpu to python3-pycoral.
+
+ -- Coral <coral-support@google.com>  Mon, 09 Nov 2020 16:24:25 -0800
+
 edgetpuvision (6-1) mendel-day; urgency=medium
 
   * Cut for day release.
diff --git a/debian/control b/debian/control
index 4b4e6b5..8914388 100644
--- a/debian/control
+++ b/debian/control
@@ -20,7 +20,6 @@
          gstreamer1.0-plugins-ugly,
          gstreamer1.0-python3-plugin-loader,
          python3-cairo,
-         python3-edgetpu,
          python3-gi,
          python3-gi-cairo,
          python3-gst-1.0,
@@ -28,8 +27,8 @@
          python3-numpy,
          python3-pil,
          python3-protobuf,
+         python3-pycoral,
          weston
 Recommends: imx-gst1.0-plugin,
 Description: EdgeTPU camera API
  API to run inference on image data coming from the camera.
-
diff --git a/edgetpuvision/classify.py b/edgetpuvision/classify.py
index 408ecda..165806f 100644
--- a/edgetpuvision/classify.py
+++ b/edgetpuvision/classify.py
@@ -25,7 +25,8 @@
 import itertools
 import time
 
-from edgetpu.classification.engine import ClassificationEngine
+from pycoral.adapters import classify
+from pycoral.utils import edgetpu
 
 from . import svg
 from . import utils
@@ -109,15 +110,15 @@
 
     fps_counter = utils.avg_fps_counter(30)
 
-    engines, titles = utils.make_engines(args.model, ClassificationEngine)
-    assert utils.same_input_image_sizes(engines)
-    engines = itertools.cycle(engines)
-    engine = next(engines)
+    interpreters, titles = utils.make_interpreters(args.model)
+    assert utils.same_input_image_sizes(interpreters)
+    interpreters = itertools.cycle(interpreters)
+    interpreter = next(interpreters)
 
     labels = utils.load_labels(args.labels)
     draw_overlay = True
 
-    yield utils.input_image_size(engine)
+    yield utils.input_image_size(interpreter)
 
     output = None
     while True:
@@ -126,15 +127,17 @@
         inference_rate = next(fps_counter)
         if draw_overlay:
             start = time.monotonic()
-            results = engine.classify_with_input_tensor(tensor, threshold=args.threshold, top_k=args.top_k)
+            edgetpu.run_inference(interpreter, tensor)
             inference_time = time.monotonic() - start
 
-            results = [(labels[i], score) for i, score in results]
+            classes = classify.get_classes(interpreter, top_k=args.top_k,
+                score_threshold=args.threshold)
+            results = [(labels[class_id], score) for class_id, score in classes]
             results = acc.send(results)
             if args.print:
                 print_results(inference_rate, results)
 
-            title = titles[engine]
+            title = titles[interpreter]
             output = overlay(title, results, inference_time, inference_rate, layout)
         else:
             output = None
@@ -142,7 +145,7 @@
         if command == 'o':
             draw_overlay = not draw_overlay
         elif command == 'n':
-            engine = next(engines)
+            interpreter = next(interpreters)
 
 def add_render_gen_args(parser):
     parser.add_argument('--model', required=True,
diff --git a/edgetpuvision/detect.py b/edgetpuvision/detect.py
index 8908617..96ec8f4 100644
--- a/edgetpuvision/detect.py
+++ b/edgetpuvision/detect.py
@@ -27,12 +27,12 @@
 """
 
 import argparse
-import collections
 import colorsys
 import itertools
 import time
 
-from edgetpu.detection.engine import DetectionEngine
+from pycoral.adapters import detect
+from pycoral.utils import edgetpu
 
 from . import svg
 from . import utils
@@ -44,15 +44,6 @@
                                '.bbox': svg.Style(fill_opacity=0.0,
                                                   stroke_width='0.1em')}))
 
-BBox = collections.namedtuple('BBox', ('x', 'y', 'w', 'h'))
-BBox.area = lambda self: self.w * self.h
-BBox.scale = lambda self, sx, sy: BBox(x=self.x * sx, y=self.y * sy,
-                                       w=self.w * sx, h=self.h * sy)
-BBox.__str__ = lambda self: 'BBox(x=%.2f y=%.2f w=%.2f h=%.2f)' % self
-
-Object = collections.namedtuple('Object', ('id', 'label', 'score', 'bbox'))
-Object.__str__ = lambda self: 'Object(id=%d, label=%s, score=%.2f, %s)' % self
-
 def size_em(length):
     return '%sem' % str(0.6 * (length + 1))
 
@@ -72,7 +63,7 @@
 
     return lambda obj_id: 'white'
 
-def overlay(title, objs, get_color, inference_time, inference_rate, layout):
+def overlay(title, objs, get_color, labels, inference_time, inference_rate, layout):
     x0, y0, width, height = layout.window
     font_size = 0.03 * height
 
@@ -86,14 +77,15 @@
 
     for obj in objs:
         percent = int(100 * obj.score)
-        if obj.label:
-            caption = '%d%% %s' % (percent, obj.label)
+        if labels:
+            caption = '%d%% %s' % (percent, labels[obj.id])
         else:
             caption = '%d%%' % percent
 
-        x, y, w, h = obj.bbox.scale(*layout.size)
         color = get_color(obj.id)
-
+        inference_width, inference_height = layout.inference_size
+        bbox = obj.bbox.scale(1.0 / inference_width, 1.0 / inference_height).scale(*layout.size)
+        x, y, w, h = bbox.xmin, bbox.ymin, bbox.width, bbox.height
         doc += svg.Rect(x=x, y=y, width=w, height=h,
                         style='stroke:%s' % color, _class='bbox')
         doc += svg.Rect(x=x, y=y+h,
@@ -125,26 +117,18 @@
 
     return str(doc)
 
-
-def convert(obj, labels):
-    x0, y0, x1, y1 = obj.bounding_box.flatten().tolist()
-    return Object(id=obj.label_id,
-                  label=labels[obj.label_id] if labels else None,
-                  score=obj.score,
-                  bbox=BBox(x=x0, y=y0, w=x1 - x0, h=y1 - y0))
-
 def print_results(inference_rate, objs):
     print('\nInference (rate=%.2f fps):' % inference_rate)
     for i, obj in enumerate(objs):
-        print('    %d: %s, area=%.2f' % (i, obj, obj.bbox.area()))
+        print('    %d: %s, area=%.2f' % (i, obj, obj.bbox.area))
 
 def render_gen(args):
     fps_counter  = utils.avg_fps_counter(30)
 
-    engines, titles = utils.make_engines(args.model, DetectionEngine)
-    assert utils.same_input_image_sizes(engines)
-    engines = itertools.cycle(engines)
-    engine = next(engines)
+    interpreters, titles = utils.make_interpreters(args.model)
+    assert utils.same_input_image_sizes(interpreters)
+    interpreters = itertools.cycle(interpreters)
+    interpreter = next(interpreters)
 
     labels = utils.load_labels(args.labels) if args.labels else None
     filtered_labels = set(l.strip() for l in args.filter.split(',')) if args.filter else None
@@ -152,7 +136,8 @@
 
     draw_overlay = True
 
-    yield utils.input_image_size(engine)
+    width, height = utils.input_image_size(interpreter)
+    yield width, height
 
     output = None
     while True:
@@ -161,27 +146,28 @@
         inference_rate = next(fps_counter)
         if draw_overlay:
             start = time.monotonic()
-            objs = engine.detect_with_input_tensor(tensor, threshold=args.threshold, top_k=args.top_k)
+            edgetpu.run_inference(interpreter, tensor)
             inference_time = time.monotonic() - start
-            objs = [convert(obj, labels) for obj in objs]
 
+            objs = detect.get_objects(interpreter, args.threshold)[:args.top_k]
             if labels and filtered_labels:
-                objs = [obj for obj in objs if obj.label in filtered_labels]
+                objs = [obj for obj in objs if labels[obj.id] in filtered_labels]
 
-            objs = [obj for obj in objs if args.min_area <= obj.bbox.area() <= args.max_area]
+            objs = [obj for obj in objs
+                    if args.min_area <= obj.bbox.scale(1.0 / width, 1.0 / height).area <= args.max_area]
 
             if args.print:
                 print_results(inference_rate, objs)
 
-            title = titles[engine]
-            output = overlay(title, objs, get_color, inference_time, inference_rate, layout)
+            title = titles[interpreter]
+            output = overlay(title, objs, get_color, labels, inference_time, inference_rate, layout)
         else:
             output = None
 
         if command == 'o':
             draw_overlay = not draw_overlay
         elif command == 'n':
-            engine = next(engines)
+            interpreter = next(interpreters)
 
 def add_render_gen_args(parser):
     parser.add_argument('--model',
diff --git a/edgetpuvision/utils.py b/edgetpuvision/utils.py
index df0892d..8a06c05 100644
--- a/edgetpuvision/utils.py
+++ b/edgetpuvision/utils.py
@@ -17,6 +17,9 @@
 import re
 import time
 
+from pycoral.adapters import common
+from pycoral.utils import edgetpu
+
 LABEL_PATTERN = re.compile(r'\s*(\d+)(.+)')
 
 def load_labels(path):
@@ -24,13 +27,11 @@
        lines = (LABEL_PATTERN.match(line).groups() for line in f.readlines())
        return {int(num): text.strip() for num, text in lines}
 
+def input_image_size(interpreter):
+    return common.input_size(interpreter)
 
-def input_image_size(engine):
-    _, h, w, _ = engine.get_input_tensor_shape()
-    return w, h
-
-def same_input_image_sizes(engines):
-    return len({input_image_size(engine) for engine in engines}) == 1
+def same_input_image_sizes(interpreters):
+    return len({input_image_size(interpreter) for interpreter in interpreters}) == 1
 
 def avg_fps_counter(window_size):
     window = collections.deque(maxlen=window_size)
@@ -43,14 +44,15 @@
         prev = curr
         yield len(window) / sum(window)
 
-def make_engines(models, engine_class):
-    engines, titles = [], {}
+def make_interpreters(models):
+    interpreters, titles = [], {}
     for model in models.split(','):
         if '@' in model:
             model_path, title = model.split('@')
         else:
             model_path, title = model, os.path.basename(os.path.normpath(model))
-        engine = engine_class(model_path)
-        engines.append(engine)
-        titles[engine] = title
-    return engines, titles
+        interpreter = edgetpu.make_interpreter(model_path)
+        interpreter.allocate_tensors()
+        interpreters.append(interpreter)
+        titles[interpreter] = title
+    return interpreters, titles