Use python3-pycoral instead of python3-edgetpu

Cherry-pick changes from the Enterprise edgetpuvision package
(the resulting API migration is sketched below):
* Compatible with Frogfish release
* Update dependency in setup.py
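
For reference, the classification path changes roughly as follows
(a minimal sketch drawn from the diff below; model_path, tensor,
top_k, and threshold are placeholder names, not values from this
patch):

    # Old API (python3-edgetpu):
    from edgetpu.classification.engine import ClassificationEngine

    engine = ClassificationEngine(model_path)
    # Returns a list of (label_id, score) pairs.
    results = engine.classify_with_input_tensor(
        tensor, threshold=threshold, top_k=top_k)

    # New API (python3-pycoral): build a TF Lite interpreter once,
    # then run inference and post-process with the adapters.
    from pycoral.adapters import classify
    from pycoral.utils import edgetpu

    interpreter = edgetpu.make_interpreter(model_path)
    interpreter.allocate_tensors()
    edgetpu.run_inference(interpreter, tensor)
    # Returns a list of Class(id, score) namedtuples.
    classes = classify.get_classes(
        interpreter, top_k=top_k, score_threshold=threshold)

The detection path follows the same pattern, with
pycoral.adapters.detect.get_objects() replacing
DetectionEngine.detect_with_input_tensor().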

Change-Id: I47b60d2c8e056037c5c3150b053aa9dd0e879fbd
diff --git a/README.md b/README.md
index d096ea3..58fe555 100644
--- a/README.md
+++ b/README.md
@@ -10,12 +10,6 @@
 
 ## Debian package
 
-Install `stdeb` package by running `apt-get install python3-stdeb` or
-`pip3 install stdeb`. Then to generate debian folder run:
-```
-python3 setup.py --command-packages=stdeb.command debianize
-```
-
 To build the Debian package, run:
 ```
 dpkg-buildpackage -b -rfakeroot -us -uc -tc
diff --git a/debian/changelog b/debian/changelog
index 22585ec..3e6581c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+edgetpuvision (7-1) mendel-eagle; urgency=medium
+
+  * Switch from python3-edgetpu to python3-pycoral.
+  * Compatible with Frogfish release.
+
+ -- Coral Team <coral-support@google.com>  Tue, 10 Nov 2020 13:00:16 -0800
+
 edgetpuvision (6-4) mendel-eagle; urgency=medium
 
   * Switch to native glbox.
diff --git a/debian/control b/debian/control
index bcace03..5b38594 100644
--- a/debian/control
+++ b/debian/control
@@ -22,7 +22,6 @@
          gstreamer1.0-plugins-coral,
          mdpd,
          python3-cairo,
-         python3-edgetpu,
          python3-gi,
          python3-gi-cairo,
          python3-gst-1.0,
@@ -30,6 +29,7 @@
          python3-numpy,
          python3-pil,
          python3-protobuf,
+         python3-pycoral,
          weston-mtk
 Description: EdgeTPU camera API
  API to run inference on image data coming from the camera.
diff --git a/edgetpuvision/classify.py b/edgetpuvision/classify.py
index e24d7fb..b5a3598 100644
--- a/edgetpuvision/classify.py
+++ b/edgetpuvision/classify.py
@@ -26,7 +26,8 @@
 import numpy as np
 import time
 
-from edgetpu.classification.engine import ClassificationEngine
+from pycoral.adapters import classify
+from pycoral.utils import edgetpu
 from PIL import Image
 
 from . import svg
@@ -111,15 +112,15 @@
 
     fps_counter = utils.avg_fps_counter(30)
 
-    engines, titles = utils.make_engines(args.model, ClassificationEngine)
-    assert utils.same_input_image_sizes(engines)
-    engines = itertools.cycle(engines)
-    engine = next(engines)
+    interpreters, titles = utils.make_interpreters(args.model)
+    assert utils.same_input_image_sizes(interpreters)
+    interpreters = itertools.cycle(interpreters)
+    interpreter = next(interpreters)
 
     labels = utils.load_labels(args.labels)
     draw_overlay = True
 
-    yield utils.input_image_size(engine)
+    yield utils.input_image_size(interpreter)
 
     output = None
     while True:
@@ -127,20 +128,22 @@
 
         inference_rate = next(fps_counter)
         if draw_overlay:
-            _, _, _, c = engine.get_input_tensor_shape()
-            if c == 1:
+            input_shape = interpreter.get_input_details()[0]['shape']
+            if input_shape[3] == 1:
                 image = image.convert('L')
             tensor = np.asarray(image).flatten()
             start = time.monotonic()
-            results = engine.classify_with_input_tensor(tensor, threshold=args.threshold, top_k=args.top_k)
+            edgetpu.run_inference(interpreter, tensor)
             inference_time = time.monotonic() - start
 
-            results = [(labels[i], score) for i, score in results]
+            classes = classify.get_classes(interpreter, top_k=args.top_k,
+                score_threshold=args.threshold)
+            results = [(labels[class_id], score) for class_id, score in classes]
             results = acc.send(results)
             if args.print:
                 print_results(inference_rate, results)
 
-            title = titles[engine]
+            title = titles[interpreter]
             output = overlay(title, results, inference_time, inference_rate, layout)
         else:
             output = None
@@ -148,7 +151,7 @@
         if command == 'o':
             draw_overlay = not draw_overlay
         elif command == 'n':
-            engine = next(engines)
+            interpreter = next(interpreters)
 
 def add_render_gen_args(parser):
     parser.add_argument('--model', required=True,
diff --git a/edgetpuvision/detect.py b/edgetpuvision/detect.py
index 6c14b82..198b801 100644
--- a/edgetpuvision/detect.py
+++ b/edgetpuvision/detect.py
@@ -27,13 +27,13 @@
 """
 
 import argparse
-import collections
 import colorsys
 import itertools
 import numpy as np
 import time
 
-from edgetpu.detection.engine import DetectionEngine
+from pycoral.adapters import detect
+from pycoral.utils import edgetpu
 from PIL import Image
 
 from . import svg
@@ -46,17 +46,6 @@
                                '.bbox': svg.Style(fill_opacity=0.0,
                                                   stroke_width='0.1em')}))
 
-BBox = collections.namedtuple('BBox', ('x', 'y', 'w', 'h'))
-BBox.area = lambda self: self.w * self.h
-BBox.scale = lambda self, sx, sy, ss: BBox(x=self.x * ss - (ss - sx) / 2,
-                                       y=self.y * ss - (ss - sy) / 2,
-                                       w=self.w * ss,
-                                       h=self.h * ss)
-BBox.__str__ = lambda self: 'BBox(x=%.2f y=%.2f w=%.2f h=%.2f)' % self
-
-Object = collections.namedtuple('Object', ('id', 'label', 'score', 'bbox'))
-Object.__str__ = lambda self: 'Object(id=%d, label=%s, score=%.2f, %s)' % self
-
 def size_em(length):
     return '%sem' % str(0.6 * (length + 1))
 
@@ -76,7 +65,7 @@
 
     return lambda obj_id: 'white'
 
-def overlay(title, objs, get_color, inference_time, inference_rate, layout):
+def overlay(title, objs, get_color, labels, inference_time, inference_rate, layout):
     x0 = 0
     y0 = 0
     width, height = layout.render_size
@@ -91,14 +80,20 @@
 
     for obj in objs:
         percent = int(100 * obj.score)
-        if obj.label:
-            caption = '%d%% %s' % (percent, obj.label)
+        if labels:
+            caption = '%d%% %s' % (percent, labels[obj.id])
         else:
             caption = '%d%%' % percent
 
-        x, y, w, h = obj.bbox.scale(*layout.render_size, max(width, height))
         color = get_color(obj.id)
-
+        inference_width, inference_height = layout.inference_size
+        # TODO: Align this with coral edgetpuvision once the image is drawn in a GTK window.
+        outer_size = max(width, height)
+        bbox = obj.bbox.scale(1.0 / inference_width, 1.0 / inference_height)
+        x = bbox.xmin * outer_size - (outer_size - width) / 2
+        y = bbox.ymin * outer_size - (outer_size - height) / 2
+        w = bbox.width * outer_size
+        h = bbox.height * outer_size
         doc += svg.Rect(x=x, y=y, width=w, height=h,
                         style='stroke:%s' % color, _class='bbox')
         doc += svg.Rect(x=x, y=y+h,
@@ -130,26 +125,18 @@
 
     return str(doc)
 
-
-def convert(obj, labels):
-    x0, y0, x1, y1 = obj.bounding_box.flatten().tolist()
-    return Object(id=obj.label_id,
-                  label=labels[obj.label_id] if labels else None,
-                  score=obj.score,
-                  bbox=BBox(x=x0, y=y0, w=x1 - x0, h=y1 - y0))
-
 def print_results(inference_rate, objs):
     print('\nInference (rate=%.2f fps):' % inference_rate)
     for i, obj in enumerate(objs):
-        print('    %d: %s, area=%.2f' % (i, obj, obj.bbox.area()))
+        print('    %d: %s, area=%.2f' % (i, obj, obj.bbox.area))
 
 def render_gen(args):
     fps_counter  = utils.avg_fps_counter(30)
 
-    engines, titles = utils.make_engines(args.model, DetectionEngine)
-    assert utils.same_input_image_sizes(engines)
-    engines = itertools.cycle(engines)
-    engine = next(engines)
+    interpreters, titles = utils.make_interpreters(args.model)
+    assert utils.same_input_image_sizes(interpreters)
+    interpreters = itertools.cycle(interpreters)
+    interpreter = next(interpreters)
 
     labels = utils.load_labels(args.labels) if args.labels else None
     filtered_labels = set(l.strip() for l in args.filter.split(',')) if args.filter else None
@@ -157,7 +144,8 @@
 
     draw_overlay = True
 
-    yield utils.input_image_size(engine)
+    width, height = utils.input_image_size(interpreter)
+    yield width, height
 
     output = None
     while True:
@@ -165,32 +153,33 @@
 
         inference_rate = next(fps_counter)
         if draw_overlay:
-            _, _, _, c = engine.get_input_tensor_shape()
-            if c == 1:
+            input_shape = interpreter.get_input_details()[0]['shape']
+            if input_shape[3] == 1:
                 image = image.convert('L')
             tensor = np.asarray(image).flatten()
             start = time.monotonic()
-            objs = engine.detect_with_input_tensor(tensor, threshold=args.threshold, top_k=args.top_k)
+            edgetpu.run_inference(interpreter, tensor)
             inference_time = time.monotonic() - start
-            objs = [convert(obj, labels) for obj in objs]
 
+            objs = detect.get_objects(interpreter, args.threshold)[:args.top_k]
             if labels and filtered_labels:
-                objs = [obj for obj in objs if obj.label in filtered_labels]
+                objs = [obj for obj in objs if labels[obj.id] in filtered_labels]
 
-            objs = [obj for obj in objs if args.min_area <= obj.bbox.area() <= args.max_area]
+            objs = [obj for obj in objs
+                    if args.min_area <= obj.bbox.scale(1.0 / width, 1.0 / height).area <= args.max_area]
 
             if args.print:
                 print_results(inference_rate, objs)
 
-            title = titles[engine]
-            output = overlay(title, objs, get_color, inference_time, inference_rate, layout)
+            title = titles[interpreter]
+            output = overlay(title, objs, get_color, labels, inference_time, inference_rate, layout)
         else:
             output = None
 
         if command == 'o':
             draw_overlay = not draw_overlay
         elif command == 'n':
-            engine = next(engines)
+            interpreter = next(interpreters)
 
 def add_render_gen_args(parser):
     parser.add_argument('--model',
diff --git a/edgetpuvision/utils.py b/edgetpuvision/utils.py
index df0892d..8a06c05 100644
--- a/edgetpuvision/utils.py
+++ b/edgetpuvision/utils.py
@@ -17,6 +17,9 @@
 import re
 import time
 
+from pycoral.adapters import common
+from pycoral.utils import edgetpu
+
 LABEL_PATTERN = re.compile(r'\s*(\d+)(.+)')
 
 def load_labels(path):
@@ -24,13 +27,11 @@
        lines = (LABEL_PATTERN.match(line).groups() for line in f.readlines())
        return {int(num): text.strip() for num, text in lines}
 
+def input_image_size(interpreter):
+    return common.input_size(interpreter)
 
-def input_image_size(engine):
-    _, h, w, _ = engine.get_input_tensor_shape()
-    return w, h
-
-def same_input_image_sizes(engines):
-    return len({input_image_size(engine) for engine in engines}) == 1
+def same_input_image_sizes(interpreters):
+    return len({input_image_size(interpreter) for interpreter in interpreters}) == 1
 
 def avg_fps_counter(window_size):
     window = collections.deque(maxlen=window_size)
@@ -43,14 +44,15 @@
         prev = curr
         yield len(window) / sum(window)
 
-def make_engines(models, engine_class):
-    engines, titles = [], {}
+def make_interpreters(models):
+    interpreters, titles = [], {}
     for model in models.split(','):
         if '@' in model:
             model_path, title = model.split('@')
         else:
             model_path, title = model, os.path.basename(os.path.normpath(model))
-        engine = engine_class(model_path)
-        engines.append(engine)
-        titles[engine] = title
-    return engines, titles
+        interpreter = edgetpu.make_interpreter(model_path)
+        interpreter.allocate_tensors()
+        interpreters.append(interpreter)
+        titles[interpreter] = title
+    return interpreters, titles
diff --git a/setup.py b/setup.py
index 1819dbb..5d2a0be 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
 
 setup(
     name='edgetpuvision',
-    version='1.0',
+    version='7.0',
     description='EdgeTPU camera API',
     long_description='API to run inference on image data coming from the camera.',
     author='Coral',
@@ -16,7 +16,7 @@
         'Pillow>=4.0.0',
         'pygobject>=3.22.0',
         'protobuf>=3.0.0',
-        'edgetpu',
+        'pycoral>=1.0.0',
     ],
     entry_points = {
         'console_scripts': ['edgetpu_classify=edgetpuvision.classify:main',
diff --git a/stdeb.cfg b/stdeb.cfg
deleted file mode 100644
index 8a71537..0000000
--- a/stdeb.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[DEFAULT]
-Depends3: python3-numpy, python3-protobuf, python3-pil, python3-gi, python3-gst-1.0, edgetpu-api