Enable edgetpudemo on Excelsior

The GL path doesn't work on MT8167. :( The pipeline needs to be
reconstructed to avoid using the GPU at all until the issue is fixed.
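
For reference, the non-GL file pipeline this CL builds corresponds
roughly to the gst-launch string below (a sketch, not part of the CL:
the video location is a placeholder and the exact caps are
illustrative; mtkmdp, rsvgoverlay and waylandsink are the elements
referenced in pipelines.py):

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)

    PIPELINE = (
        # Decode and scale with the MediaTek MDP, no GL elements involved.
        'filesrc location=/tmp/demo.mp4 ! decodebin ! '
        'mtkmdp width=1280 height=720 format=YUY2 ! '
        'queue max-size-buffers=1 leaky=downstream ! '
        'videoconvert ! video/x-raw,format=BGRA ! tee name=t '
        # Display branch: SVG overlay composited on the CPU, then the Wayland sink.
        't. ! queue max-size-buffers=1 leaky=downstream ! '
        'rsvgoverlay name=svg_overlay ! waylandsink name=glsink sync=false '
        # Inference branch: plain RGB frames handed to the appsink callback.
        't. ! queue max-size-buffers=1 leaky=downstream ! '
        'videoconvert ! video/x-raw,format=RGB ! '
        'appsink name=appsink emit-signals=true max-buffers=1 drop=true sync=false'
    )

    pipeline = Gst.parse_launch(PIPELINE)
    pipeline.set_state(Gst.State.PLAYING)

Since glfilterbin/glbox is gone, downscaling to the model input size now
happens on the CPU: the appsink callback converts the frame with PIL and
resizes it to layout.inference_size before running inference.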

This first CL only works with video file input. Other input sources
will be fixed in later CLs.

For some reason, the pipeline does not exit cleanly or support looping.
The user may need to run "reset" in the terminal to restore the
console.
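
For comparison, a normal GStreamer teardown (quit the GTK main loop on
EOS or error) is sketched below; on Excelsior this shutdown path does
not currently complete cleanly (reason still unknown), hence the
"reset" workaround. The snippet is illustrative only and uses a
stand-in test pipeline:

    import gi
    gi.require_version('Gst', '1.0')
    gi.require_version('Gtk', '3.0')
    from gi.repository import Gst, Gtk

    Gst.init(None)
    # Stand-in pipeline; the demo would use the file pipeline shown above.
    pipeline = Gst.parse_launch('videotestsrc num-buffers=100 ! fakesink')

    def on_bus_message(bus, message):
        # Shut the pipeline down and leave the GTK main loop on EOS or
        # error so the terminal is not left in a broken state.
        if message.type in (Gst.MessageType.EOS, Gst.MessageType.ERROR):
            pipeline.set_state(Gst.State.NULL)
            Gtk.main_quit()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_bus_message)

    pipeline.set_state(Gst.State.PLAYING)
    Gtk.main()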

Test: edgetpu_demo --device on Excelsior

Change-Id: I77e49d01f4716117398c72c89a2df04ba150f4aa
diff --git a/debian/control b/debian/control
index 5eacd9d..3150757 100644
--- a/debian/control
+++ b/debian/control
@@ -14,22 +14,18 @@
          gir1.2-gst-plugins-base-1.0,
          gir1.2-gstreamer-1.0,
          gir1.2-gtk-3.0,
-         gstreamer1.0-gl,
          gstreamer1.0-plugins-bad,
          gstreamer1.0-plugins-good,
          gstreamer1.0-plugins-ugly,
-         gstreamer1.0-python3-plugin-loader,
          python3-cairo,
          python3-edgetpu,
          python3-gi,
          python3-gi-cairo,
          python3-gst-1.0,
-         python3-opengl,
          python3-numpy,
          python3-pil,
          python3-protobuf,
-         weston
-Recommends: imx-gst1.0-plugin,
+         weston-mtk
 Description: EdgeTPU camera API
  API to run inference on image data coming from the camera.
 
diff --git a/edgetpuvision/detect.py b/edgetpuvision/detect.py
index 8908617..44bda57 100644
--- a/edgetpuvision/detect.py
+++ b/edgetpuvision/detect.py
@@ -73,14 +73,15 @@
     return lambda obj_id: 'white'
 
 def overlay(title, objs, get_color, inference_time, inference_rate, layout):
-    x0, y0, width, height = layout.window
+    x0 = 0
+    y0 = 0
+    width, height = layout.render_size
     font_size = 0.03 * height
 
     defs = svg.Defs()
     defs += CSS_STYLES
 
     doc = svg.Svg(width=width, height=height,
-                  viewBox='%s %s %s %s' % layout.window,
                   font_size=font_size, font_family='monospace', font_weight=500)
     doc += defs
 
@@ -91,7 +92,7 @@
         else:
             caption = '%d%%' % percent
 
-        x, y, w, h = obj.bbox.scale(*layout.size)
+        x, y, w, h = obj.bbox.scale(*layout.render_size)
         color = get_color(obj.id)
 
         doc += svg.Rect(x=x, y=y, width=w, height=h,
diff --git a/edgetpuvision/gstreamer.py b/edgetpuvision/gstreamer.py
index afed6ea..3f80304 100644
--- a/edgetpuvision/gstreamer.py
+++ b/edgetpuvision/gstreamer.py
@@ -34,10 +34,9 @@
 gi.require_version('GObject', '2.0')
 gi.require_version('Gst', '1.0')
 gi.require_version('GstBase', '1.0')
-gi.require_version('GstGL', '1.0')
 gi.require_version('GstPbutils', '1.0')
 gi.require_version('GstVideo', '1.0')
-from gi.repository import GLib, GObject, Gst, GstBase, GstGL, GstVideo, Gtk
+from gi.repository import GLib, GObject, Gst, GstBase, GstVideo, Gtk
 
 GObject.threads_init()
 Gst.init([])
@@ -140,7 +139,11 @@
 
 def make_layout(inference_size, render_size):
     inference_size = Size(*inference_size)
+    # Cap render width at 1280, scaling height to keep the aspect ratio.
     render_size = Size(*render_size)
+    width, height = render_size
+    if width > 1280:
+        render_size = render_size * 1280 / width
     size = min_outer_size(inference_size, render_size)
     window = center_inside(render_size, size)
     return Layout(size=size, window=window,
@@ -225,12 +228,16 @@
             Gtk.main_quit()
         else:
             custom_command = command
-
-        svg = render_overlay(np.frombuffer(data, dtype=np.uint8),
+        # Resize the image before it is consumed by the model.
+        inference_img = Image.frombytes('RGB', caps_size(sample.get_caps()), data, 'raw')
+        image_width, image_height = inference_img.size
+        inference_img = inference_img.resize(layout.inference_size, Image.NEAREST)
+        svg = render_overlay(np.asarray(inference_img).flatten(),
                              command=custom_command)
-        glsink = pipeline.get_by_name('glsink')
-        if glsink:
-            glsink.set_property('svg', svg)
+
+        svg_overlay = pipeline.get_by_name('svg_overlay')
+        if svg_overlay:
+            svg_overlay.set_property('data', svg)
 
         if save_frame:
             images.put((data, layout.inference_size, svg))
@@ -316,30 +323,21 @@
                     allocation.width, allocation.height)
             return False
 
-        window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
-        window.set_title(WINDOW_TITLE)
-        window.set_default_size(layout.render_size.width, layout.render_size.height)
-        if display is Display.FULLSCREEN:
-            window.fullscreen()
+        # TODO: re-enable this when we have proper GL support.
+        # window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
+        # window.set_title(WINDOW_TITLE)
+        # window.set_default_size(layout.render_size.width, layout.render_size.height)
+        # if display is Display.FULLSCREEN:
+        #     window.fullscreen()
 
-        drawing_area = Gtk.DrawingArea()
-        window.add(drawing_area)
-        drawing_area.realize()
+        # drawing_area = Gtk.DrawingArea()
+        # window.add(drawing_area)
+        # drawing_area.realize()
 
         glsink = pipeline.get_by_name('glsink')
-        glsink.connect('drawn', on_gl_draw, drawing_area)
 
-        # Wayland window handle.
-        wl_handle = glsink.get_wayland_window_handle(drawing_area)
-        glsink.set_window_handle(wl_handle)
-
-        # Wayland display context wrapped as a GStreamer context.
-        wl_display = glsink.get_default_wayland_display_context()
-        glsink.set_context(wl_display)
-
-        drawing_area.connect('configure-event', on_widget_configure, glsink)
-        window.connect('delete-event', Gtk.main_quit)
-        window.show_all()
+        # window.connect('delete-event', Gtk.main_quit)
+        # window.show_all()
 
         # The appsink pipeline branch must use the same GL display as the screen
         # rendering so they get the same GL context. This isn't automatically handled
@@ -347,12 +345,13 @@
         def on_bus_message_sync(bus, message, glsink):
             if message.type == Gst.MessageType.NEED_CONTEXT:
                 _, context_type = message.parse_context_type()
-                if context_type == GstGL.GL_DISPLAY_CONTEXT_TYPE:
+                if context_type == 'gst.gl.GLDisplay':
                     sinkelement = glsink.get_by_interface(GstVideo.VideoOverlay)
                     gl_context = sinkelement.get_property('context')
                     if gl_context:
-                        display_context = Gst.Context.new(GstGL.GL_DISPLAY_CONTEXT_TYPE, True)
-                        GstGL.context_set_gl_display(display_context, gl_context.get_display())
+                        display_context = Gst.Context.new('gst.gl.GLDisplay', True)
+                        display_structure = display_context.writable_structure()
+                        display_structure.set_value('gst.gl.GLDisplay', gl_context.get_display())
                         message.src.set_context(display_context)
             return Gst.BusSyncReply.PASS
 
diff --git a/edgetpuvision/pipelines.py b/edgetpuvision/pipelines.py
index 95eaa50..33daaea 100644
--- a/edgetpuvision/pipelines.py
+++ b/edgetpuvision/pipelines.py
@@ -18,6 +18,10 @@
     return [
         Source('file', location=filename),
         Filter('decodebin'),
+        Filter('mtkmdp width=1280 height=720 format=YUY2'),
+        Queue(max_size_buffers=1, leaky='downstream'),
+        Filter('videoconvert'),
+        Caps('video/x-raw', format='BGRA'),
     ]
 
 def v4l2_src(fmt):
@@ -28,7 +32,10 @@
     ]
 
 def display_sink():
-    return Sink('glsvgoverlay', name='glsink'),
+    return [
+        Filter('rsvgoverlay', name='svg_overlay'),
+        Sink('wayland', name='glsink', sync='false')
+    ]
 
 def h264_sink():
     return Sink('app', name='h264sink', emit_signals=True, max_buffers=1, drop=False, sync=False)
@@ -36,8 +43,8 @@
 def inference_pipeline(layout, stillimage=False):
     size = max_inner_size(layout.render_size, layout.inference_size)
     return [
-        Filter('glfilterbin', filter='glbox'),
-        Caps('video/x-raw', format='RGB', width=layout.inference_size.width, height=layout.inference_size.height),
+        Filter('videoconvert'),
+        Caps('video/x-raw', format='RGB'),
         Sink('app', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False),
     ]
 
@@ -47,7 +54,6 @@
         [decoded_file_src(filename),
          Filter('imagefreeze'),
          Caps('video/x-raw', framerate='30/1'),
-         Filter('glupload'),
          Tee(name='t')],
         [Pad('t'),
          Queue(),
@@ -61,10 +67,9 @@
 def video_display_pipeline(filename, layout):
     return (
         [decoded_file_src(filename),
-         Filter('glupload'),
          Tee(name='t')],
         [Pad('t'),
-         Queue(),
+         Queue(max_size_buffers=1, leaky='downstream'),
          display_sink()],
         [Pad('t'),
          Queue(max_size_buffers=1, leaky='downstream'),
@@ -74,7 +79,6 @@
 def camera_display_pipeline(fmt, layout):
     return (
         [v4l2_src(fmt),
-         Filter('glupload'),
          Tee(name='t')],
         [Pad('t'),
          Queue(),
@@ -89,21 +93,18 @@
     return (
       [decoded_file_src(filename),
        Filter('imagefreeze'),
-       Filter('glupload'),
        inference_pipeline(layout)],
     )
 
 def video_headless_pipeline(filename, layout):
     return (
         [decoded_file_src(filename),
-         Filter('glupload'),
          inference_pipeline(layout)],
     )
 
 def camera_headless_pipeline(fmt, layout):
     return (
         [v4l2_src(fmt),
-         Filter('glupload'),
          inference_pipeline(layout)],
     )