Enable glsvgoverlay for video file and camera source pipelines

Both the decoded-file and V4L2 camera sources now convert to BGRA at the
render size and upload frames to GL memory (glupload ! glcolorconvert, RGBA
textures), and the display branch renders the inference overlay with
glsvgoverlay feeding glimagesink instead of rsvgoverlay and waylandsink.
SVGs are pushed through the overlay's 'set-svg' signal together with the
buffer PTS, and appsink preroll samples are handled by a 'new-preroll'
callback in addition to 'new-sample'.

Other pipelines are untested.

Change-Id: Ib9656e24939ffa30f994c77a4e1959e409410a16
diff --git a/edgetpuvision/gstreamer.py b/edgetpuvision/gstreamer.py
index 58e6ae4..16dbdab 100644
--- a/edgetpuvision/gstreamer.py
+++ b/edgetpuvision/gstreamer.py
@@ -174,19 +174,22 @@
     return None
 
 @contextlib.contextmanager
-def pull_sample(sink):
-    sample = sink.emit('pull-sample')
+def pull_sample(sink, preroll=False):
+    if preroll:
+        sample = sink.emit('pull-preroll')
+    else:
+        sample = sink.emit('pull-sample')
     buf = sample.get_buffer()
     meta = GstVideo.buffer_get_video_meta(buf)
 
     result, mapinfo = buf.map(Gst.MapFlags.READ)
     if result:
-        yield sample, mapinfo.data, meta
+        yield sample, mapinfo.data, meta, buf.pts
     buf.unmap(mapinfo)
 
 def new_sample_callback(process):
     def callback(sink, pipeline):
-        with pull_sample(sink) as (sample, data, meta):
+        with pull_sample(sink) as (sample, data, meta, pts):
             process(data, caps_size(sample.get_caps()))
         return Gst.FlowReturn.OK
     return callback
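
For reference, a minimal sketch of how the extended pull_sample() could be
used to grab a prerolled (paused-state) frame outside the callback path. The
element name 'appsink' and the surrounding pipeline object are assumptions,
not part of this patch:

    # Hypothetical usage of pull_sample(..., preroll=True) against an appsink
    # named 'appsink'; 'data' is the mapped frame buffer and 'pts' the
    # buffer's presentation timestamp in nanoseconds.
    appsink = pipeline.get_by_name('appsink')
    with pull_sample(appsink, preroll=True) as (sample, data, meta, pts):
        print('prerolled frame: %d bytes, pts=%d ns' % (len(data), pts))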
@@ -213,8 +216,8 @@
     if overlay:
         overlay.set_eos()
 
-def on_new_sample(sink, pipeline, render_overlay, layout, images, get_command):
-    with pull_sample(sink) as (sample, data, meta):
+def on_new_sample(sink, pipeline, render_overlay, layout, images, get_command, preroll):
+    with pull_sample(sink, preroll) as (sample, data, meta, pts):
         custom_command = None
         save_frame = False
 
@@ -240,9 +243,9 @@
         svg = render_overlay(inference_img,
                              command=custom_command)
 
-        svg_overlay = pipeline.get_by_name('svg_overlay')
-        if svg_overlay:
-            svg_overlay.set_property('data', svg)
+        overlay = pipeline.get_by_name('overlay')
+        if overlay:
+            overlay.emit('set-svg', svg, pts)
 
         if save_frame:
             images.put((data, layout.inference_size, svg))
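
Passing the buffer PTS along with the SVG presumably lets the overlay element
pair each SVG with the video frame it was rendered from. A standalone sketch
of driving the signal (the SVG string and 'buf' are placeholders; the element
name and signal come from this patch):

    # Hypothetical: push a static SVG tied to a specific frame timestamp.
    overlay = pipeline.get_by_name('overlay')
    if overlay:
        svg = '<svg><text x="10" y="20" fill="white">hello</text></svg>'
        overlay.emit('set-svg', svg, buf.pts)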
@@ -340,8 +343,8 @@
         # drawing_area.realize()
 
         glsink = pipeline.get_by_name('glsink')
-        if display is Display.FULLSCREEN:
-          glsink.set_property('fullscreen', True)
+        #if display is Display.FULLSCREEN:
+        #  glsink.set_property('fullscreen', True)
 
         # window.connect('delete-event', Gtk.main_quit)
         # window.show_all()
@@ -370,7 +373,14 @@
                 render_overlay=functools.partial(render_overlay, layout=layout),
                 layout=layout,
                 images=images,
-                get_command=get_command),
+                get_command=get_command,
+                preroll=False),
+             'new-preroll': functools.partial(on_new_sample,
+                render_overlay=functools.partial(render_overlay, layout=layout),
+                layout=layout,
+                images=images,
+                get_command=get_command,
+                preroll=True),
              'eos' : on_sink_eos},
             **(signals or {})
         }
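
A hedged sketch of how this signal table is presumably consumed elsewhere in
gstreamer.py (the connection loop is outside this diff); the appsink is
created with emit_signals=True in pipelines.py, so both 'new-sample' and
'new-preroll' fire once connected:

    # Hypothetical wiring: for each named element, connect every
    # (signal, handler) pair and pass the pipeline as user data so handlers
    # receive (sink, pipeline, ...) as in on_new_sample above.
    for element_name, handlers in signals.items():
        element = pipeline.get_by_name(element_name)
        for signal_name, handler in handlers.items():
            element.connect(signal_name, handler, pipeline)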
diff --git a/edgetpuvision/pipelines.py b/edgetpuvision/pipelines.py
index 89978d7..f51a045 100644
--- a/edgetpuvision/pipelines.py
+++ b/edgetpuvision/pipelines.py
@@ -20,6 +20,9 @@
         Filter('decodebin'),
         Filter('v4l2convert'),
         Caps('video/x-raw,width=%d,height=%d,format=BGRA' % (render_size.width, render_size.height)),
+        Filter('glupload'),
+        Filter('glcolorconvert'),
+        Caps('video/x-raw(memory:GLMemory),format=RGBA'),
     ]
 
 def v4l2_src(fmt, render_size):
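
For illustration, the decoded-file source chain above now corresponds roughly
to the launch description below. The filesrc element, file name, render size
and the plain glimagesink tail are assumptions made only to keep the sketch
self-contained; the real pipeline is assembled from the Source/Filter/Caps
helpers and ends in the tee/overlay branches defined further down:

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)
    # Decode, convert to BGRA at the render size, then upload to GL memory
    # and convert to RGBA textures for the GL overlay/sink downstream.
    pipeline = Gst.parse_launch(
        'filesrc location=test.mp4 ! decodebin ! v4l2convert ! '
        'video/x-raw,width=640,height=480,format=BGRA ! '
        'glupload ! glcolorconvert ! video/x-raw(memory:GLMemory),format=RGBA ! '
        'glimagesink')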
@@ -27,14 +30,18 @@
         Source('v4l2', device=fmt.device),
         Caps('video/x-raw', format=fmt.pixel, width=fmt.size.width, height=fmt.size.height,
              framerate='%d/%d' % fmt.framerate),
-        Filter('glfilterbin filter=glbox'),
-        Caps('video/x-raw', width=render_size.width, height=render_size.height, format='BGRA'),
+        Filter('v4l2convert'),
+        Caps('video/x-raw,width=%d,height=%d,format=BGRA' % (render_size.width, render_size.height)),
+        Filter('glupload'),
+        Filter('glcolorconvert'),
+        Caps('video/x-raw(memory:GLMemory),format=RGBA'),
     ]
 
-def display_sink():
+def display_sink(sync=True):
     return [
-        Filter('rsvgoverlay', name='svg_overlay'),
-        Sink('wayland', name='glsink', sync=False)
+        Queue(),
+        Filter('glsvgoverlay', name='overlay', sync=True),
+        Sink('glimage', name='glsink', sync=sync, qos=False, max_lateness=-1)
     ]
 
 def h264_sink():
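
The new display tail (display_sink above) corresponds roughly to this launch
fragment. How the Filter/Sink helpers spell out properties (max_lateness as
max-lateness, booleans as true/false) is an assumption about the description
DSL, and glimagesink is the presumed element behind Sink('glimage'):

    # Approximate serialized form of display_sink(sync=False):
    DISPLAY_TAIL = ('queue ! '
                    'glsvgoverlay name=overlay sync=true ! '
                    'glimagesink name=glsink sync=false qos=false max-lateness=-1')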
@@ -43,9 +50,11 @@
 def inference_pipeline(layout, stillimage=False):
     size = max_inner_size(layout.render_size, layout.inference_size)
     return [
+        Queue(),
         Filter('glfilterbin', filter='glbox'),
         Caps('video/x-raw', format='RGB', width=layout.inference_size.width, height=layout.inference_size.height),
-        Sink('app', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False),
+        Queue(),
+        Sink('app', name='appsink', emit_signals=True, sync=False),
     ]
 
 # Display
@@ -69,11 +78,9 @@
         [decoded_file_src(filename, layout.render_size),
          Tee(name='t')],
         [Pad('t'),
-         Queue(max_size_buffers=1, leaky='downstream'),
-         display_sink()],
-        [Pad('t'),
-         Queue(max_size_buffers=1, leaky='downstream'),
          inference_pipeline(layout)],
+        [Pad('t'),
+         display_sink()],
     )
 
 def camera_display_pipeline(fmt, layout):
@@ -81,11 +88,9 @@
         [v4l2_src(fmt, layout.render_size),
          Tee(name='t')],
         [Pad('t'),
-         Queue(max_size_buffers=1, leaky='downstream'),
-         display_sink()],
-        [Pad(name='t'),
-         Queue(max_size_buffers=1, leaky='downstream'),
          inference_pipeline(layout)],
+        [Pad('t'),
+         display_sink(False)],
     )
 
 # Headless
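
Putting the pieces together, the camera display pipeline after this change has
roughly the topology below. This is a sketch only: the device, formats, sizes
and property spelling are assumptions, and the real pipeline is still built
from the helpers in this file rather than a literal launch string:

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)
    # v4l2 source -> BGRA at the render size -> GL memory, then tee into the
    # inference branch (glbox scale to the model input size, appsink) and the
    # display branch (glsvgoverlay + glimagesink), mirroring
    # camera_display_pipeline() above.
    pipeline = Gst.parse_launch(
        'v4l2src device=/dev/video0 ! '
        'video/x-raw,format=YUY2,width=640,height=480,framerate=30/1 ! '
        'v4l2convert ! video/x-raw,width=640,height=480,format=BGRA ! '
        'glupload ! glcolorconvert ! video/x-raw(memory:GLMemory),format=RGBA ! '
        'tee name=t '
        't. ! queue ! glfilterbin filter=glbox ! '
        'video/x-raw,format=RGB,width=224,height=224 ! queue ! '
        'appsink name=appsink emit-signals=true sync=false '
        't. ! queue ! glsvgoverlay name=overlay ! '
        'glimagesink name=glsink sync=false qos=false max-lateness=-1')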