Making pipeline code more reusable.

Change-Id: I24cc5c17a316c8fc7b3477e6b93706828b00d383
diff --git a/edgetpuvision/camera.py b/edgetpuvision/camera.py
index 39ef23e..a588425 100644
--- a/edgetpuvision/camera.py
+++ b/edgetpuvision/camera.py
@@ -10,8 +10,7 @@
 
 class Camera:
     def __init__(self, render_size, inference_size):
-        self._render_size = Size(*render_size)
-        self._inference_size = Size(*inference_size)
+        self._layout = gstreamer.make_layout(Size(*inference_size), Size(*render_size))
 
         self._loop = gstreamer.loop()
         self._thread = None
@@ -20,20 +19,18 @@
 
     @property
     def resolution(self):
-        return self._render_size
+        return self._layout.render_size
 
     def request_key_frame(self):
         pass
 
     def start_recording(self, obj, format, profile, inline_headers, bitrate, intra_period):
-        layout = gstreamer.make_layout(self._inference_size, self._render_size)
-
         def on_buffer(data, _):
             obj.write(data)
 
         def on_image(data, _):
             if self.on_image:
-                self.on_image(np.frombuffer(data, dtype=np.uint8), layout)
+                self.on_image(np.frombuffer(data, dtype=np.uint8), self._layout)
 
         signals = {
           'h264sink': {'new-sample': gstreamer.new_sample_callback(on_buffer)},
@@ -60,7 +57,7 @@
         self._filename = filename
 
     def make_pipeline(self, fmt, profile, inline_headers, bitrate, intra_period):
-        return pipelines.video_streaming_pipeline(self._filename, self._render_size, self._inference_size)
+        return pipelines.video_streaming_pipeline(self._filename, self._layout)
 
 class V4L2Camera(Camera):
     def __init__(self, fmt, inference_size):
@@ -68,8 +65,7 @@
         self._fmt = fmt
 
     def make_pipeline(self, fmt, profile, inline_headers, bitrate, intra_period):
-        return pipelines.camera_streaming_pipeline(self._fmt, profile, bitrate,
-                                                   self._render_size, self._inference_size)
+        return pipelines.camera_streaming_pipeline(self._fmt, profile, bitrate, self._layout)
 
 def make_camera(source, inference_size):
     fmt = parse_format(source)
diff --git a/edgetpuvision/gstreamer.py b/edgetpuvision/gstreamer.py
index 352a95e..874cecf 100644
--- a/edgetpuvision/gstreamer.py
+++ b/edgetpuvision/gstreamer.py
@@ -34,6 +34,7 @@
 
 COMMAND_SAVE_FRAME = ' '
 COMMAND_PRINT_INFO = 'p'
+COMMAND_QUIT       = 'q'
 
 class Display(enum.Enum):
     FULLSCREEN = 'fullscreen'
@@ -96,13 +97,13 @@
             f.write(overlay)
         print('Overlay saved as "%s"' % name)
 
-
-Layout = collections.namedtuple('Layout', ('size', 'window'))
+Layout = collections.namedtuple('Layout', ('size', 'window', 'inference_size', 'render_size'))
 
 def make_layout(inference_size, render_size):
     size = min_outer_size(inference_size, render_size)
     window = center_inside(render_size, size)
-    return Layout(size=size, window=window)
+    return Layout(size=size, window=window,
+                  inference_size=inference_size, render_size=render_size)
 
 def caps_size(caps):
     structure = caps.get_structure(0)
@@ -132,7 +133,7 @@
     buf.unmap(mapinfo)
 
 def new_sample_callback(process):
-    def callback(sink, pipeline):
+    def callback(sink, pipeline, loop):
         with pull_sample(sink) as (sample, data):
             process(data, caps_size(sample.get_caps()))
         return Gst.FlowReturn.OK
@@ -161,7 +162,7 @@
         component = pipeline.get_by_name(name)
         if component:
             for signal_name, signal_handler in signals.items():
-                component.connect(signal_name, signal_handler, pipeline)
+                component.connect(signal_name, signal_handler, pipeline, loop)
 
     # Set up a pipeline bus watch to catch errors.
     bus = pipeline.get_bus()
@@ -177,13 +178,16 @@
     finally:
         pipeline.set_state(Gst.State.NULL)
 
+    # Process all pending operations on the loop.
+    while loop.get_context().iteration(False):
+        pass
 
 def on_keypress(fd, flags, commands):
     for ch in sys.stdin.read():
         commands.put(ch)
     return True
 
-def on_new_sample(sink, pipeline, render_overlay, render_size, images, commands):
+def on_new_sample(sink, pipeline, loop, render_overlay, layout, images, commands):
     with pull_sample(sink) as (sample, data):
         custom_command = None
         save_frame = False
@@ -193,8 +197,10 @@
             save_frame = True
         elif command == COMMAND_PRINT_INFO:
             print('Timestamp: %.2f' % time.monotonic())
-            print('Render size: %d x %d' % render_size)
-            print('Inference size: %d x %d' % caps_size(sample.get_caps()))
+            print('Render size: %d x %d' % layout.render_size)
+            print('Inference size: %d x %d' % layout.inference_size)
+        elif command == COMMAND_QUIT:
+            loop.quit()
         else:
             custom_command = command
 
@@ -205,7 +211,7 @@
             overlay.set_property('data', svg)
 
         if save_frame:
-            images.put((data, caps_size(sample.get_caps()), svg))
+            images.put((data, layout.inference_size, svg))
 
     return Gst.FlowReturn.OK
 
@@ -219,55 +225,50 @@
         display=display)
 
 def run(inference_size, render_overlay, *, source, downscale, display):
-    fmt = parse_format(source)
-    if fmt:
-        run_camera(inference_size, render_overlay, fmt, display)
-        return True
-
-    filename = os.path.expanduser(source)
-    if os.path.isfile(filename):
-        run_file(inference_size, render_overlay,
-                 filename=filename,
-                 downscale=downscale,
-                 display=display)
+    result = get_pipeline(source, inference_size, downscale, display)
+    if result:
+        layout, pipeline = result
+        run_loop(pipeline, layout, render_overlay)
         return True
 
     return False
 
-
-def run_camera(inference_size, render_overlay, fmt, display):
+def get_pipeline(source, inference_size, downscale, display):
     inference_size = Size(*inference_size)
-    render_size = fmt.size
+    fmt = parse_format(source)
+    if fmt:
+        layout = make_layout(inference_size, fmt.size)
+        return layout, camera_pipeline(fmt, layout, display)
 
+    filename = os.path.expanduser(source)
+    if os.path.isfile(filename):
+        info = get_video_info(filename)
+        render_size = Size(info.get_width(), info.get_height()) / downscale
+        layout = make_layout(inference_size, render_size)
+        return layout, file_pipeline(filename, info, layout, display)
+
+    return None
+
+def camera_pipeline(fmt, layout, display):
     if display is Display.NONE:
-        pipeline = camera_headless_pipeline(fmt, render_size, inference_size)
+        return camera_headless_pipeline(fmt, layout)
     else:
-        pipeline = camera_display_pipeline(fmt, render_size, inference_size,
-                                           display is Display.FULLSCREEN)
+        return camera_display_pipeline(fmt, layout, display is Display.FULLSCREEN)
 
-    return run_loop(pipeline, inference_size, render_size, render_overlay)
-
-
-def run_file(inference_size, render_overlay, *, filename, downscale, display):
-    inference_size = Size(*inference_size)
-    info = get_video_info(filename)
-    render_size = Size(info.get_width(), info.get_height()) / downscale
-
+def file_pipeline(filename, info, layout, display):
     if display is Display.NONE:
         if info.is_image():
-            pipeline = image_headless_pipeline(filename, render_size, inference_size)
+            return image_headless_pipeline(filename, layout)
         else:
-            pipeline = video_headless_pipeline(filename, render_size, inference_size)
+            return video_headless_pipeline(filename, layout)
     else:
         fullscreen = display is Display.FULLSCREEN
         if info.is_image():
-            pipeline = image_display_pipeline(filename, render_size, inference_size, fullscreen)
+            return image_display_pipeline(filename, layout, fullscreen)
         else:
-            pipeline = video_display_pipeline(filename, render_size, inference_size, fullscreen)
+            return video_display_pipeline(filename, layout, fullscreen)
 
-    return run_loop(pipeline, inference_size, render_size, render_overlay)
-
-def run_loop(pipeline, inference_size, render_size, render_overlay):
+def run_loop(pipeline, layout, render_overlay):
     loop = GLib.MainLoop()
     commands = queue.Queue()
 
@@ -281,12 +282,8 @@
 
         run_pipeline(loop, pipeline, {'appsink': {'new-sample':
             functools.partial(on_new_sample,
-                render_overlay=functools.partial(render_overlay,
-                                                 layout=make_layout(inference_size, render_size)),
-                render_size=render_size,
+                render_overlay=functools.partial(render_overlay, layout=layout),
+                layout=layout,
                 images=images,
                 commands=commands)}
         })
-
-    while GLib.MainContext.default().iteration(False):
-        pass
diff --git a/edgetpuvision/pipelines.py b/edgetpuvision/pipelines.py
index 70d1d88..3596ca6 100644
--- a/edgetpuvision/pipelines.py
+++ b/edgetpuvision/pipelines.py
@@ -19,21 +19,20 @@
 def h264_sink():
     return Sink('app', name='h264sink', emit_signals=True, max_buffers=1, drop=False, sync=False)
 
-def inference_pipeline(render_size, inference_size):
-    size = max_inner_size(render_size, inference_size)
+def inference_pipeline(layout):
+    size = max_inner_size(layout.render_size, layout.inference_size)
     return [
         Filter('glfilterbin', filter='glcolorscale'),
         Caps('video/x-raw', format='RGBA', width=size.width, height=size.height),
         Filter('videoconvert'),
         Caps('video/x-raw', format='RGB', width=size.width, height=size.height),
         Filter('videobox', autocrop=True),
-        Caps('video/x-raw', width=inference_size.width, height=inference_size.height),
+        Caps('video/x-raw', width=layout.inference_size.width, height=layout.inference_size.height),
         Sink('app', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False),
     ]
 
 # Display
-def image_display_pipeline(filename, render_size, inference_size, fullscreen):
-    size = max_inner_size(render_size, inference_size)
+def image_display_pipeline(filename, layout, fullscreen):
     return (
         [decoded_file_src(filename),
          Tee(name='t')],
@@ -42,17 +41,17 @@
          Filter('imagefreeze'),
          Filter('videoconvert'),
          Filter('videoscale'),
-         Caps('video/x-raw', width=render_size.width, height=render_size.height),
+         Caps('video/x-raw', width=layout.render_size.width, height=layout.render_size.height),
          Filter('rsvgoverlay', name='overlay'),
          display_sink(fullscreen)],
         [Pad('t'),
          Queue(),
          Filter('imagefreeze'),
          Filter('glupload'),
-         inference_pipeline(render_size, inference_size)],
+         inference_pipeline(layout)],
     )
 
-def video_display_pipeline(filename, render_size, inference_size, fullscreen):
+def video_display_pipeline(filename, layout, fullscreen):
     return (
         [decoded_file_src(filename),
          Filter('glupload'),
@@ -61,14 +60,14 @@
          Queue(max_size_buffers=1),
          Filter('glfilterbin', filter='glcolorscale'),
          Filter('rsvgoverlay', name='overlay'),
-         Caps('video/x-raw', width=render_size.width, height=render_size.height),
+         Caps('video/x-raw', width=layout.render_size.width, height=layout.render_size.height),
          display_sink(fullscreen)],
         [Pad('t'),
          Queue(max_size_buffers=1, leaky='downstream'),
-         inference_pipeline(render_size, inference_size)],
+         inference_pipeline(layout)],
     )
 
-def camera_display_pipeline(fmt, render_size, inference_size, fullscreen):
+def camera_display_pipeline(fmt, layout, fullscreen):
     return (
         [v4l2_src(fmt),
          Filter('glupload'),
@@ -80,34 +79,34 @@
          display_sink(fullscreen)],
         [Pad(name='t'),
          Queue(max_size_buffers=1, leaky='downstream'),
-         inference_pipeline(render_size, inference_size)],
+         inference_pipeline(layout)],
     )
 
 # Headless
-def image_headless_pipeline(filename, render_size, inference_size):
+def image_headless_pipeline(filename, layout):
     return (
       [decoded_file_src(filename),
        Filter('imagefreeze'),
        Filter('glupload'),
-       inference_pipeline(render_size, inference_size)],
+       inference_pipeline(layout)],
     )
 
-def video_headless_pipeline(filename, render_size, inference_size):
+def video_headless_pipeline(filename, layout):
     return (
         [decoded_file_src(filename),
          Filter('glupload'),
-         inference_pipeline(render_size, inference_size)],
+         inference_pipeline(layout)],
     )
 
-def camera_headless_pipeline(fmt, render_size, inference_size):
+def camera_headless_pipeline(fmt, layout):
     return (
         [v4l2_src(fmt),
          Filter('glupload'),
-         inference_pipeline(render_size, inference_size)],
+         inference_pipeline(layout)],
     )
 
 # Streaming
-def video_streaming_pipeline(filename, render_size, inference_size):
+def video_streaming_pipeline(filename, layout):
     return (
         [Source('file', location=filename),
          Filter('qtdemux'),
@@ -120,11 +119,10 @@
         [Pad('t'),
          Queue(max_size_buffers=1),
          Filter('decodebin'),
-         inference_pipeline(render_size, inference_size)],
+         inference_pipeline(layout)],
     )
 
-def camera_streaming_pipeline(fmt, profile, bitrate, render_size, inference_size):
-    size = max_inner_size(render_size, inference_size)
+def camera_streaming_pipeline(fmt, profile, bitrate, layout):
     return (
         [v4l2_src(fmt), Tee(name='t')],
         [Pad('t'),
@@ -143,5 +141,5 @@
           h264_sink()],
         [Pad('t'),
          Queue(),
-         inference_pipeline(render_size, inference_size)],
+         inference_pipeline(layout)],
     )
\ No newline at end of file