Add --loop command line parameter for video.

When --loop is set and the source is a seekable video file, the
pipeline seeks back to the start on EOS instead of quitting; live
camera input is never looped. Also rename V4L2Camera to DeviceCamera
and inline the signals=None default handling in run_pipeline().

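Example (demo module name assumed; both the app and server parsers
gain the flag):

  python3 -m edgetpuvision.detect --source ~/video.mp4 --loop
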
Change-Id: I2e10f02273cde7f39ffc86c12085e66ddba741a2
diff --git a/edgetpuvision/apps.py b/edgetpuvision/apps.py
index 918ed74..9c5febd 100644
--- a/edgetpuvision/apps.py
+++ b/edgetpuvision/apps.py
@@ -19,12 +19,14 @@
                         default='/dev/video0:YUY2:640x480:30/1')
     parser.add_argument('--bitrate', type=int, default=1000000,
                         help='Video streaming bitrate (bit/s)')
+    parser.add_argument('--loop', default=False, action='store_true',
+                        help='Loop input video file')
 
     add_render_gen_args(parser)
     args = parser.parse_args()
 
     gen = render_gen(args)
-    camera = make_camera(args.source, next(gen))
+    camera = make_camera(args.source, next(gen), args.loop)
     assert camera is not None
 
     with StreamingServer(camera, args.bitrate) as server:
@@ -43,6 +45,8 @@
                         default='/dev/video0:YUY2:1280x720:30/1')
     parser.add_argument('--downscale', type=float, default=2.0,
                         help='Downscale factor for .mp4 file rendering')
+    parser.add_argument('--loop', default=False, action='store_true',
+                        help='Loop input video file')
     parser.add_argument('--displaymode', type=Display, choices=Display, default=Display.FULLSCREEN,
                         help='Display mode')
     add_render_gen_args(parser)
@@ -51,5 +55,6 @@
     if not run_gen(render_gen(args),
                    source=args.source,
                    downscale=args.downscale,
+                   loop=args.loop,
                    display=args.displaymode):
         print('Invalid source argument:', args.source)
diff --git a/edgetpuvision/camera.py b/edgetpuvision/camera.py
index 590c4df..ded5e46 100644
--- a/edgetpuvision/camera.py
+++ b/edgetpuvision/camera.py
@@ -7,9 +7,9 @@
 from .gst import *
 
 class Camera:
-    def __init__(self, render_size, inference_size):
+    def __init__(self, render_size, inference_size, loop):
         self._layout = gstreamer.make_layout(inference_size, render_size)
-
+        self._loop = loop
         self._thread = None
         self.render_overlay = None
 
@@ -36,8 +36,9 @@
         pipeline = self.make_pipeline(format, profile, inline_headers, bitrate, intra_period)
 
         self._thread = threading.Thread(target=gstreamer.run_pipeline,
-                                        args=(pipeline, self._layout, render_overlay,
-                                              gstreamer.Display.NONE, False, signals))
+                                        args=(pipeline, self._layout, self._loop,
+                                              render_overlay, gstreamer.Display.NONE,
+                                              False, signals))
         self._thread.start()
 
     def stop_recording(self):
@@ -48,29 +49,31 @@
         raise NotImplemented
 
 class FileCamera(Camera):
-    def __init__(self, filename, inference_size):
+    def __init__(self, filename, inference_size, loop):
         info = gstreamer.get_video_info(filename)
-        super().__init__((info.get_width(), info.get_height()), inference_size)
+        super().__init__((info.get_width(), info.get_height()), inference_size,
+                         loop=loop)
         self._filename = filename
 
     def make_pipeline(self, fmt, profile, inline_headers, bitrate, intra_period):
         return pipelines.video_streaming_pipeline(self._filename, self._layout)
 
-class V4L2Camera(Camera):
+class DeviceCamera(Camera):
     def __init__(self, fmt, inference_size):
-        super().__init__(fmt.size, inference_size)
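+        # Live camera input is not seekable, so it never loops.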
+        super().__init__(fmt.size, inference_size, loop=False)
         self._fmt = fmt
 
     def make_pipeline(self, fmt, profile, inline_headers, bitrate, intra_period):
         return pipelines.camera_streaming_pipeline(self._fmt, profile, bitrate, self._layout)
 
-def make_camera(source, inference_size):
+def make_camera(source, inference_size, loop):
     fmt = parse_format(source)
     if fmt:
-        return V4L2Camera(fmt, inference_size)
+        return DeviceCamera(fmt, inference_size)
 
     filename = os.path.expanduser(source)
     if os.path.isfile(filename):
-        return FileCamera(filename, inference_size)
+        return FileCamera(filename, inference_size, loop)
 
     return None
diff --git a/edgetpuvision/gstreamer.py b/edgetpuvision/gstreamer.py
index f39775b..6c0018f 100644
--- a/edgetpuvision/gstreamer.py
+++ b/edgetpuvision/gstreamer.py
@@ -145,6 +145,15 @@
     assert len(streams) == 1
     return streams[0]
 
+def is_seekable(element):
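+    # A pipeline can only be looped if its source supports seeking in
+    # TIME format; file sources generally do, live cameras do not.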
+    query = Gst.Query.new_seeking(Gst.Format.TIME)
+    if element.query(query):
+        _, seekable, _, _ = query.parse_seeking()
+        return seekable
+    return False
+
 @contextlib.contextmanager
 def pull_sample(sink):
     sample = sink.emit('pull-sample')
@@ -162,9 +171,16 @@
         return Gst.FlowReturn.OK
     return callback
 
-def on_bus_message(bus, message):
+def on_bus_message(bus, message, pipeline, loop):
     if message.type == Gst.MessageType.EOS:
-        Gtk.main_quit()
+        if loop and is_seekable(pipeline):
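+            # FLUSH discards buffered data, KEY_UNIT snaps the seek to
+            # the nearest keyframe; if the seek fails, quit instead.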
+            flags = Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT
+            if not pipeline.seek_simple(Gst.Format.TIME, flags, 0):
+                Gtk.main_quit()
+        else:
+            Gtk.main_quit()
     elif message.type == Gst.MessageType.WARNING:
         err, debug = message.parse_warning()
         sys.stderr.write('Warning: %s: %s\n' % (err, debug))
@@ -201,20 +217,21 @@
 
     return Gst.FlowReturn.OK
 
-def run_gen(render_overlay_gen, *, source, downscale, display):
+def run_gen(render_overlay_gen, *, source, downscale, loop, display):
     inference_size = render_overlay_gen.send(None)  # Initialize.
     return run(inference_size,
         lambda tensor, layout, command:
             render_overlay_gen.send((tensor, layout, command)),
         source=source,
         downscale=downscale,
+        loop=loop,
         display=display)
 
-def run(inference_size, render_overlay, *, source, downscale, display):
+def run(inference_size, render_overlay, *, source, downscale, loop, display):
     result = get_pipeline(source, inference_size, downscale, display)
     if result:
         layout, pipeline = result
-        run_pipeline(pipeline, layout, render_overlay, display)
+        run_pipeline(pipeline, layout, loop, render_overlay, display)
         return True
 
     return False
@@ -256,9 +273,7 @@
 def quit():
     Gtk.main_quit()
 
-def run_pipeline(pipeline, layout, render_overlay, display, handle_sigint=True, signals=None):
-    signals = signals or {}
-
+def run_pipeline(pipeline, layout, loop, render_overlay, display, handle_sigint=True, signals=None):
     # Create pipeline
     pipeline = describe(pipeline)
     print(pipeline)
@@ -305,7 +320,7 @@
                 layout=layout,
                 images=images,
                 get_command=get_command)},
-            **signals
+            **(signals or {})
         }
 
         for name, signals in signals.items():
@@ -317,7 +332,8 @@
         # Set up a pipeline bus watch to catch errors.
         bus = pipeline.get_bus()
         bus.add_signal_watch()
-        bus.connect('message', on_bus_message)
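+        # Extra connect() arguments are forwarded to the handler.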
+        bus.connect('message', on_bus_message, pipeline, loop)
 
         # Handle signals.
         if handle_sigint: