Move gstreamer pipelines into a separate file.

Extract the pipeline builders from gstreamer.py into a new pipelines.py
module and rename them for consistency: sink() -> display_sink(),
h264sink() -> h264_sink(), image_file_pipeline() -> image_display_pipeline(),
video_file_pipeline() -> video_display_pipeline(), video_camera_pipeline() ->
camera_display_pipeline(), and file_streaming_pipeline() ->
video_streaming_pipeline(). Callers in camera.py and gstreamer.py are
updated accordingly.
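
For reference, a minimal usage sketch of the new module (the file name
and sizes below are hypothetical; Size is assumed to come from
edgetpuvision.gst, as it does for the existing callers):

    from edgetpuvision import pipelines
    from edgetpuvision.gst import Size

    render_size = Size(640, 480)      # hypothetical display size
    inference_size = Size(300, 300)   # hypothetical model input size

    # Same call camera.py now makes for file sources: a pipeline
    # description that decodes for inference and exposes the H.264
    # byte-stream through an appsink named 'h264sink'.
    pipeline = pipelines.video_streaming_pipeline(
        'video.mp4', render_size, inference_size)
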
Change-Id: I6e688f79dac854544cfe8fd18d24623de9b852d5
diff --git a/edgetpuvision/camera.py b/edgetpuvision/camera.py
index bcaa9b3..61f83ee 100644
--- a/edgetpuvision/camera.py
+++ b/edgetpuvision/camera.py
@@ -4,6 +4,8 @@
import numpy as np
from . import gstreamer
+from . import pipelines
+
from .gst import *
class Camera:
@@ -60,7 +62,7 @@
self._filename = filename
def make_pipeline(self, fmt, profile, inline_headers, bitrate, intra_period):
- return gstreamer.file_streaming_pipeline(self._filename, self._render_size, self._inference_size)
+ return pipelines.video_streaming_pipeline(self._filename, self._render_size, self._inference_size)
class V4L2Camera(Camera):
def __init__(self, fmt, inference_size):
@@ -69,8 +71,8 @@
def make_pipeline(self, fmt, profile, inline_headers, bitrate, intra_period):
return (
- gstreamer.v4l2_camera(self._fmt),
- gstreamer.camera_streaming_pipeline(profile, bitrate, self._render_size, self._inference_size)
+ pipelines.v4l2_camera(self._fmt),
+ pipelines.camera_streaming_pipeline(profile, bitrate, self._render_size, self._inference_size)
)
def make_camera(source, inference_size):
diff --git a/edgetpuvision/gstreamer.py b/edgetpuvision/gstreamer.py
index d2ebe95..7266845 100644
--- a/edgetpuvision/gstreamer.py
+++ b/edgetpuvision/gstreamer.py
@@ -28,6 +28,7 @@
from PIL import Image
from .gst import *
+from .pipelines import *
COMMAND_SAVE_FRAME = ' '
COMMAND_PRINT_INFO = 'p'
@@ -73,140 +74,6 @@
commands.put(None)
thread.join()
-def sink(fullscreen, sync=False):
- return Filter('kmssink' if fullscreen else 'waylandsink', sync=sync),
-
-def inference_pipeline(render_size, inference_size):
- size = max_inner_size(render_size, inference_size)
- return (
- Filter('glfilterbin', filter='glcolorscale'),
- Caps('video/x-raw', format='RGBA', width=size.width, height=size.height),
- Filter('videoconvert'),
- Caps('video/x-raw', format='RGB', width=size.width, height=size.height),
- Filter('videobox', autocrop=True),
- Caps('video/x-raw', width=inference_size.width, height=inference_size.height),
- Filter('appsink', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False)
- )
-
-def image_file_pipeline(filename, render_size, inference_size, fullscreen):
- size = max_inner_size(render_size, inference_size)
- return (
- Filter('filesrc', location=filename),
- Filter('decodebin'),
- Filter('videoconvert'),
- Caps('video/x-raw', format='RGB'),
- Filter('imagefreeze'),
- Tee(pads=((
- Queue(max_size_buffers=1),
- Filter('videoconvert'),
- Filter('videoscale'),
- Filter('rsvgoverlay', name='overlay'),
- Caps('video/x-raw', width=render_size.width, height=render_size.height),
- sink(fullscreen),
- ),(
- Queue(max_size_buffers=1),
- Filter('videoconvert'),
- Filter('videoscale'),
- Caps('video/x-raw', width=size.width, height=size.height),
- Filter('videobox', autocrop=True),
- Caps('video/x-raw', width=inference_size.width, height=inference_size.height),
- Filter('appsink', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False)
- )))
- )
-
-def video_file_pipeline(filename, render_size, inference_size, fullscreen):
- return (
- Filter('filesrc', location=filename),
- Filter('qtdemux'),
- Filter('h264parse'),
- Filter('vpudec'),
- Filter('glupload'),
- Tee(pads=((
- Queue(max_size_buffers=1),
- Filter('glfilterbin', filter='glcolorscale'),
- Filter('rsvgoverlay', name='overlay'),
- Caps('video/x-raw', width=render_size.width, height=render_size.height),
- sink(fullscreen),
- ),(
- Queue(max_size_buffers=1, leaky='downstream'),
- inference_pipeline(render_size, inference_size),
- )))
- )
-
-# v4l2-ctl --list-formats-ext --device /dev/video1
-def v4l2_camera(fmt):
- return (
- Filter('v4l2src', device=fmt.device),
- Caps('video/x-raw', format=fmt.pixel, width=fmt.size.width, height=fmt.size.height,
- framerate='%d/%d' % fmt.framerate),
- )
-
-def video_camera_pipeline(render_size, inference_size, fullscreen):
- return (
- # TODO(dkovalev): Queue(max_size_buffers=1, leaky='downstream'),
- Filter('glupload'),
- Tee(pads=((
- Queue(max_size_buffers=1, leaky='downstream'),
- Filter('glfilterbin', filter='glcolorscale'),
- Filter('rsvgoverlay', name='overlay'),
- sink(fullscreen),
- ),(
- Queue(max_size_buffers=1, leaky='downstream'),
- inference_pipeline(render_size, inference_size),
- )))
- )
-
-def h264sink(display_decoded=False):
- appsink = Filter('appsink', name='h264sink', emit_signals=True, max_buffers=1, drop=False, sync=False),
-
- if display_decoded:
- return Tee(pads=(
- (Queue(), appsink),
- (Queue(), Filter('vpudec'), Filter('kmssink', sync=False))
- ))
-
- return appsink
-
-def file_streaming_pipeline(filename, render_size, inference_size):
- return (
- Filter('filesrc', location=filename),
- Filter('qtdemux'),
- Tee(pads=((
- Queue(max_size_buffers=1),
- Filter('h264parse'),
- Filter('vpudec'),
- inference_pipeline(render_size, inference_size),
- ), (
- Queue(max_size_buffers=1),
- Filter('h264parse'),
- Caps('video/x-h264', stream_format='byte-stream', alignment='nal'),
- h264sink()
- )))
- )
-
-def camera_streaming_pipeline(profile, bitrate, render_size, inference_size):
- size = max_inner_size(render_size, inference_size)
- return (
- Tee(pads=((
- Queue(),
- inference_pipeline(render_size, inference_size)
- ), (
- Queue(max_size_buffers=1, leaky='downstream'),
- Filter('videoconvert'),
- Filter('x264enc',
- speed_preset='ultrafast',
- tune='zerolatency',
- threads=4,
- key_int_max=5,
- bitrate=int(bitrate / 1000), # kbit per second.
- aud=False),
- Caps('video/x-h264', profile=profile),
- Filter('h264parse'),
- Caps('video/x-h264', stream_format='byte-stream', alignment='nal'),
- h264sink()
- )))
- )
-
def save_frame(rgb, size, overlay=None, ext='png'):
tag = '%010d' % int(time.monotonic() * 1000)
img = Image.frombytes('RGB', size, rgb, 'raw')
@@ -338,7 +205,6 @@
return Gst.FlowReturn.OK
-
def run_gen(render_overlay_gen, *, source, downscale, fullscreen):
inference_size = render_overlay_gen.send(None) # Initialize.
return run(inference_size,
@@ -375,22 +241,20 @@
camera = v4l2_camera(fmt)
caps = next(x for x in camera if isinstance(x, Caps))
render_size = Size(caps.width, caps.height)
- pipeline = camera + video_camera_pipeline(render_size, inference_size, fullscreen)
+ pipeline = camera + camera_display_pipeline(render_size, inference_size, fullscreen)
return run_loop(pipeline, inference_size, render_size, render_overlay)
-
def run_file(inference_size, render_overlay, *, filename, downscale, fullscreen):
inference_size = Size(*inference_size)
info = get_video_info(filename)
render_size = Size(info.get_width(), info.get_height()) / downscale
if info.is_image():
- pipeline = image_file_pipeline(filename, render_size, inference_size, fullscreen)
+ pipeline = image_display_pipeline(filename, render_size, inference_size, fullscreen)
else:
- pipeline = video_file_pipeline(filename, render_size, inference_size, fullscreen)
+ pipeline = video_display_pipeline(filename, render_size, inference_size, fullscreen)
return run_loop(pipeline, inference_size, render_size, render_overlay)
-
def run_loop(pipeline, inference_size, render_size, render_overlay):
loop = GLib.MainLoop()
commands = queue.Queue()
diff --git a/edgetpuvision/pipelines.py b/edgetpuvision/pipelines.py
new file mode 100644
index 0000000..4c20d63
--- /dev/null
+++ b/edgetpuvision/pipelines.py
@@ -0,0 +1,139 @@
+from .gst import *
+
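+# v4l2-ctl --list-formats-ext --device /dev/video1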
+def v4l2_camera(fmt):
+ return (
+ Filter('v4l2src', device=fmt.device),
+ Caps('video/x-raw', format=fmt.pixel, width=fmt.size.width, height=fmt.size.height,
+ framerate='%d/%d' % fmt.framerate),
+ )
+
+def display_sink(fullscreen, sync=False):
+ return Filter('kmssink' if fullscreen else 'waylandsink', sync=sync),
+
+def h264_sink(display_decoded=False):
+ appsink = Filter('appsink', name='h264sink', emit_signals=True, max_buffers=1, drop=False, sync=False),
+
+ if display_decoded:
+ return Tee(pads=(
+ (Queue(), appsink),
+ (Queue(), Filter('vpudec'), Filter('kmssink', sync=False))
+ ))
+
+ return appsink
+
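+# Scales and center-crops frames to the inference size and hands RGB buffers to an appsink.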
+def inference_pipeline(render_size, inference_size):
+ size = max_inner_size(render_size, inference_size)
+ return (
+ Filter('glfilterbin', filter='glcolorscale'),
+ Caps('video/x-raw', format='RGBA', width=size.width, height=size.height),
+ Filter('videoconvert'),
+ Caps('video/x-raw', format='RGB', width=size.width, height=size.height),
+ Filter('videobox', autocrop=True),
+ Caps('video/x-raw', width=inference_size.width, height=inference_size.height),
+ Filter('appsink', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False)
+ )
+
+def image_display_pipeline(filename, render_size, inference_size, fullscreen):
+ size = max_inner_size(render_size, inference_size)
+ return (
+ Filter('filesrc', location=filename),
+ Filter('decodebin'),
+ Filter('videoconvert'),
+ Caps('video/x-raw', format='RGB'),
+ Filter('imagefreeze'),
+ Tee(pads=((
+ Queue(max_size_buffers=1),
+ Filter('videoconvert'),
+ Filter('videoscale'),
+ Filter('rsvgoverlay', name='overlay'),
+ Caps('video/x-raw', width=render_size.width, height=render_size.height),
+ display_sink(fullscreen),
+ ),(
+ Queue(max_size_buffers=1),
+ Filter('videoconvert'),
+ Filter('videoscale'),
+ Caps('video/x-raw', width=size.width, height=size.height),
+ Filter('videobox', autocrop=True),
+ Caps('video/x-raw', width=inference_size.width, height=inference_size.height),
+ Filter('appsink', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False)
+ )))
+ )
+
+def video_display_pipeline(filename, render_size, inference_size, fullscreen):
+ return (
+ Filter('filesrc', location=filename),
+ Filter('qtdemux'),
+ Filter('h264parse'),
+ Filter('vpudec'),
+ Filter('glupload'),
+ Tee(pads=((
+ Queue(max_size_buffers=1),
+ Filter('glfilterbin', filter='glcolorscale'),
+ Filter('rsvgoverlay', name='overlay'),
+ Caps('video/x-raw', width=render_size.width, height=render_size.height),
+ display_sink(fullscreen),
+ ),(
+ Queue(max_size_buffers=1, leaky='downstream'),
+ inference_pipeline(render_size, inference_size),
+ )))
+ )
+
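+# Meant to be appended to a camera source (e.g. v4l2_camera()); one branch renders with the SVG overlay, the other feeds inference.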
+def camera_display_pipeline(render_size, inference_size, fullscreen):
+ return (
+ # TODO(dkovalev): Queue(max_size_buffers=1, leaky='downstream'),
+ Filter('glupload'),
+ Tee(pads=((
+ Queue(max_size_buffers=1, leaky='downstream'),
+ Filter('glfilterbin', filter='glcolorscale'),
+ Filter('rsvgoverlay', name='overlay'),
+ display_sink(fullscreen),
+ ),(
+ Queue(max_size_buffers=1, leaky='downstream'),
+ inference_pipeline(render_size, inference_size),
+ )))
+ )
+
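+# One branch decodes the file for inference, the other forwards the H.264 byte-stream to h264_sink().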
+def video_streaming_pipeline(filename, render_size, inference_size):
+ return (
+ Filter('filesrc', location=filename),
+ Filter('qtdemux'),
+ Tee(pads=((
+ Queue(max_size_buffers=1),
+ Filter('h264parse'),
+ Filter('vpudec'),
+ inference_pipeline(render_size, inference_size),
+ ), (
+ Queue(max_size_buffers=1),
+ Filter('h264parse'),
+ Caps('video/x-h264', stream_format='byte-stream', alignment='nal'),
+ h264_sink()
+ )))
+ )
+
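+# One branch runs inference, the other encodes the camera feed with x264enc and forwards the H.264 byte-stream to h264_sink().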
+def camera_streaming_pipeline(profile, bitrate, render_size, inference_size):
+ size = max_inner_size(render_size, inference_size)
+ return (
+ Tee(pads=((
+ Queue(),
+ inference_pipeline(render_size, inference_size)
+ ), (
+ Queue(max_size_buffers=1, leaky='downstream'),
+ Filter('videoconvert'),
+ Filter('x264enc',
+ speed_preset='ultrafast',
+ tune='zerolatency',
+ threads=4,
+ key_int_max=5,
+ bitrate=int(bitrate / 1000), # kbit per second.
+ aud=False),
+ Caps('video/x-h264', profile=profile),
+ Filter('h264parse'),
+ Caps('video/x-h264', stream_format='byte-stream', alignment='nal'),
+ h264_sink()
+ )))
+ )