Make server scripts functionally compatible with console ones.
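
Factor the argument parsing and main loops shared by the classify and
detect frontends into a new edgetpuvision/apps.py: run_app() drives the
local console display via run_gen(), while run_server() streams frames
and SVG overlays through StreamingServer. The old server.py entry point
is removed, and Camera now exposes a render_overlay callback with the
same (tensor, layout, command) signature as the console path, so the
same render generators work in both modes. A server frontend now
reduces to (as in classify_server.py):

    from .apps import run_server
    from .classify import add_render_gen_args, render_gen

    def main():
        run_server(add_render_gen_args, render_gen)
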
Change-Id: I3bc73cddedf8a2254192d1d4f847d70674e29e2c
diff --git a/edgetpuvision/apps.py b/edgetpuvision/apps.py
new file mode 100644
index 0000000..3aa052c
--- /dev/null
+++ b/edgetpuvision/apps.py
@@ -0,0 +1,61 @@
+import argparse
+import logging
+import signal
+
+from .camera import make_camera
+from .gstreamer import Display, run_gen
+from .streaming.server import StreamingServer
+
+from . import svg
+
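+# An empty SVG document, sent whenever the render generator yields no overlay.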
+EMPTY_SVG = str(svg.Svg())
+
+def run_server(add_render_gen_args, render_gen):
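+    """Serves the camera stream, with SVG overlays from render_gen, to network clients."""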
+ logging.basicConfig(level=logging.INFO)
+
+ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--source',
+ help='/dev/videoN:FMT:WxH:N/D or .mp4 file or image file',
+ default='/dev/video0:YUY2:1280x720:30/1')
+ parser.add_argument('--bitrate', type=int, default=1000000,
+ help='Video streaming bitrate (bit/s)')
+
+ add_render_gen_args(parser)
+ args = parser.parse_args()
+
+ gen = render_gen(args)
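+    # The generator's first yield is the model's input size, which the camera pipeline needs.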
+ camera = make_camera(args.source, next(gen))
+ assert camera is not None
+
+ with StreamingServer(camera, args.bitrate) as server:
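+        # Called for every inference result; pushes the new overlay to all connected clients.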
+ def render_overlay(tensor, layout, command):
+ overlay = gen.send((tensor, layout, command))
+ server.send_overlay(overlay if overlay else EMPTY_SVG)
+
+ camera.render_overlay = render_overlay
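+        # Block the main thread; frames are handled on the camera's pipeline thread.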
+ signal.pause()
+
+
+def run_app(add_render_gen_args, render_gen):
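+    """Renders render_gen's overlays on a local display (the console path)."""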
+ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--source',
+ help='/dev/videoN:FMT:WxH:N/D or .mp4 file or image file',
+ default='/dev/video0:YUY2:1280x720:30/1')
+    parser.add_argument('--downscale', type=float, default=2.0,
+                        help='Downscale factor for video/image file rendering')
+ parser.add_argument('--display', type=Display, choices=Display, default=Display.FULLSCREEN,
+ help='Display mode')
+ add_render_gen_args(parser)
+ args = parser.parse_args()
+
+ if not run_gen(render_gen(args),
+ source=args.source,
+ downscale=args.downscale,
+ display=args.display):
+ print('Invalid source argument:', args.source)
diff --git a/edgetpuvision/camera.py b/edgetpuvision/camera.py
index a588425..4b1c914 100644
--- a/edgetpuvision/camera.py
+++ b/edgetpuvision/camera.py
@@ -1,8 +1,6 @@
import os
import threading
-import numpy as np
-
from . import gstreamer
from . import pipelines
@@ -10,12 +8,11 @@
class Camera:
def __init__(self, render_size, inference_size):
- self._layout = gstreamer.make_layout(Size(*inference_size), Size(*render_size))
+ self._layout = gstreamer.make_layout(inference_size, render_size)
self._loop = gstreamer.loop()
self._thread = None
-
- self.on_image = None
+ self.render_overlay = None
@property
def resolution(self):
@@ -28,19 +25,20 @@
def on_buffer(data, _):
obj.write(data)
- def on_image(data, _):
- if self.on_image:
- self.on_image(np.frombuffer(data, dtype=np.uint8), self._layout)
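+        # Bridge to the render_overlay callback installed on this Camera, if any.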
+ def render_overlay(tensor, layout, command):
+ if self.render_overlay:
+ self.render_overlay(tensor, layout, command)
+ return None
signals = {
'h264sink': {'new-sample': gstreamer.new_sample_callback(on_buffer)},
- 'appsink': {'new-sample': gstreamer.new_sample_callback(on_image)},
}
pipeline = self.make_pipeline(format, profile, inline_headers, bitrate, intra_period)
- self._thread = threading.Thread(target=gstreamer.run_pipeline,
- args=(self._loop, pipeline, signals))
+ self._thread = threading.Thread(target=gstreamer.run_loop,
+ args=(self._loop, pipeline, self._layout, render_overlay, signals))
self._thread.start()
def stop_recording(self):
diff --git a/edgetpuvision/classify.py b/edgetpuvision/classify.py
index 8ca2f55..88e6147 100644
--- a/edgetpuvision/classify.py
+++ b/edgetpuvision/classify.py
@@ -14,8 +14,8 @@
from edgetpu.classification.engine import ClassificationEngine
from . import svg
+from .apps import run_app
from .utils import load_labels, input_image_size, same_input_image_sizes, avg_fps_counter
-from .gstreamer import Display, run_gen
CSS_STYLES = str(svg.CssStyle({'.txt': svg.Style(fill='white'),
@@ -114,22 +114,7 @@
help='Print inference results')
def main():
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('--source',
- help='/dev/videoN:FMT:WxH:N/D or .mp4 file or image file',
- default='/dev/video0:YUY2:1280x720:30/1')
- parser.add_argument('--downscale', type=float, default=2.0,
- help='Downscale factor for .mp4 file rendering')
- parser.add_argument('--display', type=Display, choices=Display, default=Display.FULLSCREEN,
- help='Display mode')
- add_render_gen_args(parser)
- args = parser.parse_args()
-
- if not run_gen(render_gen(args),
- source=args.source,
- downscale=args.downscale,
- display=args.display):
- print('Invalid source argument:', args.source)
+ run_app(add_render_gen_args, render_gen)
if __name__ == '__main__':
main()
diff --git a/edgetpuvision/classify_server.py b/edgetpuvision/classify_server.py
index e5c9f97..c505fa0 100644
--- a/edgetpuvision/classify_server.py
+++ b/edgetpuvision/classify_server.py
@@ -6,11 +6,11 @@
# --model ${TEST_DATA}/mobilenet_v2_1.0_224_inat_bird_quant.tflite \
# --labels ${TEST_DATA}/inat_bird_labels.txt
+from .apps import run_server
from .classify import add_render_gen_args, render_gen
-from .server import run
def main():
- run(add_render_gen_args, render_gen)
+ run_server(add_render_gen_args, render_gen)
if __name__ == '__main__':
main()
diff --git a/edgetpuvision/detect.py b/edgetpuvision/detect.py
index 7b73166..7ea68e2 100644
--- a/edgetpuvision/detect.py
+++ b/edgetpuvision/detect.py
@@ -20,8 +20,8 @@
from edgetpu.detection.engine import DetectionEngine
from . import svg
+from .apps import run_app
from .utils import load_labels, input_image_size, same_input_image_sizes, avg_fps_counter
-from .gstreamer import Display, run_gen
CSS_STYLES = str(svg.CssStyle({'.txt': svg.Style(fill='white'),
'.shd': svg.Style(fill='black', fill_opacity=0.6),
@@ -145,23 +145,7 @@
help='Print inference results')
def main():
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('--source',
- help='/dev/videoN:FMT:WxH:N/D or .mp4 file or image file',
- default='/dev/video0:YUY2:1280x720:30/1')
- parser.add_argument('--downscale', type=float, default=2.0,
- help='Downscale factor for video/image file rendering')
- parser.add_argument('--display', type=Display, choices=Display, default=Display.FULLSCREEN,
- help='Display mode')
- add_render_gen_args(parser)
- args = parser.parse_args()
-
- if not run_gen(render_gen(args),
- source=args.source,
- downscale=args.downscale,
- display=args.display):
- print('Invalid source argument:', args.source)
-
+ run_app(add_render_gen_args, render_gen)
if __name__ == '__main__':
main()
diff --git a/edgetpuvision/detect_server.py b/edgetpuvision/detect_server.py
index f711ba2..1babc37 100644
--- a/edgetpuvision/detect_server.py
+++ b/edgetpuvision/detect_server.py
@@ -11,11 +11,11 @@
# --model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
# --labels ${TEST_DATA}/coco_labels.txt
+from .apps import run_server
from .detect import add_render_gen_args, render_gen
-from .server import run
def main():
- run(add_render_gen_args, render_gen)
+ run_server(add_render_gen_args, render_gen)
if __name__ == '__main__':
main()
diff --git a/edgetpuvision/gstreamer.py b/edgetpuvision/gstreamer.py
index 874cecf..cc561ca 100644
--- a/edgetpuvision/gstreamer.py
+++ b/edgetpuvision/gstreamer.py
@@ -100,6 +100,8 @@
Layout = collections.namedtuple('Layout', ('size', 'window', 'inference_size', 'render_size'))
def make_layout(inference_size, render_size):
+ inference_size = Size(*inference_size)
+ render_size = Size(*render_size)
size = min_outer_size(inference_size, render_size)
window = center_inside(render_size, size)
return Layout(size=size, window=window,
@@ -120,7 +122,7 @@
return streams[0]
def loop():
- return GLib.MainLoop.new(None, False)
+ return GLib.MainLoop()
@contextlib.contextmanager
def pull_sample(sink):
@@ -228,13 +230,12 @@
result = get_pipeline(source, inference_size, downscale, display)
if result:
layout, pipeline = result
- run_loop(pipeline, layout, render_overlay)
+ run_loop(loop(), pipeline, layout, render_overlay)
return True
return False
def get_pipeline(source, inference_size, downscale, display):
- inference_size = Size(*inference_size)
fmt = parse_format(source)
if fmt:
layout = make_layout(inference_size, fmt.size)
@@ -245,7 +246,7 @@
info = get_video_info(filename)
render_size = Size(info.get_width(), info.get_height()) / downscale
layout = make_layout(inference_size, render_size)
- return layout, file_pipline(filename, info, layout, display)
+        return layout, file_pipeline(info.is_image(), filename, layout, display)
return None
@@ -255,21 +256,22 @@
else:
return camera_display_pipeline(fmt, layout, display is Display.FULLSCREEN)
-def file_pipline(filename, info, layout, display):
+def file_pipeline(is_image, filename, layout, display):
if display is Display.NONE:
- if info.is_image():
+ if is_image:
return image_headless_pipeline(filename, layout)
else:
return video_headless_pipeline(filename, layout)
else:
fullscreen = display is Display.FULLSCREEN
- if info.is_image():
+ if is_image:
return image_display_pipeline(filename, layout, fullscreen)
else:
return video_display_pipeline(filename, layout, fullscreen)
-def run_loop(pipeline, layout, render_overlay):
- loop = GLib.MainLoop()
+def run_loop(loop, pipeline, layout, render_overlay, signals=None):
+ signals = signals or {}
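+    # Optional per-element signal handlers (e.g. the camera's h264sink), merged with the appsink handler below.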
commands = queue.Queue()
with contextlib.ExitStack() as stack:
@@ -280,10 +281,11 @@
GLib.io_add_watch(sys.stdin.fileno(), GLib.IO_IN, on_keypress, commands)
stack.enter_context(term_raw_mode(sys.stdin.fileno()))
- run_pipeline(loop, pipeline, {'appsink': {'new-sample':
- functools.partial(on_new_sample,
+ run_pipeline(loop, pipeline, {'appsink':
+ {'new-sample': functools.partial(on_new_sample,
render_overlay=functools.partial(render_overlay, layout=layout),
layout=layout,
images=images,
- commands=commands)}
+ commands=commands)},
+ **signals
})
diff --git a/edgetpuvision/server.py b/edgetpuvision/server.py
deleted file mode 100644
index bb3c0aa..0000000
--- a/edgetpuvision/server.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import argparse
-import logging
-import signal
-
-from .camera import make_camera
-from .streaming.server import StreamingServer
-
-def run(add_render_gen_args, render_gen):
- logging.basicConfig(level=logging.INFO)
-
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('--bitrate', type=int, default=1000000,
- help='Video streaming bitrate (bit/s)')
-
- parser.add_argument('--source',
- help='/dev/videoN:FMT:WxH:N/D or .mp4 file or image file',
- default='/dev/video0:YUY2:1280x720:30/1')
- add_render_gen_args(parser)
- args = parser.parse_args()
-
- gen = render_gen(args)
- camera = make_camera(args.source, next(gen))
- assert camera is not None
-
- with StreamingServer(camera, args.bitrate) as server:
- def on_image(tensor, layout):
- overlay = gen.send((tensor, layout, None))
- server.send_overlay(overlay)
-
- camera.on_image = on_image
- signal.pause()