Remove code redundancy: group the (size, window) parameters into a Layout namedtuple, and move avg_fps_counter from gstreamer to utils so the render generators compute the inference rate themselves.
Change-Id: I08ac93ed60ab5b7d0a57b3fa2ef1649b79af7ce7
diff --git a/edgetpuvision/camera.py b/edgetpuvision/camera.py
index 4bc6249..39ef23e 100644
--- a/edgetpuvision/camera.py
+++ b/edgetpuvision/camera.py
@@ -26,16 +26,14 @@
pass
def start_recording(self, obj, format, profile, inline_headers, bitrate, intra_period):
- size = min_outer_size(self._inference_size, self._render_size)
- window = center_inside(self._render_size, size)
- fps_counter = gstreamer.avg_fps_counter(30)
+ layout = gstreamer.make_layout(self._inference_size, self._render_size)
def on_buffer(data, _):
obj.write(data)
def on_image(data, _):
if self.on_image:
- self.on_image(np.frombuffer(data, dtype=np.uint8), next(fps_counter), size, window)
+ self.on_image(np.frombuffer(data, dtype=np.uint8), layout)
signals = {
'h264sink': {'new-sample': gstreamer.new_sample_callback(on_buffer)},
diff --git a/edgetpuvision/classify.py b/edgetpuvision/classify.py
index 8b6acd0..0a5c145 100644
--- a/edgetpuvision/classify.py
+++ b/edgetpuvision/classify.py
@@ -14,7 +14,7 @@
from edgetpu.classification.engine import ClassificationEngine
from . import overlays
-from .utils import load_labels, input_image_size, same_input_image_sizes
+from .utils import load_labels, input_image_size, same_input_image_sizes, avg_fps_counter
from .gstreamer import Display, run_gen
@@ -41,6 +41,8 @@
acc = accumulator(size=args.window, top_k=args.top_k)
acc.send(None) # Initialize.
+ fps_counter = avg_fps_counter(30)
+
engines = [ClassificationEngine(m) for m in args.model.split(',')]
assert same_input_image_sizes(engines)
engines = itertools.cycle(engines)
@@ -53,8 +55,9 @@
output = None
while True:
- tensor, size, window, inference_rate, command = (yield output)
+ tensor, layout, command = (yield output)
+ inference_rate = next(fps_counter)
if draw_overlay:
start = time.monotonic()
results = engine.ClassifyWithInputTensor(tensor, threshold=args.threshold, top_k=args.top_k)
@@ -65,7 +68,7 @@
if args.print:
print_results(inference_rate, results)
- output = overlays.classification(results, inference_time, inference_rate, size, window)
+ output = overlays.classification(results, inference_time, inference_rate, layout)
else:
output = None
diff --git a/edgetpuvision/detect.py b/edgetpuvision/detect.py
index a3820d4..ed102e9 100644
--- a/edgetpuvision/detect.py
+++ b/edgetpuvision/detect.py
@@ -19,7 +19,7 @@
from . import overlays
-from .utils import load_labels, input_image_size, same_input_image_sizes
+from .utils import load_labels, input_image_size, same_input_image_sizes, avg_fps_counter
from .gstreamer import Display, run_gen
def area(obj):
@@ -34,6 +34,8 @@
print(' %d: label=%s, bbox=(%.2f %.2f %.2f %.2f), bbox_area=%.2f' % x)
def render_gen(args):
+ fps_counter = avg_fps_counter(30)
+
engines = [DetectionEngine(m) for m in args.model.split(',')]
assert same_input_image_sizes(engines)
engines = itertools.cycle(engines)
@@ -47,7 +49,9 @@
output = None
while True:
- tensor, size, window, inference_rate, command = (yield output)
+ tensor, layout, command = (yield output)
+
+ inference_rate = next(fps_counter)
if draw_overlay:
start = time.monotonic()
objs = engine.DetectWithInputTensor(tensor, threshold=args.threshold, top_k=args.top_k)
@@ -61,7 +65,7 @@
if args.print:
print_results(inference_rate, objs, labels)
- output = overlays.detection(objs, labels, inference_time, inference_rate, size, window)
+ output = overlays.detection(objs, labels, inference_time, inference_rate, layout)
else:
output = None
diff --git a/edgetpuvision/gstreamer.py b/edgetpuvision/gstreamer.py
index 80b68a7..352a95e 100644
--- a/edgetpuvision/gstreamer.py
+++ b/edgetpuvision/gstreamer.py
@@ -96,16 +96,13 @@
f.write(overlay)
print('Overlay saved as "%s"' % name)
-def avg_fps_counter(window_size):
- window = collections.deque(maxlen=window_size)
- prev = time.monotonic()
- yield 0.0 # First fps value.
- while True:
- curr = time.monotonic()
- window.append(curr - prev)
- prev = curr
- yield len(window) / sum(window)
+Layout = collections.namedtuple('Layout', ('size', 'window'))
+
+def make_layout(inference_size, render_size):
+ size = min_outer_size(inference_size, render_size)
+ window = center_inside(render_size, size)
+ return Layout(size=size, window=window)
def caps_size(caps):
structure = caps.get_structure(0)
@@ -186,9 +183,8 @@
commands.put(ch)
return True
-def on_new_sample(sink, pipeline, render_overlay, render_size, images, commands, fps_counter):
+def on_new_sample(sink, pipeline, render_overlay, render_size, images, commands):
with pull_sample(sink) as (sample, data):
- inference_rate = next(fps_counter)
custom_command = None
save_frame = False
@@ -197,14 +193,12 @@
save_frame = True
elif command == COMMAND_PRINT_INFO:
print('Timestamp: %.2f' % time.monotonic())
- print('Inference FPS: %s' % inference_rate)
print('Render size: %d x %d' % render_size)
print('Inference size: %d x %d' % caps_size(sample.get_caps()))
else:
custom_command = command
svg = render_overlay(np.frombuffer(data, dtype=np.uint8),
- inference_rate=inference_rate,
command=custom_command)
overlay = pipeline.get_by_name('overlay')
if overlay:
@@ -218,8 +212,8 @@
def run_gen(render_overlay_gen, *, source, downscale, display):
inference_size = render_overlay_gen.send(None) # Initialize.
return run(inference_size,
- lambda tensor, size, window, inference_rate, command:
- render_overlay_gen.send((tensor, size, window, inference_rate, command)),
+ lambda tensor, layout, command:
+ render_overlay_gen.send((tensor, layout, command)),
source=source,
downscale=downscale,
display=display)
@@ -285,18 +279,13 @@
GLib.io_add_watch(sys.stdin.fileno(), GLib.IO_IN, on_keypress, commands)
stack.enter_context(term_raw_mode(sys.stdin.fileno()))
- size = min_outer_size(inference_size, render_size)
- window = center_inside(render_size, size)
-
run_pipeline(loop, pipeline, {'appsink': {'new-sample':
functools.partial(on_new_sample,
render_overlay=functools.partial(render_overlay,
- size=size,
- window=window),
+ layout=make_layout(inference_size, render_size)),
render_size=render_size,
images=images,
- commands=commands,
- fps_counter=avg_fps_counter(30))}
+ commands=commands)}
})
while GLib.MainContext.default().iteration(False):
diff --git a/edgetpuvision/overlays.py b/edgetpuvision/overlays.py
index bc7ce51..cfdb458 100644
--- a/edgetpuvision/overlays.py
+++ b/edgetpuvision/overlays.py
@@ -12,8 +12,8 @@
int((x1 - x0) * width), int((y1 - y0) * height)
-def classification(results, inference_time, inference_rate, size, window):
- x0, y0, w, h = window
+def classification(results, inference_time, inference_rate, layout):
+ x0, y0, w, h = layout.window
lines = [
'Inference time: %.2f ms (%.2f fps)' % (inference_time * 1000, 1.0 / inference_time),
@@ -26,19 +26,18 @@
defs = svg.Defs()
defs += CSS_STYLES
- doc = svg.Svg(width=w, height=h, viewBox='%s %s %s %s' % window, font_size='26px')
+ doc = svg.Svg(width=w, height=h, viewBox='%s %s %s %s' % layout.window, font_size='26px')
doc += defs
doc += svg.normal_text(lines, x=x0 + 10, y=y0 + 10, font_size_em=1.1)
return str(doc)
-
-def detection(objs, labels, inference_time, inference_rate, size, window):
- x0, y0, w, h = window
+def detection(objs, labels, inference_time, inference_rate, layout):
+ x0, y0, w, h = layout.window
defs = svg.Defs()
defs += CSS_STYLES
- doc = svg.Svg(width=w, height=h, viewBox='%s %s %s %s' % window, font_size='26px')
+ doc = svg.Svg(width=w, height=h, viewBox='%s %s %s %s' % layout.window, font_size='26px')
doc += defs
doc += svg.normal_text((
'Inference time: %.2f ms (%.2f fps)' % (inference_time * 1000, 1.0 / inference_time),
@@ -53,7 +52,7 @@
else:
caption = '%d%%' % percent
- x, y, w, h = _normalize_rect(obj.bounding_box.flatten().tolist(), size)
+ x, y, w, h = _normalize_rect(obj.bounding_box.flatten().tolist(), layout.size)
doc += svg.normal_text(caption, x, y - 5)
doc += svg.Rect(x=x, y=y, width=w, height=h, rx=2, ry=2)
diff --git a/edgetpuvision/server.py b/edgetpuvision/server.py
index 95f2e3c..e1e35da 100644
--- a/edgetpuvision/server.py
+++ b/edgetpuvision/server.py
@@ -20,8 +20,8 @@
assert camera is not None
with StreamingServer(camera) as server:
- def on_image(tensor, inference_rate, size, window):
- overlay = gen.send((tensor, size, window, inference_rate, None))
+ def on_image(tensor, layout):
+ overlay = gen.send((tensor, layout, None))
server.send_overlay(overlay)
camera.on_image = on_image
diff --git a/edgetpuvision/utils.py b/edgetpuvision/utils.py
index 9495973..8de053e 100644
--- a/edgetpuvision/utils.py
+++ b/edgetpuvision/utils.py
@@ -1,4 +1,6 @@
+import collections
import re
+import time
LABEL_PATTERN = re.compile(r'\s*(\d+)(.+)')
@@ -14,3 +16,14 @@
def same_input_image_sizes(engines):
return len({input_image_size(engine) for engine in engines}) == 1
+
+def avg_fps_counter(window_size):
+ window = collections.deque(maxlen=window_size)
+ prev = time.monotonic()
+ yield 0.0 # First fps value.
+
+ while True:
+ curr = time.monotonic()
+ window.append(curr - prev)
+ prev = curr
+ yield len(window) / sum(window)