Enable gl plugins
Enable gl plugin {glbox, glsvgoverlay*} elements. Sync the
implementation to the coral/packages/edgetpuvision repo.
glsvgoverlay* element failed on the IS_DMABUF check (b/161188385)
glbox works and is used in the pipeline for scaling and colorconversion
Update the detection bbox drawing to match the glbox behavior
Change-Id: I2f4036c052f887e1626f33cdadca57cee594a9c1
diff --git a/debian/control b/debian/control
index d40115c..c413cba 100644
--- a/debian/control
+++ b/debian/control
@@ -18,12 +18,14 @@
gstreamer1.0-plugins-bad,
gstreamer1.0-plugins-good,
gstreamer1.0-plugins-ugly,
+ gstreamer1.0-python3-plugin-loader,
mdpd,
python3-cairo,
python3-edgetpu,
python3-gi,
python3-gi-cairo,
python3-gst-1.0,
 python3-numpy,
+ python3-opengl,
python3-pil,
python3-protobuf,
diff --git a/debian/copyright b/debian/copyright
index e83d9af..adf13b3 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -1,6 +1,6 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: python3-edgetpuvision
-Source: https://coral.withgoogle.com
+Source: https://coral.ai
Files: *
Copyright: Copyright 2018 Google, LLC <coral-support@google.com>
diff --git a/edgetpuvision/detect.py b/edgetpuvision/detect.py
index be30314..6c14b82 100644
--- a/edgetpuvision/detect.py
+++ b/edgetpuvision/detect.py
@@ -48,8 +48,10 @@
BBox = collections.namedtuple('BBox', ('x', 'y', 'w', 'h'))
BBox.area = lambda self: self.w * self.h
-BBox.scale = lambda self, sx, sy: BBox(x=self.x * sx, y=self.y * sy,
- w=self.w * sx, h=self.h * sy)
+BBox.scale = lambda self, sx, sy, ss: BBox(x=self.x * ss - (ss - sx) / 2,
+ y=self.y * ss - (ss - sy) / 2,
+ w=self.w * ss,
+ h=self.h * ss)
BBox.__str__ = lambda self: 'BBox(x=%.2f y=%.2f w=%.2f h=%.2f)' % self
Object = collections.namedtuple('Object', ('id', 'label', 'score', 'bbox'))
@@ -94,7 +96,7 @@
else:
caption = '%d%%' % percent
- x, y, w, h = obj.bbox.scale(*layout.render_size)
+ x, y, w, h = obj.bbox.scale(*layout.render_size, max(width, height))
color = get_color(obj.id)
doc += svg.Rect(x=x, y=y, width=w, height=h,
diff --git a/edgetpuvision/pipelines.py b/edgetpuvision/pipelines.py
index edf2678..38cd170 100644
--- a/edgetpuvision/pipelines.py
+++ b/edgetpuvision/pipelines.py
@@ -22,7 +22,7 @@
Caps('video/x-raw,framerate=25/1'),
Filter('videoconvert'),
Caps('video/x-raw,format=I420'),
- Filter('glfilterbin filter=glcolorscale'),
+ Filter('glfilterbin filter=glbox'),
Caps('video/x-raw,width=1280,height=720,format=BGRA'),
]
@@ -33,7 +33,7 @@
Filter('decodebin'),
Filter('videoconvert'),
Caps('video/x-raw,format=I420'),
- Filter('glfilterbin filter=glcolorscale'),
+ Filter('glfilterbin filter=glbox'),
Caps('video/x-raw,height=720,format=BGRA'),
]
@@ -44,7 +44,7 @@
Caps('image/jpeg', width=fmt.size.width, height=fmt.size.height,
framerate='%d/%d' % fmt.framerate),
Filter('decodebin'),
- Filter('glfilterbin filter=glcolorscale'),
+ Filter('glfilterbin filter=glbox'),
Caps('video/x-raw', height=720, format='BGRA'),
]
@@ -60,9 +60,8 @@
def inference_pipeline(layout, stillimage=False):
size = max_inner_size(layout.render_size, layout.inference_size)
return [
- Filter('v4l2convert'),
- Caps('video/x-raw,format=RGB,width=%d,height=%d,pixel-aspect-ratio=1/1' % (
- layout.inference_size.width, layout.inference_size.height)),
+ Filter('glfilterbin', filter='glbox'),
+ Caps('video/x-raw', format='RGB', width=layout.inference_size.width, height=layout.inference_size.height),
Sink('app', name='appsink', emit_signals=True, max_buffers=1, drop=True, sync=False),
]
@@ -70,7 +69,6 @@
def image_display_pipeline(filename, layout):
return (
[decoded_img_file_src(filename),
- Filter('videoconvert'),
Filter('imagefreeze'),
Caps('video/x-raw', framerate='30/1'),
Tee(name='t')],
@@ -111,20 +109,22 @@
def image_headless_pipeline(filename, layout):
return (
[decoded_img_file_src(filename),
- Filter('videoconvert'),
Filter('imagefreeze'),
+ Filter('glupload'),
inference_pipeline(layout)],
)
def video_headless_pipeline(filename, layout):
return (
[decoded_file_src(filename),
+ Filter('glupload'),
inference_pipeline(layout)],
)
def camera_headless_pipeline(fmt, layout):
return (
[v4l2_src(fmt),
+ Filter('glupload'),
inference_pipeline(layout)],
)
diff --git a/plugins/glbox.py b/plugins/glbox.py
index 1f44039..21b6f3b 100644
--- a/plugins/glbox.py
+++ b/plugins/glbox.py
@@ -27,11 +27,11 @@
from OpenGL.arrays.arraydatatype import ArrayDatatype
from OpenGL.GLES3 import (
glActiveTexture, glBindBuffer, glBindTexture, glBindVertexArray, glBufferData, glDeleteBuffers,
- glDeleteVertexArrays, glDrawElements, glEnableVertexAttribArray, glGenBuffers,
- glGenVertexArrays, glVertexAttribPointer)
+ glClear, glClearColor, glDeleteVertexArrays, glDrawElements, glEnableVertexAttribArray,
+ glGenBuffers, glGenVertexArrays, glVertexAttribPointer)
from OpenGL.GLES3 import (
- GL_ARRAY_BUFFER, GL_ELEMENT_ARRAY_BUFFER, GL_FALSE, GL_FLOAT, GL_STATIC_DRAW, GL_TEXTURE0,
- GL_TEXTURE_2D, GL_TRIANGLES, GL_UNSIGNED_SHORT, GL_VERTEX_SHADER)
+ GL_ARRAY_BUFFER, GL_COLOR_BUFFER_BIT,GL_ELEMENT_ARRAY_BUFFER, GL_FALSE, GL_FLOAT,
+ GL_STATIC_DRAW, GL_TEXTURE0, GL_TEXTURE_2D, GL_TRIANGLES, GL_UNSIGNED_SHORT, GL_VERTEX_SHADER)
SINK_CAPS = 'video/x-raw(memory:GLMemory),format=RGBA,width=[1,{max_int}],height=[1,{max_int}],texture-target=2D'
@@ -272,6 +272,10 @@
return True
def do_render(self, filter, in_tex):
+ # Black borders.
+ glClearColor(0.0, 0.0, 0.0, 0.0)
+ glClear(GL_COLOR_BUFFER_BIT)
+
glBindVertexArray(self.vao_id)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, in_tex.tex_id)
diff --git a/plugins/glsvgoverlaysink.py b/plugins/glsvgoverlaysink.py
index 75957df..25de535 100644
--- a/plugins/glsvgoverlaysink.py
+++ b/plugins/glsvgoverlaysink.py
@@ -261,6 +261,32 @@
'none',
GObject.ParamFlags.WRITABLE
),
+ 'qos': (bool,
+ 'Generate Quality-of-Service events upstream',
+ 'Generate Quality-of-Service events upstream',
+ True,
+ GObject.ParamFlags.READWRITE
+ ),
+ 'sync': (bool,
+ 'Sync on the clock',
+ 'Sync on the clock',
+ True,
+ GObject.ParamFlags.READWRITE
+ ),
+ 'max-lateness': (int,
+ 'Maximum frame lateness (ns)',
+ 'Maximum number of nanoseconds that a buffer can be late before it is dropped (-1 unlimited)',
+ -1,
+ GLib.MAXINT,
+ 20000000,
+ GObject.ParamFlags.READWRITE
+ ),
+ 'fullscreen': (bool,
+ 'Fullscreen',
+ 'Requests that internally created windows are fullscreen',
+ False,
+ GObject.ParamFlags.READWRITE
+ ),
}
__gsignals__ = {
'drawn': (GObject.SignalFlags.RUN_LAST, None, ())
@@ -413,7 +439,7 @@
self.glimagesink.set_property(prop.name, value)
def do_get_property(self, prop):
- return self.glimagesink.get_property(prop)
+ return self.glimagesink.get_property(prop.name)
def init_gl(self, glcontext):
assert not self.shader
@@ -655,4 +681,4 @@
return gst_context
-__gstelementfactory__ = ("glsvgoverlaysink", Gst.Rank.NONE, GlSvgOverlaySink)
+__gstelementfactory__ = ('glsvgoverlaysink', Gst.Rank.NONE, GlSvgOverlaySink)
diff --git a/setup.py b/setup.py
index 894b6df..1819dbb 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
long_description='API to run inference on image data coming from the camera.',
author='Coral',
author_email='coral-support@google.com',
- url="https://coral.withgoogle.com/",
+ url="https://coral.ai/",
license='Apache 2',
packages=find_packages(),
include_package_data=True,