summaryrefslogtreecommitdiff
path: root/sys/v4l2/gstv4l2allocator.c
diff options
context:
space:
mode:
authorNicolas Dufresne <nicolas.dufresne@collabora.com>2014-04-04 22:35:48 -0400
committerNicolas Dufresne <nicolas.dufresne@collabora.com>2014-05-08 15:56:36 -0400
commitfd13e9e96de440a0ee8d2881ad6d6c77d135427e (patch)
treee9009d6797fc00fcb13d90b495349054ecdee931 /sys/v4l2/gstv4l2allocator.c
parent1b4561cf3523eef7325cce7b6d6e6e074c80dbb1 (diff)
downloadgstreamer-plugins-good-fd13e9e96de440a0ee8d2881ad6d6c77d135427e.tar.gz
Implement V4l2 Allocator
The goal of this allocator is mainly to allow tracking the memory. Currently, when a buffer's memory has been modified, the buffer and its memory are disposed and lost until the stream is restarted.
Diffstat (limited to 'sys/v4l2/gstv4l2allocator.c')
-rw-r--r--sys/v4l2/gstv4l2allocator.c945
1 files changed, 945 insertions, 0 deletions
diff --git a/sys/v4l2/gstv4l2allocator.c b/sys/v4l2/gstv4l2allocator.c
new file mode 100644
index 000000000..6c6886f8f
--- /dev/null
+++ b/sys/v4l2/gstv4l2allocator.c
@@ -0,0 +1,945 @@
+/*
+ * Copyright (C) 2014 Collabora Ltd.
+ * Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "ext/videodev2.h"
+#include "gstv4l2allocator.h"
+#include "v4l2_calls.h"
+
+#include <string.h>
+
#define GST_V4L2_MEMORY_TYPE "V4l2Memory"

/* Standard GObject boilerplate: GstV4l2Allocator is a GstAllocator subclass. */
#define gst_v4l2_allocator_parent_class parent_class
G_DEFINE_TYPE (GstV4l2Allocator, gst_v4l2_allocator, GST_TYPE_ALLOCATOR);

GST_DEBUG_CATEGORY_STATIC (v4l2allocator_debug);
#define GST_CAT_DEFAULT v4l2allocator_debug

/* Signals emitted by the allocator. */
enum
{
  /* Emitted when a complete memory group went back into the free queue
   * (see gst_v4l2_allocator_release()). */
  GROUP_RELEASED,
  LAST_SIGNAL
};

static guint gst_v4l2_allocator_signals[LAST_SIGNAL] = { 0 };

/* Forward declaration: needed by _v4l2mem_dispose() below. */
static void gst_v4l2_allocator_release (GstV4l2Allocator * allocator,
    GstV4l2Memory * mem);
+
+static const gchar *
+memory_type_to_str (guint32 memory)
+{
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ return "mmap";
+ case V4L2_MEMORY_USERPTR:
+ return "userptr";
+ case V4L2_MEMORY_DMABUF:
+ return "dmabuf";
+ default:
+ return "unknown";
+ }
+}
+
+/*************************************/
+/* GstV4lMemory implementation */
+/*************************************/
+
+static gpointer
+_v4l2mem_map (GstV4l2Memory * mem, gsize maxsize, GstMapFlags flags)
+{
+ gpointer data = NULL;
+
+ switch (mem->group->buffer.memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_USERPTR:
+ data = mem->data;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ /* v4l2 dmabuf memory are not shared with downstream */
+ g_assert_not_reached ();
+ break;
+ default:
+ GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
+ break;
+ }
+ return data;
+}
+
+static gboolean
+_v4l2mem_unmap (GstV4l2Memory * mem)
+{
+ gboolean ret = FALSE;
+
+ switch (mem->group->buffer.memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_USERPTR:
+ ret = TRUE;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ /* v4l2 dmabuf memory are not share with downstream */
+ g_assert_not_reached ();
+ break;
+ default:
+ GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
+ break;
+ }
+ return ret;
+}
+
/* GstMiniObject dispose vfunc for top-level (non-shared) GstV4l2Memory.
 *
 * Returning FALSE cancels destruction: while the group still tracks this
 * plane (group->mem[plane] non-NULL), the memory is revived with an extra
 * ref and handed back to the allocator's free pool instead of being freed.
 * When the group no longer tracks it, disposal proceeds (TRUE); an extra
 * allocator ref is taken here — presumably balanced once the memory is
 * finally freed through gst_v4l2_allocator_free(); confirm against the
 * GstAllocator free path. */
static gboolean
_v4l2mem_dispose (GstV4l2Memory * mem)
{
  GstV4l2Allocator *allocator = (GstV4l2Allocator *) mem->mem.allocator;
  GstV4l2MemoryGroup *group = mem->group;
  gboolean ret;

  if (group->mem[mem->plane]) {
    /* Revive: ref back to one and return the plane to the allocator. */
    gst_memory_ref ((GstMemory *) mem);
    gst_v4l2_allocator_release (allocator, mem);
    ret = FALSE;
  } else {
    /* Keep the allocator alive while the memory is being destroyed. */
    gst_object_ref (allocator);
    ret = TRUE;
  }

  return ret;
}
+
/* Free the GstV4l2Memory structure itself. The backing v4l2 buffer
 * (mmap region / fd) is released by gst_v4l2_allocator_free(). */
static void
_v4l2mem_free (GstV4l2Memory * mem)
{
  g_slice_free (GstV4l2Memory, mem);
}
+
+static inline GstV4l2Memory *
+_v4l2mem_new (GstMemoryFlags flags, GstAllocator * allocator,
+ GstMemory * parent, gsize maxsize, gsize align, gsize offset, gsize size,
+ gint plane, gpointer data, GstV4l2MemoryGroup * group)
+{
+ GstV4l2Memory *mem;
+
+ mem = g_slice_new0 (GstV4l2Memory);
+ gst_memory_init (GST_MEMORY_CAST (mem),
+ flags, allocator, parent, maxsize, align, offset, size);
+
+ if (parent == NULL)
+ mem->mem.mini_object.dispose =
+ (GstMiniObjectDisposeFunction) _v4l2mem_dispose;
+
+ mem->plane = plane;
+ mem->data = data;
+ mem->group = group;
+
+ return mem;
+}
+
+static GstV4l2Memory *
+_v4l2mem_share (GstV4l2Memory * mem, gssize offset, gsize size)
+{
+ GstV4l2Memory *sub;
+ GstMemory *parent;
+
+ /* find the real parent */
+ if ((parent = mem->mem.parent) == NULL)
+ parent = (GstMemory *) mem;
+
+ if (size == -1)
+ size = mem->mem.size - offset;
+
+ /* the shared memory is always readonly */
+ sub = _v4l2mem_new (GST_MINI_OBJECT_FLAGS (parent) |
+ GST_MINI_OBJECT_FLAG_LOCK_READONLY, mem->mem.allocator, parent,
+ mem->mem.maxsize, mem->mem.align, offset, size, mem->plane, mem->data,
+ mem->group);
+
+ return sub;
+}
+
/* GstMemory is_span vfunc: report whether mem2 directly follows mem1 in
 * the underlying mapping, returning mem1's offset relative to its parent.
 * NOTE(review): mem1->mem.parent is dereferenced unconditionally —
 * presumably GStreamer only calls this on sub-memories with a parent;
 * confirm against the GstMemory is_span contract. */
static gboolean
_v4l2mem_is_span (GstV4l2Memory * mem1, GstV4l2Memory * mem2, gsize * offset)
{
  if (offset)
    *offset = mem1->mem.offset - mem1->mem.parent->offset;

  /* and memory is contiguous */
  return mem1->mem.offset + mem1->mem.size == mem2->mem.offset;
}
+
/* Return TRUE if @mem was allocated by a GstV4l2Allocator (checked via
 * the allocator's registered memory type string). */
gboolean
gst_is_v4l2_memory (GstMemory * mem)
{
  return gst_memory_is_type (mem, GST_V4L2_MEMORY_TYPE);
}
+
+
+/*************************************/
+/* GstV4l2MemoryGroup implementation */
+/*************************************/
+
+static void
+gst_v4l2_memory_group_free (GstV4l2MemoryGroup * group)
+{
+ gint i;
+
+ for (i = 0; i < group->n_mem; i++) {
+ GstMemory *mem = group->mem[i];
+ group->mem[i] = NULL;
+ if (mem)
+ gst_memory_unref (mem);
+ }
+
+ g_slice_free (GstV4l2MemoryGroup, group);
+}
+
+static GstV4l2MemoryGroup *
+gst_v4l2_memory_group_new (GstV4l2Allocator * allocator, guint32 index)
+{
+ gint video_fd = allocator->video_fd;
+ guint32 memory = allocator->memory;
+ struct v4l2_format *format = &allocator->format;
+ GstV4l2MemoryGroup *group;
+ gsize img_size, buf_size;
+
+ group = g_slice_new0 (GstV4l2MemoryGroup);
+
+ group->buffer.type = format->type;
+ group->buffer.index = index;
+ group->buffer.memory = memory;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR (format->type)) {
+ group->n_mem = group->buffer.length = format->fmt.pix_mp.num_planes;
+ group->buffer.m.planes = group->planes;
+ } else {
+ group->n_mem = 1;
+ }
+
+ if (v4l2_ioctl (video_fd, VIDIOC_QUERYBUF, &group->buffer) < 0)
+ goto querybuf_failed;
+
+ /* Check that provided size matches the format we have negotiation. Failing
+ * there usually means a driver of libv4l bug. */
+ if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
+ gint i;
+
+ for (i = 0; i < group->n_mem; i++) {
+ img_size = allocator->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ buf_size = group->planes[i].length;
+ if (buf_size < img_size)
+ goto buffer_too_short;
+ }
+ } else {
+ img_size = allocator->format.fmt.pix.sizeimage;
+ buf_size = group->buffer.length;
+ if (buf_size < img_size)
+ goto buffer_too_short;
+ }
+
+ /* We save non planar buffer information into the multi-planar plane array
+ * to avoid duplicating the code later */
+ if (!V4L2_TYPE_IS_MULTIPLANAR (format->type)) {
+ group->planes[0].bytesused = group->buffer.bytesused;
+ group->planes[0].length = group->buffer.length;
+ g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
+ memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
+ }
+
+ GST_LOG_OBJECT (allocator, "Got %s buffer", memory_type_to_str (memory));
+ GST_LOG_OBJECT (allocator, " index: %u", group->buffer.index);
+ GST_LOG_OBJECT (allocator, " type: %d", group->buffer.type);
+ GST_LOG_OBJECT (allocator, " flags: %08x", group->buffer.flags);
+ GST_LOG_OBJECT (allocator, " field: %d", group->buffer.field);
+ GST_LOG_OBJECT (allocator, " memory: %d", group->buffer.memory);
+ GST_LOG_OBJECT (allocator, " planes: %d", group->n_mem);
+
+#ifndef GST_DISABLE_GST_DEBUG
+ if (memory == V4L2_MEMORY_MMAP) {
+ gint i;
+ for (i = 0; i < group->n_mem; i++) {
+ GST_LOG_OBJECT (allocator, " [%u] bytesused: %u, length: %u", i,
+ group->planes[i].bytesused, group->planes[i].length);
+ GST_LOG_OBJECT (allocator, " [%u] MMAP offset: %u", i,
+ group->planes[i].m.mem_offset);
+ }
+ }
+#endif
+
+ return group;
+
+querybuf_failed:
+ {
+ GST_ERROR ("error querying buffer %d: %s", index, g_strerror (errno));
+ goto failed;
+ }
+buffer_too_short:
+ {
+ GST_ERROR ("buffer size %" G_GSIZE_FORMAT
+ " is smaller then negotiated size %" G_GSIZE_FORMAT
+ ", this is usually the result of a bug in the v4l2 driver or libv4l.",
+ buf_size, img_size);
+ goto failed;
+ }
+failed:
+ gst_v4l2_memory_group_free (group);
+ return NULL;
+}
+
+
+/*************************************/
+/* GstV4lAllocator implementation */
+/*************************************/
+
/* Called when one plane memory of a group is returned. When the last
 * plane of the group comes back (mems_allocated reaches 0), the whole
 * group is pushed on the free queue and "group-released" is emitted. */
static void
gst_v4l2_allocator_release (GstV4l2Allocator * allocator, GstV4l2Memory * mem)
{
  GstV4l2MemoryGroup *group = mem->group;

  GST_LOG_OBJECT (allocator, "plane %i of buffer %u released",
      mem->plane, group->buffer.index);

  /* When all memory are back, put the group back in the free queue */
  if (g_atomic_int_dec_and_test (&group->mems_allocated)) {
    GST_LOG_OBJECT (allocator, "buffer %u released", group->buffer.index);
    gst_atomic_queue_push (allocator->free_queue, group);
    g_signal_emit (allocator, gst_v4l2_allocator_signals[GROUP_RELEASED], 0);
  }

  /* Keep last, allocator may be freed after this call */
  g_object_unref (allocator);
}
+
/* GstAllocator free vfunc: release the OS resources behind one plane
 * memory, then free the GstV4l2Memory structure. */
static void
gst_v4l2_allocator_free (GstAllocator * gallocator, GstMemory * gmem)
{
  GstV4l2Allocator *allocator = (GstV4l2Allocator *) gallocator;
  GstV4l2Memory *mem = (GstV4l2Memory *) gmem;
  GstV4l2MemoryGroup *group = mem->group;

  GST_LOG_OBJECT (allocator, "freeing plane %i of buffer %u",
      mem->plane, group->buffer.index);

  switch (allocator->memory) {
    case V4L2_MEMORY_MMAP:
      if (mem->data) {
        /* mapped MMAP buffer: unmap the plane */
        munmap (mem->data, group->planes[mem->plane].length);
      } else if (group->planes[mem->plane].m.fd > 0) {
        /* NOTE(review): fd path under the MMAP case — presumably for
         * buffers exported as dmabuf rather than mapped; confirm. */
        close (group->planes[mem->plane].m.fd);
      }
      break;
    default:
      /* Nothing to do */
      break;
  }

  _v4l2mem_free (mem);
}
+
+static void
+gst_v4l2_allocator_dispose (GObject * obj)
+{
+ GstV4l2Allocator *allocator = (GstV4l2Allocator *) obj;
+ gint i;
+
+ GST_LOG_OBJECT (obj, "called");
+
+ for (i = 0; i < allocator->count; i++) {
+ GstV4l2MemoryGroup *group = allocator->groups[i];
+ allocator->groups[i] = NULL;
+ if (group)
+ gst_v4l2_memory_group_free (group);
+ }
+
+ G_OBJECT_CLASS (parent_class)->dispose (obj);
+}
+
/* GObject finalize: close our dup'ed device fd and drop the free-buffer
 * queue, then chain up. */
static void
gst_v4l2_allocator_finalize (GObject * obj)
{
  GstV4l2Allocator *allocator = (GstV4l2Allocator *) obj;

  GST_LOG_OBJECT (obj, "called");

  v4l2_close (allocator->video_fd);
  gst_atomic_queue_unref (allocator->free_queue);

  G_OBJECT_CLASS (parent_class)->finalize (obj);
}
+
/* Class init: wire up the GObject and GstAllocator vfuncs, register the
 * "group-released" signal and the debug category. */
static void
gst_v4l2_allocator_class_init (GstV4l2AllocatorClass * klass)
{
  GObjectClass *object_class;
  GstAllocatorClass *allocator_class;

  allocator_class = (GstAllocatorClass *) klass;
  object_class = (GObjectClass *) klass;

  /* alloc is NULL: memory can only be obtained through the explicit
   * gst_v4l2_allocator_alloc_*() entry points, not generic gst_allocator_alloc() */
  allocator_class->alloc = NULL;
  allocator_class->free = gst_v4l2_allocator_free;

  object_class->dispose = gst_v4l2_allocator_dispose;
  object_class->finalize = gst_v4l2_allocator_finalize;

  gst_v4l2_allocator_signals[GROUP_RELEASED] = g_signal_new ("group-released",
      G_TYPE_FROM_CLASS (object_class), G_SIGNAL_RUN_LAST, 0, NULL, NULL, NULL,
      G_TYPE_NONE, 0);

  GST_DEBUG_CATEGORY_INIT (v4l2allocator_debug, "v4l2allocator", 0,
      "V4L2 Allocator");
}
+
/* Instance init: install the GstMemory map/unmap/share/is_span vfuncs and
 * create the free-group queue (sized to the v4l2 maximum frame count). */
static void
gst_v4l2_allocator_init (GstV4l2Allocator * allocator)
{
  GstAllocator *alloc = GST_ALLOCATOR_CAST (allocator);

  alloc->mem_type = GST_V4L2_MEMORY_TYPE;
  alloc->mem_map = (GstMemoryMapFunction) _v4l2mem_map;
  alloc->mem_unmap = (GstMemoryUnmapFunction) _v4l2mem_unmap;
  alloc->mem_share = (GstMemoryShareFunction) _v4l2mem_share;
  alloc->mem_is_span = (GstMemoryIsSpanFunction) _v4l2mem_is_span;
  /* Use the default, fallback copy function */

  allocator->free_queue = gst_atomic_queue_new (VIDEO_MAX_FRAME);

  GST_OBJECT_FLAG_SET (allocator, GST_ALLOCATOR_FLAG_CUSTOM_ALLOC);
}
+
+#define GST_V4L2_ALLOCATOR_PROBE(obj,type) \
+ gst_v4l2_allocator_probe ((obj), V4L2_MEMORY_ ## type, \
+ GST_V4L2_ALLOCATOR_FLAG_ ## type ## _REQBUF, \
+ GST_V4L2_ALLOCATOR_FLAG_ ## type ## _CREATE_BUF)
+static guint32
+gst_v4l2_allocator_probe (GstV4l2Allocator * allocator, guint32 memory,
+ guint32 breq_flag, guint32 bcreate_flag)
+{
+ struct v4l2_requestbuffers breq = { 0 };
+ guint32 flags = 0;
+
+ breq.type = allocator->type;
+ breq.count = 0;
+ breq.memory = memory;
+
+ if (v4l2_ioctl (allocator->video_fd, VIDIOC_REQBUFS, &breq) == 0) {
+ struct v4l2_create_buffers bcreate = { 0 };
+
+ flags |= breq_flag;
+
+ bcreate.memory = V4L2_MEMORY_MMAP;
+ bcreate.format = allocator->format;
+
+ if ((v4l2_ioctl (allocator->video_fd, VIDIOC_CREATE_BUFS, &bcreate) == 0))
+ flags |= bcreate_flag;
+ }
+
+ return flags;
+}
+
/* Create a new allocator for @video_fd / @format. The fd is dup'ed, so the
 * caller keeps ownership of its own fd. Probes which memory models (mmap,
 * userptr, dmabuf) the driver supports; returns NULL (after unref) if none
 * are usable. The allocator is named "<parent-name>:allocator" for logs. */
GstV4l2Allocator *
gst_v4l2_allocator_new (GstObject * parent, gint video_fd,
    struct v4l2_format * format)
{
  GstV4l2Allocator *allocator;
  guint32 flags = 0;
  gchar *name, *parent_name;

  parent_name = gst_object_get_name (parent);
  name = g_strconcat (parent_name, ":allocator", NULL);
  g_free (parent_name);

  allocator = g_object_new (GST_TYPE_V4L2_ALLOCATOR, "name", name, NULL);
  g_free (name);

  /* Save everything */
  allocator->video_fd = v4l2_dup (video_fd);
  allocator->type = format->type;
  allocator->format = *format;

  /* NOTE(review): probe results are accumulated but not stored on the
   * allocator here — presumably only used to reject unusable devices. */
  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, MMAP);
  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, USERPTR);
  flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, DMABUF);

  if (flags == 0)
    goto not_supported;

  return allocator;

not_supported:
  {
    GST_ERROR_OBJECT (allocator,
        "No memory model supported by GStreamer for this device");
    g_object_unref (allocator);
    return NULL;
  }
}
+
+guint
+gst_v4l2_allocator_start (GstV4l2Allocator * allocator, guint32 count,
+ guint32 memory)
+{
+ struct v4l2_requestbuffers breq = { count, allocator->type, memory };
+ gboolean can_allocate;
+ gint i;
+
+ g_return_val_if_fail (count != 0, 0);
+
+ GST_OBJECT_LOCK (allocator);
+
+ if (allocator->active)
+ goto already_active;
+
+ if (v4l2_ioctl (allocator->video_fd, VIDIOC_REQBUFS, &breq) < 0)
+ goto reqbufs_failed;
+
+ if (breq.count < 1)
+ goto out_of_memory;
+
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, MMAP);
+ break;
+ case V4L2_MEMORY_USERPTR:
+ can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, USERPTR);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, DMABUF);
+ break;
+ default:
+ can_allocate = FALSE;
+ break;
+ }
+
+ GST_DEBUG_OBJECT (allocator, "allocated %u %s buffers out of %u requested",
+ breq.count, memory_type_to_str (memory), count);
+
+ allocator->can_allocate = can_allocate;
+ allocator->count = breq.count;
+ allocator->memory = memory;
+
+ /* Create memory groups */
+ for (i = 0; i < allocator->count; i++) {
+ allocator->groups[i] = gst_v4l2_memory_group_new (allocator, i);
+ if (allocator->groups[i] == NULL)
+ goto error;
+
+ gst_atomic_queue_push (allocator->free_queue, allocator->groups[i]);
+ }
+
+ g_atomic_int_set (&allocator->active, TRUE);
+
+done:
+ GST_OBJECT_UNLOCK (allocator);
+ return breq.count;
+
+already_active:
+ {
+ GST_ERROR_OBJECT (allocator,
+ "error requesting %d buffers: %s", count, g_strerror (errno));
+ goto error;
+ }
+reqbufs_failed:
+ {
+ GST_ERROR_OBJECT (allocator,
+ "error requesting %d buffers: %s", count, g_strerror (errno));
+ goto error;
+ }
+out_of_memory:
+ {
+ GST_ERROR_OBJECT (allocator, "Not enough memory to allocate buffers");
+ goto error;
+ }
+error:
+ {
+ breq.count = 0;
+ goto done;
+ }
+}
+
+GstV4l2Return
+gst_v4l2_allocator_stop (GstV4l2Allocator * allocator)
+{
+ struct v4l2_requestbuffers breq = { 0, allocator->type, allocator->memory };
+ gint i = 0;
+ GstV4l2Return ret = GST_V4L2_OK;
+
+ GST_DEBUG_OBJECT (allocator, "stop allocator");
+
+ GST_OBJECT_LOCK (allocator);
+
+ if (!allocator->active)
+ goto done;
+
+ if (gst_atomic_queue_length (allocator->free_queue) != allocator->count) {
+ GST_DEBUG_OBJECT (allocator, "allocator is still in use");
+ ret = GST_V4L2_BUSY;
+ goto done;
+ }
+
+ while (gst_atomic_queue_pop (allocator->free_queue)) {
+ /* nothing */
+ };
+
+ for (i = 0; i < allocator->count; i++) {
+ GstV4l2MemoryGroup *group = allocator->groups[i];
+ allocator->groups[i] = NULL;
+ if (group)
+ gst_v4l2_memory_group_free (group);
+ }
+
+ if (v4l2_ioctl (allocator->video_fd, VIDIOC_REQBUFS, &breq) < 0)
+ goto reqbufs_failed;
+
+ g_atomic_int_set (&allocator->active, FALSE);
+
+done:
+ GST_OBJECT_UNLOCK (allocator);
+ return ret;
+
+reqbufs_failed:
+ {
+ GST_ERROR_OBJECT (allocator,
+ "error releasing buffers buffers: %s", g_strerror (errno));
+ ret = GST_V4L2_ERROR;
+ goto done;
+ }
+}
+
/* Grow the buffer pool by one using VIDIOC_CREATE_BUFS and wrap the new
 * buffer in a memory group. Requires the allocator to be active and
 * CREATE_BUFS support (can_allocate). Returns NULL on failure. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_create_buf (GstV4l2Allocator * allocator)
{
  struct v4l2_create_buffers bcreate = { 0 };
  GstV4l2MemoryGroup *group = NULL;

  GST_OBJECT_LOCK (allocator);

  if (!allocator->active)
    goto done;

  bcreate.memory = allocator->memory;
  bcreate.format = allocator->format;
  bcreate.count = 1;

  if (!allocator->can_allocate)
    goto done;

  if (v4l2_ioctl (allocator->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0)
    goto create_bufs_failed;

  /* bcreate.index is the index of the first newly created buffer */
  group = gst_v4l2_memory_group_new (allocator, bcreate.index);

  if (group) {
    allocator->groups[bcreate.index] = group;
    allocator->count++;
  }

done:
  GST_OBJECT_UNLOCK (allocator);
  return group;

create_bufs_failed:
  {
    GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
        g_strerror (errno));
    goto done;
  }
}
+
/* Obtain a free memory group: pop one from the free queue, or — when the
 * queue is empty and the driver supports CREATE_BUFS — create a new one.
 * Returns NULL when inactive or no group can be obtained. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group;

  if (!g_atomic_int_get (&allocator->active))
    return NULL;

  group = gst_atomic_queue_pop (allocator->free_queue);

  if (group == NULL) {
    if (allocator->can_allocate) {
      group = gst_v4l2_allocator_create_buf (allocator);

      /* Don't hammer on CREATE_BUFS */
      if (group == NULL)
        allocator->can_allocate = FALSE;
    }
  }

  return group;
}
+
/* Allocate (or reuse) an MMAP memory group: take a free group, mmap each
 * plane that has no GstMemory yet, and hand the group out with all plane
 * memories accounted for in mems_allocated.
 *
 * Each outstanding plane memory holds a reference on the allocator (taken
 * here for reused memories, or implicitly by _v4l2mem_new for fresh ones);
 * the reference is dropped in gst_v4l2_allocator_release(). */
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group;
  gint i;

  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);

  group = gst_v4l2_allocator_alloc (allocator);

  if (group == NULL)
    return NULL;

  for (i = 0; i < group->n_mem; i++) {
    if (group->mem[i] == NULL) {
      gpointer data;
      data = v4l2_mmap (NULL, group->planes[i].length, PROT_READ | PROT_WRITE,
          MAP_SHARED, allocator->video_fd, group->planes[i].m.mem_offset);

      if (data == MAP_FAILED)
        goto mmap_failed;

      GST_LOG_OBJECT (allocator,
          "mmap buffer length %d, data offset %d, plane %d",
          group->planes[i].length, group->planes[i].data_offset, i);

      group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
          NULL, group->planes[i].length, 0, 0, group->planes[i].length, i,
          data, group);
    } else {
      /* Take back the allocator reference */
      gst_object_ref (allocator);
    }

    group->mems_allocated++;
  }

  /* Ensure group size. Unlike GST, v4l2 have size (bytesused) initially set
   * to 0. As length might be bigger then the expected size exposed in the
   * format, we simply set bytesused initially and reset it here for
   * simplicity */
  gst_v4l2_allocator_reset_size (allocator, group);

  return group;

mmap_failed:
  {
    GST_ERROR_OBJECT (allocator, "Failed to mmap buffer: %s",
        g_strerror (errno));

    if (group->mems_allocated > 0) {
      /* If one or more mmap worked, we need to unref the memory, otherwise
       * they will keep a ref on the allocator and leak it. This will put back
       * the group into the free_queue */
      for (i = 0; i < group->n_mem; i++)
        gst_memory_unref (group->mem[i]);
    } else {
      /* Otherwise, group has to be on free queue for _stop() to work */
      gst_atomic_queue_push (allocator->free_queue, group);
    }
    return NULL;
  }
}
+
/* Disabled stubs: DMABUF allocation/import and USERPTR import are not
 * implemented yet; kept here as placeholders for the planned API. */
#if 0
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_dmabuf (GstV4l2Allocator * allocator)
{
  /* TODO */
  return NULL;
}

GstV4l2MemoryGroup *
gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
    gint dmabuf_fd[VIDEO_MAX_PLANES])
{
  /* TODO */
  return NULL;
}

GstV4l2MemoryGroup *
gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
    gpointer data[VIDEO_MAX_PLANES], gint stride[VIDEO_MAX_PLANES],
    gint offset[VIDEO_MAX_PLANES])
{
  /* TODO */
  return NULL;
}
#endif
+
/* Flush: for every group the driver still holds (QUEUED or DONE flags
 * set), clear the flags and drop the references taken at QBUF time so the
 * memories return to the free pool.
 * NOTE(review): groups[i] is dereferenced without a NULL check — presumably
 * all slots up to count are populated once active; confirm. */
void
gst_v4l2_allocator_flush (GstV4l2Allocator * allocator)
{
  gint i;

  GST_OBJECT_LOCK (allocator);

  if (!allocator->active)
    goto done;

  for (i = 0; i < allocator->count; i++) {
    GstV4l2MemoryGroup *group = allocator->groups[i];
    guint32 queued = (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE);
    gint n;

    if (group->buffer.flags & queued) {
      group->buffer.flags &= ~queued;

      /* balance the refs taken in gst_v4l2_allocator_qbuf() */
      for (n = 0; n < group->n_mem; n++)
        gst_memory_unref (group->mem[n]);
    }
  }

done:
  GST_OBJECT_UNLOCK (allocator);
}
+
/* Queue a buffer group to the driver (VIDIOC_QBUF), first refreshing the
 * bytesused fields from the current GstMemory sizes. Each plane memory is
 * ref'ed so it stays alive while the driver owns the buffer; the refs are
 * dropped in _dqbuf() or _flush().
 * NOTE(review): the refs are taken even when QBUF fails (ret FALSE) —
 * presumably the caller still releases the group through the normal path;
 * confirm against the callers. */
gboolean
gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  gboolean ret = TRUE;
  gint i;

  /* update sizes */
  if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    for (i = 0; i < group->n_mem; i++)
      group->planes[i].bytesused =
          gst_memory_get_sizes (group->mem[i], NULL, NULL);
  } else {
    group->buffer.bytesused = gst_memory_get_sizes (group->mem[0], NULL, NULL);
  }

  if (v4l2_ioctl (allocator->video_fd, VIDIOC_QBUF, &group->buffer) < 0) {
    GST_ERROR_OBJECT (allocator, "failed queing buffer %i: %s",
        group->buffer.index, g_strerror (errno));
    ret = FALSE;
  }

  /* Ensure the memory will stay around and is RO */
  for (i = 0; i < group->n_mem; i++)
    gst_memory_ref (group->mem[i]);

  return ret;
}
+
/* Dequeue the next buffer from the driver (VIDIOC_DQBUF) and return the
 * matching memory group, with the group's buffer/plane bookkeeping updated
 * from what the driver reported and the memory sizes resized accordingly
 * (capture: actual bytesused; output: reset to the negotiated size).
 * Also drops the per-plane refs taken at QBUF time. Returns NULL on
 * ioctl failure, with errno-specific diagnostics logged. */
GstV4l2MemoryGroup *
gst_v4l2_allocator_dqbuf (GstV4l2Allocator * allocator)
{
  struct v4l2_buffer buffer = { 0 };
  struct v4l2_plane planes[VIDEO_MAX_PLANES] = { {0} };
  gint i;

  GstV4l2MemoryGroup *group = NULL;

  buffer.type = allocator->type;
  buffer.memory = allocator->memory;

  if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    buffer.length = allocator->format.fmt.pix_mp.num_planes;
    buffer.m.planes = planes;
  }

  if (v4l2_ioctl (allocator->video_fd, VIDIOC_DQBUF, &buffer) < 0)
    goto error;

  /* the driver tells us which buffer index was dequeued */
  group = allocator->groups[buffer.index];
  group->buffer = buffer;

  if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    /* repoint the planes array at the group's own storage and copy over
     * what the driver filled into our stack copy */
    group->buffer.m.planes = group->planes;
    memcpy (group->planes, buffer.m.planes, sizeof (planes));
  } else {
    /* mirror the non-planar info into planes[0], as in group_new() */
    group->planes[0].bytesused = group->buffer.bytesused;
    group->planes[0].length = group->buffer.length;
    g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
    memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
  }

  /* And update memory size */
  if (V4L2_TYPE_IS_OUTPUT (allocator->type)) {
    gst_v4l2_allocator_reset_size (allocator, group);
  } else {
    /* for capture, simply read the size */
    for (i = 0; i < group->n_mem; i++) {
      gst_memory_resize (group->mem[i], 0, group->planes[i].bytesused);
    }
  }

  /* Release the memory, possibly making it RW again */
  for (i = 0; i < group->n_mem; i++)
    gst_memory_unref (group->mem[i]);

  return group;

error:
  GST_ERROR_OBJECT (allocator, "failed dequeuing a %s buffer: %s",
      memory_type_to_str (allocator->memory), g_strerror (errno));

  switch (errno) {
    case EAGAIN:
      GST_WARNING_OBJECT (allocator,
          "Non-blocking I/O has been selected using O_NONBLOCK and"
          " no buffer was in the outgoing queue.");
      break;
    case EINVAL:
      GST_ERROR_OBJECT (allocator,
          "The buffer type is not supported, or the index is out of bounds, "
          "or no buffers have been allocated yet, or the userptr "
          "or length are invalid.");
      break;
    case ENOMEM:
      GST_ERROR_OBJECT (allocator,
          "insufficient memory to enqueue a user pointer buffer");
      break;
    case EIO:
      GST_INFO_OBJECT (allocator,
          "VIDIOC_DQBUF failed due to an internal error."
          " Can also indicate temporary problems like signal loss."
          " Note the driver might dequeue an (empty) buffer despite"
          " returning an error, or even stop capturing.");
      /* have we de-queued a buffer ? */
      if (!(buffer.flags & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))) {
        GST_DEBUG_OBJECT (allocator, "reenqueing buffer");
        /* FIXME ... should we do something here? */
      }
      break;
    case EINTR:
      GST_WARNING_OBJECT (allocator, "could not sync on a buffer on device");
      break;
    default:
      GST_WARNING_OBJECT (allocator,
          "Grabbing frame got interrupted unexpectedly. %d: %s.", errno,
          g_strerror (errno));
      break;
  }
  return NULL;
}
+
+void
+gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator,
+ GstV4l2MemoryGroup * group)
+{
+ gsize size;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
+ gint i;
+
+ for (i = 0; i < group->n_mem; i++) {
+ size = allocator->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ gst_memory_resize (group->mem[i], 0, size);
+ }
+
+ } else {
+ size = allocator->format.fmt.pix.sizeimage;
+ gst_memory_resize (group->mem[0], 0, size);
+ }
+}