Diffstat (limited to 'drivers/media/video/videobuf2-core.c')
 drivers/media/video/videobuf2-core.c | 207 ++++++++++++++++++++++++++++++++--
 1 file changed, 203 insertions(+), 4 deletions(-)
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 2e8f1df775b6..b431dc6d592e 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -106,6 +106,36 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
}
/**
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+{
+ if (!p->mem_priv)
+ return;
+
+ if (p->dbuf_mapped)
+ call_memop(q, unmap_dmabuf, p->mem_priv);
+
+ call_memop(q, detach_dmabuf, p->mem_priv);
+ dma_buf_put(p->dbuf);
+ memset(p, 0, sizeof *p);
+}
+
+/**
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+}
+
+/**
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* every buffer on the queue
*/
@@ -227,6 +257,8 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
/* Free MMAP buffers or release USERPTR buffers */
if (q->memory == V4L2_MEMORY_MMAP)
__vb2_buf_mem_free(vb);
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ __vb2_buf_dmabuf_put(vb);
else
__vb2_buf_userptr_put(vb);
}
@@ -349,6 +381,12 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
*/
memcpy(b->m.planes, vb->v4l2_planes,
b->length * sizeof(struct v4l2_plane));
+
+ if (q->memory == V4L2_MEMORY_DMABUF) {
+ unsigned int plane;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ b->m.planes[plane].m.fd = 0;
+ }
} else {
/*
* We use length and offset in v4l2_planes array even for
@@ -360,6 +398,8 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
b->m.offset = vb->v4l2_planes[0].m.mem_offset;
else if (q->memory == V4L2_MEMORY_USERPTR)
b->m.userptr = vb->v4l2_planes[0].m.userptr;
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ b->m.fd = 0;
}
/*
@@ -451,6 +491,20 @@ static int __verify_mmap_ops(struct vb2_queue *q)
}
/**
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+ !q->mem_ops->unmap_dmabuf)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
* vb2_reqbufs() - Initiate streaming
* @q: videobuf2 queue
* @req: struct passed from userspace to vidioc_reqbufs handler in driver
@@ -483,8 +537,9 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EBUSY;
}
- if (req->memory != V4L2_MEMORY_MMAP
- && req->memory != V4L2_MEMORY_USERPTR) {
+ if (req->memory != V4L2_MEMORY_MMAP &&
+ req->memory != V4L2_MEMORY_DMABUF &&
+ req->memory != V4L2_MEMORY_USERPTR) {
dprintk(1, "reqbufs: unsupported memory type\n");
return -EINVAL;
}
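
With this check, REQBUFS only accepts V4L2_MEMORY_DMABUF when the driver opts in: it must set VB2_DMABUF in io_modes and use a vb2_mem_ops that implements all four dmabuf callbacks, otherwise __verify_dmabuf_ops() rejects the request. A rough sketch of the queue setup such a driver would do at probe/open time; my_driver_qops and my_dmabuf_memops are placeholder names, not part of this patch:

    /*
     * Sketch only: enabling DMABUF import on a vb2 queue.  The ops
     * structures are hypothetical; the mem_ops must provide
     * attach_dmabuf, detach_dmabuf, map_dmabuf and unmap_dmabuf.
     */
    q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
    q->drv_priv = priv;
    q->buf_struct_size = sizeof(struct vb2_buffer);
    q->ops = &my_driver_qops;
    q->mem_ops = &my_dmabuf_memops;
    ret = vb2_queue_init(q);
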
@@ -513,6 +568,11 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EINVAL;
}
+ if (req->memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
+ return -EINVAL;
+ }
+
if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
/*
* We already have buffers allocated, so first check if they
@@ -619,8 +679,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
return -EBUSY;
}
- if (create->memory != V4L2_MEMORY_MMAP
- && create->memory != V4L2_MEMORY_USERPTR) {
+ if (create->memory != V4L2_MEMORY_MMAP &&
+ create->memory != V4L2_MEMORY_USERPTR &&
+ create->memory != V4L2_MEMORY_DMABUF) {
dprintk(1, "%s(): unsupported memory type\n", __func__);
return -EINVAL;
}
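
On the application side, the only difference from MMAP at this stage is the memory field passed to VIDIOC_REQBUFS (and VIDIOC_CREATE_BUFS). A minimal userspace sketch, assuming an already-opened single-planar capture device in fd:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int request_dmabuf_buffers(int fd, unsigned int count)
    {
            struct v4l2_requestbuffers req;

            memset(&req, 0, sizeof(req));
            req.count  = count;
            req.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            req.memory = V4L2_MEMORY_DMABUF;

            /* fails with EINVAL if the driver does not support DMABUF */
            if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0) {
                    perror("VIDIOC_REQBUFS");
                    return -1;
            }
            return (int)req.count;  /* buffer slots actually set up */
    }
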
@@ -644,6 +705,11 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
return -EINVAL;
}
+ if (create->memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(1, "%s(): DMABUF for current setup unsupported\n", __func__);
+ return -EINVAL;
+ }
+
if (q->num_buffers == VIDEO_MAX_FRAME) {
dprintk(1, "%s(): maximum number of buffers already allocated\n",
__func__);
@@ -776,6 +842,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
struct vb2_queue *q = vb->vb2_queue;
unsigned long flags;
+ unsigned int plane;
if (vb->state != VB2_BUF_STATE_ACTIVE)
return;
@@ -786,6 +853,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
dprintk(4, "Done processing on buffer %d, state: %d\n",
vb->v4l2_buf.index, vb->state);
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, finish, vb->planes[plane].mem_priv);
+
/* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
vb->state = state;
@@ -839,6 +910,14 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
b->m.planes[plane].length;
}
}
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].bytesused =
+ b->m.planes[plane].bytesused;
+ v4l2_planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ }
+ }
} else {
/*
* Single-planar buffers do not use planes array,
@@ -853,6 +932,10 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
v4l2_planes[0].m.userptr = b->m.userptr;
v4l2_planes[0].length = b->length;
}
+
+ if (b->memory == V4L2_MEMORY_DMABUF)
+ v4l2_planes[0].m.fd = b->m.fd;
+
}
vb->v4l2_buf.field = b->field;
@@ -957,14 +1040,114 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/**
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ */
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret;
+ int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+ /* Verify and copy relevant information provided by the userspace */
+ ret = __fill_vb2_buffer(vb, b, planes);
+ if (ret)
+ return ret;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "qbuf: invalid dmabuf fd for "
+ "plane %d\n", plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Skip the plane if already verified */
+ if (dbuf == vb->planes[plane].dbuf) {
+ planes[plane].length = dbuf->size;
+ dma_buf_put(dbuf);
+ continue;
+ }
+
+ dprintk(3, "qbuf: buffer description for plane %d changed, "
+ "reattaching dma buf\n", plane);
+
+ /* Release previously acquired memory if present */
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+
+ /* Acquire each plane's memory */
+ mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+ dbuf, q->plane_sizes[plane], write);
+ if (IS_ERR(mem_priv)) {
+ dprintk(1, "qbuf: failed acquiring dmabuf "
+ "memory for plane %d\n", plane);
+ ret = PTR_ERR(mem_priv);
+ goto err;
+ }
+
+ planes[plane].length = dbuf->size;
+ vb->planes[plane].dbuf = dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ /* TODO: This pins the buffer(s) with dma_buf_map_attachment().. but
+ * really we want to do this just before the DMA, not while queueing
+ * the buffer(s)..
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(1, "qbuf: failed mapping dmabuf "
+ "memory for plane %d\n", plane);
+ goto err;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
+ }
+
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_qop(q, buf_init, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer initialization failed\n");
+ goto err;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane] = planes[plane];
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ __vb2_buf_dmabuf_put(vb);
+
+ return ret;
+}
+
+/**
* __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
*/
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->queued_count);
+
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, prepare, vb->planes[plane].mem_priv);
+
q->ops->buf_queue(vb);
}
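
__qbuf_dmabuf() expects the allocator's attach_dmabuf to return a per-plane cookie and map_dmabuf to pin the buffer for DMA. A loose importer sketch built on the core dma-buf API (dma_buf_attach()/dma_buf_map_attachment()), showing how a dma-contig style allocator could implement these callbacks; struct my_dmabuf_buf and its fields are illustrative, not taken from this patch:

    #include <linux/dma-buf.h>
    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Illustrative per-buffer state for a dma-buf importing allocator */
    struct my_dmabuf_buf {
            struct device             *dev;
            struct dma_buf_attachment *db_attach;
            struct sg_table           *dma_sgt;
            enum dma_data_direction    dma_dir;
            unsigned long              size;
    };

    /* matches the call in __qbuf_dmabuf(): attach_dmabuf(alloc_ctx, dbuf, size, write) */
    static void *my_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
                                  unsigned long size, int write)
    {
            struct my_dmabuf_buf *buf;
            struct dma_buf_attachment *dba;

            buf = kzalloc(sizeof(*buf), GFP_KERNEL);
            if (!buf)
                    return ERR_PTR(-ENOMEM);

            buf->dev = alloc_ctx;   /* assumes alloc_ctx is the struct device */
            dba = dma_buf_attach(dbuf, buf->dev);
            if (IS_ERR(dba)) {
                    kfree(buf);
                    return ERR_CAST(dba);
            }

            buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
            buf->size = size;
            buf->db_attach = dba;
            return buf;
    }

    /* pins the backing storage; called from __qbuf_dmabuf() via map_dmabuf */
    static int my_map_dmabuf(void *mem_priv)
    {
            struct my_dmabuf_buf *buf = mem_priv;
            struct sg_table *sgt;

            sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
            if (IS_ERR(sgt))
                    return PTR_ERR(sgt);

            buf->dma_sgt = sgt;
            return 0;
    }
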
@@ -980,6 +1163,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
case V4L2_MEMORY_USERPTR:
ret = __qbuf_userptr(vb, b);
break;
+ case V4L2_MEMORY_DMABUF:
+ ret = __qbuf_dmabuf(vb, b);
+ break;
default:
WARN(1, "Invalid queue type\n");
ret = -EINVAL;
@@ -1335,6 +1521,19 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
return ret;
}
+ /* TODO: this unpins the buffer (dma_buf_unmap_attachment()).. but
+ * really we want to do this just after DMA, not when the
+ * buffer is dequeued..
+ */
+ if (q->memory == V4L2_MEMORY_DMABUF) {
+ unsigned int i;
+
+ for (i = 0; i < vb->num_planes; ++i) {
+ call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+ vb->planes[i].dbuf_mapped = 0;
+ }
+ }
+
switch (vb->state) {
case VB2_BUF_STATE_DONE:
dprintk(3, "dqbuf: Returning done buffer\n");
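
Seen from userspace, the whole cycle is: REQBUFS with V4L2_MEMORY_DMABUF, QBUF passing a dma-buf fd per buffer (or per plane in the multi-planar case), STREAMON, then DQBUF as usual. A hedged single-planar sketch, using the same headers as the REQBUFS sketch above; dmabuf_fds[] is assumed to hold fds obtained from some exporting subsystem (e.g. a DRM driver):

    /* Assumes dmabuf_fds[] was filled by the exporting device/subsystem. */
    static int stream_with_dmabuf(int fd, int *dmabuf_fds, unsigned int count)
    {
            enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            struct v4l2_buffer buf;
            unsigned int i;

            for (i = 0; i < count; i++) {
                    memset(&buf, 0, sizeof(buf));
                    buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                    buf.memory = V4L2_MEMORY_DMABUF;
                    buf.index  = i;
                    buf.m.fd   = dmabuf_fds[i];  /* fd is re-validated on every QBUF */
                    if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
                            return -1;
            }

            if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
                    return -1;

            /* dequeue one filled buffer; buf.index identifies which fd holds the frame */
            memset(&buf, 0, sizeof(buf));
            buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_DMABUF;
            if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
                    return -1;

            return buf.index;
    }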