Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/Kconfig                8
-rw-r--r--  drivers/dma-buf/dma-buf.c            120
-rw-r--r--  drivers/dma-buf/dma-fence.c           70
-rw-r--r--  drivers/dma-buf/dma-heap.c            14
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c      22
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c   25
-rw-r--r--  drivers/dma-buf/st-dma-fence.c         7
7 files changed, 200 insertions(+), 66 deletions(-)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 4f8224a6ac95..4e16c71c24b7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
This is marked experimental because we don't yet have a consistent
execution context and memory management between drivers.
+config DMABUF_DEBUG
+ bool "DMA-BUF debug checks"
+ default y if DMA_API_DEBUG
+ help
+ This option enables additional checks for DMA-BUF importers and
+ exporters. Specifically it validates that importers do not peek at the
+ underlying struct page when they import a buffer.
+
config DMABUF_SELFTESTS
tristate "Selftests for the dma-buf interfaces"
default n
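
The check is best understood from the importer's side: with DMABUF_DEBUG enabled, an importer may only use the DMA addresses of a mapped attachment and must never look at the struct pages behind them. A minimal sketch, assuming a made-up device (struct my_dev, my_dev_queue_dma()) and an sg_table obtained from dma_buf_map_attachment():

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

/* Walk only sg_dma_address()/sg_dma_len(); with DMABUF_DEBUG enabled,
 * dereferencing sg_page() here would hit a mangled pointer. */
static void my_importer_program(struct my_dev *mydev, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i;

        for_each_sgtable_dma_sg(sgt, sg, i)
                my_dev_queue_dma(mydev, sg_dma_address(sg), sg_dma_len(sg));
}
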
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 9ad6397aaa97..f264b70c383e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -493,7 +493,7 @@ err_alloc_file:
*
* 4. Once a driver is done with a shared buffer it needs to call
* dma_buf_detach() (after cleaning up any mappings) and then release the
- * reference acquired with dma_buf_get by calling dma_buf_put().
+ * reference acquired with dma_buf_get() by calling dma_buf_put().
*
* For the detailed semantics exporters are expected to implement see
* &dma_buf_ops.
@@ -509,9 +509,10 @@ err_alloc_file:
* by the exporter. see &struct dma_buf_export_info
* for further details.
*
- * Returns, on success, a newly created dma_buf object, which wraps the
- * supplied private data and operations for dma_buf_ops. On either missing
- * ops, or error in allocating struct dma_buf, will return negative error.
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for struct dma_buf_ops. On missing
+ * ops, or on an error allocating the struct dma_buf, a negative error is
+ * returned.
*
* For most cases the easiest way to create @exp_info is through the
* %DEFINE_DMA_BUF_EXPORT_INFO macro.
@@ -597,7 +598,7 @@ err_module:
EXPORT_SYMBOL_GPL(dma_buf_export);
/**
- * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * dma_buf_fd - returns a file descriptor for the given struct dma_buf
* @dmabuf: [in] pointer to dma_buf for which fd is required.
* @flags: [in] flags to give to fd
*
@@ -621,10 +622,10 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
EXPORT_SYMBOL_GPL(dma_buf_fd);
/**
- * dma_buf_get - returns the dma_buf structure related to an fd
- * @fd: [in] fd associated with the dma_buf to be returned
+ * dma_buf_get - returns the struct dma_buf related to an fd
+ * @fd: [in] fd associated with the struct dma_buf to be returned
*
- * On success, returns the dma_buf structure associated with an fd; uses
+ * On success, returns the struct dma_buf associated with an fd; uses
* file's refcounting done by fget to increase refcount. returns ERR_PTR
* otherwise.
*/
@@ -665,9 +666,36 @@ void dma_buf_put(struct dma_buf *dmabuf)
}
EXPORT_SYMBOL_GPL(dma_buf_put);
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+        int i;
+        struct scatterlist *sg;
+
+        /* To catch abuse of the underlying struct page by importers mix
+         * up the bits, but take care to preserve the low SG_ bits to
+         * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+         * before passing the sgt back to the exporter. */
+        for_each_sgtable_sg(sg_table, sg, i)
+                sg->page_link ^= ~0xffUL;
+#endif
+
+}
+
+static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
+                                       enum dma_data_direction direction)
+{
+        struct sg_table *sg_table;
+
+        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+
+        if (!IS_ERR_OR_NULL(sg_table))
+                mangle_sg_table(sg_table);
+
+        return sg_table;
+}
+
/**
- * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
- * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
* @importer_ops: [in] importer operations for the attachment
@@ -676,6 +704,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
*
+ * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
+ * functionality.
+ *
* Returns:
*
* A pointer to newly created &dma_buf_attachment on success, or a negative
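
As an illustration (not part of this patch), a dynamic importer sets up a &dma_buf_attach_ops with a move_notify callback before attaching; my_invalidate_mapping(), struct my_importer and its dev member are hypothetical:

static void my_move_notify(struct dma_buf_attachment *attach)
{
        /* called with the dma_resv lock held: drop any cached mapping; it
         * must be re-created with dma_buf_map_attachment() before reuse */
        my_invalidate_mapping(attach->importer_priv);
}

static const struct dma_buf_attach_ops my_attach_ops = {
        .allow_peer2peer = true,
        .move_notify = my_move_notify,
};

static struct dma_buf_attachment *my_attach(struct dma_buf *dmabuf,
                                            struct my_importer *imp)
{
        return dma_buf_dynamic_attach(dmabuf, imp->dev, &my_attach_ops, imp);
}
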
@@ -734,7 +765,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
goto err_unlock;
}
- sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
@@ -781,13 +812,24 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+                            struct sg_table *sg_table,
+                            enum dma_data_direction direction)
+{
+        /* uses XOR, hence this unmangles */
+        mangle_sg_table(sg_table);
+
+        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+}
+
/**
- * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
- * optionally calls detach() of dma_buf_ops for device-specific detach
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
* @attach: [in] attachment to be detached; is free'd after this call.
*
* Clean up a device attachment obtained by calling dma_buf_attach().
+ *
+ * Optionally this calls &dma_buf_ops.detach for device-specific detach.
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
@@ -798,7 +840,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_lock(attach->dmabuf->resv, NULL);
- dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+ __unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_buf_unpin(attach);
@@ -818,9 +860,15 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
* dma_buf_pin - Lock down the DMA-buf
- *
* @attach: [in] attachment which should be pinned
*
+ * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
+ * call this, and only for limited use cases like scanout and not for temporary
+ * pin operations. It is not permitted to allow userspace to pin arbitrary
+ * amounts of buffers through this interface.
+ *
+ * Buffers must be unpinned by calling dma_buf_unpin().
+ *
* Returns:
* 0 on success, negative error code on failure.
*/
@@ -829,6 +877,8 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->pin)
@@ -839,14 +889,19 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
EXPORT_SYMBOL_GPL(dma_buf_pin);
/**
- * dma_buf_unpin - Remove lock from DMA-buf
- *
+ * dma_buf_unpin - Unpin a DMA-buf
* @attach: [in] attachment which should be unpinned
+ *
+ * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
+ * any mapping of @attach again and inform the importer through
+ * &dma_buf_attach_ops.move_notify.
*/
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->unpin)
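
A sketch of the pin/unpin usage described above, for a hypothetical dynamic importer that keeps a buffer resident while it is scanned out; struct my_output with its attach/sgt members is made up, the dma-buf and dma_resv calls are the real API:

static int my_pin_for_scanout(struct my_output *out)
{
        struct dma_buf *dmabuf = out->attach->dmabuf;
        struct sg_table *sgt;
        int ret;

        dma_resv_lock(dmabuf->resv, NULL);
        ret = dma_buf_pin(out->attach);
        if (!ret) {
                sgt = dma_buf_map_attachment(out->attach, DMA_BIDIRECTIONAL);
                if (IS_ERR(sgt)) {
                        dma_buf_unpin(out->attach);
                        ret = PTR_ERR(sgt);
                } else {
                        out->sgt = sgt;
                }
        }
        dma_resv_unlock(dmabuf->resv);

        return ret;
}

static void my_unpin_after_scanout(struct my_output *out)
{
        struct dma_buf *dmabuf = out->attach->dmabuf;

        dma_resv_lock(dmabuf->resv, NULL);
        dma_buf_unmap_attachment(out->attach, out->sgt, DMA_BIDIRECTIONAL);
        dma_buf_unpin(out->attach);
        dma_resv_unlock(dmabuf->resv);
}
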
@@ -907,7 +962,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
}
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ sg_table = __map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -970,7 +1025,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_assert_held(attach->dmabuf->resv);
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ __unmap_dma_buf(attach, sg_table, direction);
if (dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
@@ -1014,15 +1069,15 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* vmalloc space might be limited and result in vmap calls failing.
*
* Interfaces::
+ *
* void \*dma_buf_vmap(struct dma_buf \*dmabuf)
* void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
- * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
- * that the dma-buf layer keeps a reference count for all vmap access and
- * calls down into the exporter's vmap function only when no vmapping exists,
- * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
- * provided by taking the dma_buf->lock mutex.
+ * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
+ * count for all vmap access and calls down into the exporter's vmap function
+ * only when no vmapping exists, and only unmaps it once. Protection against
+ * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
*
* - For full compatibility on the importer side with existing userspace
* interfaces, which might already support mmap'ing buffers. This is needed in
@@ -1074,11 +1129,12 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* shootdowns would increase the complexity quite a bit.
*
* Interface::
+ *
* int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
* unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
- * set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
* equally achieve that for a dma-buf object.
*/
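
For example, an importing driver that exposes its own character device could simply forward its mmap file operation to the dma-buf (struct my_obj and its dmabuf member are hypothetical):

static int my_importer_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_obj *obj = file->private_data;

        /* pgoff 0: map from the start of the dma-buf */
        return dma_buf_mmap(obj->dmabuf, vma, 0);
}
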
@@ -1111,6 +1167,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
+ * This function will also wait for any DMA transactions tracked through
+ * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
+ * synchronization this function will only ensure cache coherency, callers must
+ * ensure synchronization with such DMA transactions on their own.
+ *
* Can return negative error values, returns 0 on success.
*/
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -1121,6 +1182,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (WARN_ON(!dmabuf))
return -EINVAL;
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
@@ -1154,6 +1217,8 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
WARN_ON(!dmabuf);
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->end_cpu_access)
ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
@@ -1212,7 +1277,10 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
- * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * To ensure coherency users must call dma_buf_begin_cpu_access() and
+ * dma_buf_end_cpu_access() around any cpu access performed through this
+ * mapping.
*
* Returns 0 on success, or a negative errno code otherwise.
*/
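
Putting the coherency rule together with the kernel mapping, a CPU read-back might be bracketed roughly as follows. This is a sketch only: dst/len are caller-provided, and the dma_buf_vmap() call follows the interface listed in the comment further above, which differs in other kernel versions:

static int my_read_back(struct dma_buf *dmabuf, void *dst, size_t len)
{
        void *vaddr;
        int ret;

        ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
        if (ret)
                return ret;

        vaddr = dma_buf_vmap(dmabuf);
        if (vaddr) {
                memcpy(dst, vaddr, len);        /* CPU read of the buffer */
                dma_buf_vunmap(dmabuf, vaddr);
        } else {
                ret = -ENOMEM;
        }

        dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
        return ret;
}
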
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 7475e09b0680..d64fc03929be 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -312,22 +312,25 @@ void __dma_fence_might_wait(void)
/**
- * dma_fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_timestamp_locked - signal completion of a fence
* @fence: the fence to signal
+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
*
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from the unsignaled to the signaled state and not back, it will
- * only be effective the first time.
+ * only be effective the first time. Set the timestamp provided as the fence
+ * signal timestamp.
*
- * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
- * held.
+ * Unlike dma_fence_signal_timestamp(), this function must be called with
+ * &dma_fence.lock held.
*
* Returns 0 on success and a negative error value when @fence has been
* signalled already.
*/
-int dma_fence_signal_locked(struct dma_fence *fence)
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp)
{
struct dma_fence_cb *cur, *tmp;
struct list_head cb_list;
@@ -341,7 +344,7 @@ int dma_fence_signal_locked(struct dma_fence *fence)
/* Stash the cb_list before replacing it with the timestamp */
list_replace(&fence->cb_list, &cb_list);
- fence->timestamp = ktime_get();
+ fence->timestamp = timestamp;
set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
@@ -352,6 +355,59 @@ int dma_fence_signal_locked(struct dma_fence *fence)
return 0;
}
+EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
+
+/**
+ * dma_fence_signal_timestamp - signal completion of a fence
+ * @fence: the fence to signal
+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time. Set the timestamp provided as the fence
+ * signal timestamp.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+        unsigned long flags;
+        int ret;
+
+        if (!fence)
+                return -EINVAL;
+
+        spin_lock_irqsave(fence->lock, flags);
+        ret = dma_fence_signal_timestamp_locked(fence, timestamp);
+        spin_unlock_irqrestore(fence->lock, flags);
+
+        return ret;
+}
+EXPORT_SYMBOL(dma_fence_signal_timestamp);
+
+/**
+ * dma_fence_signal_locked - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_locked(struct dma_fence *fence)
+{
+        return dma_fence_signal_timestamp_locked(fence, ktime_get());
+}
EXPORT_SYMBOL(dma_fence_signal_locked);
/**
@@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence)
tmp = dma_fence_begin_signalling();
spin_lock_irqsave(fence->lock, flags);
- ret = dma_fence_signal_locked(fence);
+ ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_end_signalling(tmp);
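
A sketch of why the timestamp variant exists: a hypothetical driver whose hardware latches the completion time can hand that value to the fence instead of the ktime_get() taken in the interrupt handler. struct my_job and my_read_hw_timestamp() are made up; per the kerneldoc above the value must be in the kernel's CLOCK_MONOTONIC domain:

static irqreturn_t my_job_done_irq(int irq, void *data)
{
        struct my_job *job = data;
        ktime_t hw_ts = my_read_hw_timestamp(job);      /* CLOCK_MONOTONIC */

        dma_fence_signal_timestamp(&job->fence, hw_ts);
        return IRQ_HANDLED;
}
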
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index afd22c9dbdcf..6b5db954569f 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -52,6 +52,9 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
unsigned int fd_flags,
unsigned int heap_flags)
{
+        struct dma_buf *dmabuf;
+        int fd;
+
/*
* Allocations from all heaps have to begin
* and end on page boundaries.
@@ -60,7 +63,16 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
if (!len)
return -EINVAL;
- return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+        dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
+        if (IS_ERR(dmabuf))
+                return PTR_ERR(dmabuf);
+
+        fd = dma_buf_fd(dmabuf, fd_flags);
+        if (fd < 0) {
+                dma_buf_put(dmabuf);
+                /* just return, as put will call release and that will free */
+        }
+        return fd;
}
static int dma_heap_open(struct inode *inode, struct file *file)
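
With this change a heap driver's allocate callback only hands back a struct dma_buf (or an ERR_PTR) and no longer deals with file descriptors; dma_heap_buffer_alloc() installs the fd as shown above. A minimal, hypothetical heap skeleton (backing-storage management elided):

static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
                                        unsigned long len,
                                        unsigned long fd_flags,
                                        unsigned long heap_flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        /* ... allocate backing storage, fill exp_info ... */

        /* On failure dma_buf_export() returns an ERR_PTR which is passed
         * straight back to dma_heap_buffer_alloc(); a real heap would also
         * free its backing storage here, as the heaps below do. */
        return dma_buf_export(&exp_info);
}

static const struct dma_heap_ops my_heap_ops = {
        .allocate = my_heap_allocate,
};
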
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 364fc2f3e499..5d64eccd21d6 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -271,10 +271,10 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
.release = cma_heap_dma_buf_release,
};
-static int cma_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags)
+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
struct cma_heap_buffer *buffer;
@@ -289,7 +289,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
@@ -348,15 +348,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
-
- ret = dma_buf_fd(dmabuf, fd_flags);
- if (ret < 0) {
- dma_buf_put(dmabuf);
- /* just return, as put will call release and that will free */
- return ret;
- }
-
- return ret;
+ return dmabuf;
free_pages:
kfree(buffer->pages);
@@ -365,7 +357,7 @@ free_cma:
free_buffer:
kfree(buffer);
- return ret;
+ return ERR_PTR(ret);
}
static const struct dma_heap_ops cma_heap_ops = {
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 17e0e9a68baf..29e49ac17251 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -331,10 +331,10 @@ static struct page *alloc_largest_available(unsigned long size,
return NULL;
}
-static int system_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags)
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
{
struct system_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -349,7 +349,7 @@ static int system_heap_allocate(struct dma_heap *heap,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
@@ -363,8 +363,10 @@ static int system_heap_allocate(struct dma_heap *heap,
* Avoid trying to allocate memory if the process
* has been killed by SIGKILL
*/
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
goto free_buffer;
+ }
page = alloc_largest_available(size_remaining, max_order);
if (!page)
@@ -397,14 +399,7 @@ static int system_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
-
- ret = dma_buf_fd(dmabuf, fd_flags);
- if (ret < 0) {
- dma_buf_put(dmabuf);
- /* just return, as put will call release and that will free */
- return ret;
- }
- return ret;
+ return dmabuf;
free_pages:
for_each_sgtable_sg(table, sg, i) {
@@ -418,7 +413,7 @@ free_buffer:
__free_pages(page, compound_order(page));
kfree(buffer);
- return ret;
+ return ERR_PTR(ret);
}
static const struct dma_heap_ops system_heap_ops = {
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index e593064341c8..c8a12d7ad71a 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -471,8 +471,11 @@ static int thread_signal_callback(void *arg)
dma_fence_signal(f1);
smp_store_mb(cb.seen, false);
- if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
- miss++, cb.seen = true;
+ if (!f2 ||
+ dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
+ miss++;
+ cb.seen = true;
+ }
if (!t->before)
dma_fence_signal(f1);