author    Jason Gunthorpe <jgg@nvidia.com>  2020-09-04 19:41:45 -0300
committer Jason Gunthorpe <jgg@nvidia.com>  2020-09-09 15:33:17 -0300
commit    ebc24096c4c40017d9f9b0fddc5d69b94ad10369
tree      27988092ea178560fb2fb0d94172f951d4b961fb /include/rdma
parent    3361c29e9279e682c0e9a7d7461b4e3bbc77830b
RDMA/umem: Add rdma_umem_for_each_dma_block()
This helper does the same as rdma_for_each_block(), except it works on a
umem. This simplifies most of the call sites.

Link: https://lore.kernel.org/r/4-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
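At a typical call site the simplification looks roughly like this (a minimal
sketch, not taken verbatim from the patch; use() stands in for whatever a
driver does with each block address):

	struct ib_block_iter biter;

	/* Before: the caller had to unpack the umem's sgl and nmap itself. */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pgsz)
		use(rdma_block_iter_dma_address(&biter));

	/* After: the umem is passed directly and unpacked by the helper. */
	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		use(rdma_block_iter_dma_address(&biter));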
Diffstat (limited to 'include/rdma')
-rw-r--r--  include/rdma/ib_umem.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 07a764eb692e..b880512ba95f 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -40,6 +40,26 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
 		       PAGE_SHIFT;
 }
 
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+						struct ib_umem *umem,
+						unsigned long pgsz)
+{
+	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
+	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
+	     __rdma_block_iter_next(biter);)
+
 #ifdef CONFIG_INFINIBAND_USER_MEM
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
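
For context, a sketch of how a driver might consume the new macro.
ib_umem_find_best_pgsz() and rdma_block_iter_dma_address() are existing
kernel APIs; fill_pas(), pas, and virt are illustrative names, not part of
this patch:

	/*
	 * Hypothetical driver helper: fill a page-address table from a umem.
	 */
	static void fill_pas(struct ib_umem *umem, u64 virt, u64 *pas)
	{
		struct ib_block_iter biter;
		unsigned long pgsz;
		int i = 0;

		/* Pick the largest HW-supported block size for this mapping. */
		pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M, virt);

		/* Walk pgsz-aligned DMA blocks; no sgl/nmap bookkeeping needed. */
		rdma_umem_for_each_dma_block(umem, &biter, pgsz)
			pas[i++] = rdma_block_iter_dma_address(&biter);
	}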