-rw-r--r--  Documentation/core-api/idr.rst   3
-rw-r--r--  include/linux/xarray.h          15
-rw-r--r--  tools/include/linux/sched/mm.h   2
3 files changed, 20 insertions, 0 deletions
diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst
index 2eb5afdb9931..18d724867064 100644
--- a/Documentation/core-api/idr.rst
+++ b/Documentation/core-api/idr.rst
@@ -17,6 +17,9 @@ solution to the problem to avoid everybody inventing their own. The IDR
 provides the ability to map an ID to a pointer, while the IDA provides
 only ID allocation, and as a result is much more memory-efficient.
 
+The IDR interface is deprecated; please use the :doc:`XArray <xarray>`
+instead.
+
 IDR usage
 =========
 
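The deprecation note above points IDR users at the XArray. As a rough sketch of the translation (the names old_map, new_map, map_object_idr() and map_object_xa() are invented for illustration and are not part of this patch; idr_alloc(), xa_alloc(), DEFINE_IDR(), DEFINE_XARRAY_ALLOC() and xa_limit_32b are the existing APIs), an ID-allocating pointer map converts roughly like this:

/* Illustrative sketch only -- not part of this patch. */
#include <linux/idr.h>
#include <linux/xarray.h>

static DEFINE_IDR(old_map);			/* deprecated IDR */
static DEFINE_XARRAY_ALLOC(new_map);		/* XArray with ID allocation */

/* Old style: idr_alloc() returns the newly allocated ID or a negative errno. */
static int map_object_idr(void *obj, u32 *id)
{
	int ret = idr_alloc(&old_map, obj, 0, 0, GFP_KERNEL);

	if (ret < 0)
		return ret;
	*id = ret;
	return 0;
}

/* New style: xa_alloc() writes the new ID through @id and returns 0 or -errno. */
static int map_object_xa(void *obj, u32 *id)
{
	return xa_alloc(&new_map, id, obj, xa_limit_32b, GFP_KERNEL);
}

xa_alloc() is also one of the entry points annotated below, so converted callers get the new might_alloc() check for free.
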
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index c29e11b2c073..44dd6d6e01bc 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -16,6 +16,7 @@
 #include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/rcupdate.h>
+#include <linux/sched/mm.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
@@ -586,6 +587,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	curr = __xa_store(xa, index, entry, gfp);
 	xa_unlock_bh(xa);
@@ -612,6 +614,7 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	curr = __xa_store(xa, index, entry, gfp);
 	xa_unlock_irq(xa);
@@ -687,6 +690,7 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock(xa);
@@ -714,6 +718,7 @@ static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock_bh(xa);
@@ -741,6 +746,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock_irq(xa);
@@ -770,6 +776,7 @@ static inline int __must_check xa_insert(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock(xa);
@@ -799,6 +806,7 @@ static inline int __must_check xa_insert_bh(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock_bh(xa);
@@ -828,6 +836,7 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock_irq(xa);
@@ -857,6 +866,7 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock(xa);
@@ -886,6 +896,7 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_bh(xa);
@@ -915,6 +926,7 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_irq(xa);
@@ -948,6 +960,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock(xa);
@@ -981,6 +994,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_bh(xa);
@@ -1014,6 +1028,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_irq(xa);
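
Each allocating entry point above now calls might_alloc(gfp) before taking the xa_lock, so a sleeping GFP mask passed from atomic context is reported on every call (via the might_sleep_if() check under CONFIG_DEBUG_ATOMIC_SLEEP), not only on the slow path where the XArray actually has to allocate a new node. A hedged sketch of the kind of caller this catches; the names objects, obj_lock and track_object() are made up for illustration:

/* Illustrative only: a bug the new annotations flag deterministically. */
#include <linux/spinlock.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(objects);
static DEFINE_SPINLOCK(obj_lock);

static int track_object(void *obj, u32 *id)
{
	int err;

	spin_lock(&obj_lock);
	/*
	 * GFP_KERNEL may sleep.  With might_alloc() in xa_alloc() this
	 * now warns here on every call when CONFIG_DEBUG_ATOMIC_SLEEP is
	 * enabled, even if the store would have succeeded without
	 * allocating; before, it was only caught when a node really had
	 * to be allocated.
	 */
	err = xa_alloc(&objects, id, obj, xa_limit_32b, GFP_KERNEL);
	spin_unlock(&obj_lock);

	return err;
}

The fix for such a caller is either to drop the outer spinlock (xa_alloc() already takes xa_lock internally) or to pass GFP_ATOMIC / GFP_NOWAIT if it genuinely cannot sleep.
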
diff --git a/tools/include/linux/sched/mm.h b/tools/include/linux/sched/mm.h
index c8d9f19c1f35..967294b8edcf 100644
--- a/tools/include/linux/sched/mm.h
+++ b/tools/include/linux/sched/mm.h
@@ -1,4 +1,6 @@
 #ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
 #define _TOOLS_PERF_LINUX_SCHED_MM_H
 
+#define might_alloc(gfp) do { } while (0)
+
 #endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */
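
Because xarray.h now pulls in linux/sched/mm.h, the userspace tools build (e.g. the radix-tree/XArray test harness) needs a definition, and a no-op stub is enough there since it has no allocation-context debugging. For reference, a paraphrased sketch of the kernel-side helper this stub stands in for (see include/linux/sched/mm.h for the authoritative definition; details may differ):

/* Paraphrased, not part of this diff. */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);	/* lockdep: may enter direct reclaim */
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}

Besides the sleep check, the fs_reclaim annotations teach lockdep that any lock taken while the GFP mask allows reclaim could also be taken from the reclaim path.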