author	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-19 09:22:31 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-19 09:22:31 -0700
commit	a4ee891b7e918e5a005b43171f16ad5f8b3bc7d1 (patch)
tree	0e9daf58f9fe62f0ced80a7fe2b891a0be32c9fc /include
parent	88e0a74902f894fbbc55ad3ad2cb23b4bfba555c (diff)
parent	61b123ffcedac72a1ac6a96d1da87d25efddcbda (diff)
Merge tag 'bitmap-6.0-rc2' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:
 "cpumask: UP optimisation fixes follow-up

  As an older version of the UP optimisation fixes was merged, not all
  review feedback has been implemented. This implements the feedback
  received on the merged version [1], and the respin [2], for changes
  related to <linux/cpumask.h> and lib/cpumask.c"

Link: https://lore.kernel.org/lkml/cover.1656777646.git.sander@svanheule.net/ [1]
Link: https://lore.kernel.org/lkml/cover.1659077534.git.sander@svanheule.net/ [2]

It spent more than a week with no issues.

* tag 'bitmap-6.0-rc2' of https://github.com/norov/linux:
  lib/cpumask: drop always-true preprocessor guard
  lib/cpumask: add inline cpumask_next_wrap() for UP
  cpumask: align signatures of UP implementations
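For readers skimming the change: the UP stub added in the diff below returns an out-of-range value once a wrap is requested, because a single-CPU system only ever has cpu0 to hand out. The minimal userspace sketch that follows is not kernel code; up_next_wrap(), NR_CPUS_UP, the single-bit 'online' mask and the demo loop are hypothetical stand-ins for cpumask_next_wrap(), nr_cpumask_bits, a struct cpumask and a for_each_cpu_wrap()-style walk. It only mirrors the control flow to show how such a walk visits cpu0 exactly once and then terminates.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's UP case: one CPU, so the whole
 * "mask" fits in a single bit. */
#define NR_CPUS_UP 1U

/* Mirrors the control flow of the new inline UP cpumask_next_wrap(): once a
 * CPU has already been visited (n >= 0) and wrapping is allowed, nothing is
 * left to hand out, so the out-of-range sentinel ends the walk; otherwise
 * cpu0 is the only candidate, provided the mask has it set. */
static unsigned int up_next_wrap(int n, unsigned int mask, int start, bool wrap)
{
	(void)start;			/* with a single CPU, 'start' is irrelevant */
	if (wrap && n >= 0)
		return NR_CPUS_UP;	/* past the end: terminate the iteration */
	return (mask & 1U) ? 0U : NR_CPUS_UP;
}

int main(void)
{
	unsigned int online = 1U;	/* bit 0 set: cpu0 is present */
	unsigned int cpu;

	/* Walk the mask the way a for_each_cpu_wrap()-style loop would. */
	for (cpu = up_next_wrap(-1, online, 0, false);
	     cpu < NR_CPUS_UP;
	     cpu = up_next_wrap((int)cpu, online, 0, true))
		printf("visiting cpu%u\n", cpu);	/* prints exactly once */

	return 0;
}

The sentinel plays the same role as the nr_cpumask_bits return in the new UP stub, which the cpumask iteration macros treat as "no more CPUs"; on SMP kernels the out-of-line __pure implementation declared in the #else branch covers masks with more than one bit.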
Diffstat (limited to 'include')
 include/linux/cpumask.h | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 0d435d0edbcb..bd047864c7ac 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -202,12 +202,13 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
return 0;
}
-static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p) {
+static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
return cpumask_first_and(src1p, src2p);
}
-static inline int cpumask_any_distribute(const struct cpumask *srcp)
+static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
@@ -261,7 +262,26 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
(cpu) = cpumask_next_zero((cpu), (mask)), \
(cpu) < nr_cpu_ids;)
+#if NR_CPUS == 1
+static inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ cpumask_check(start);
+ if (n != -1)
+ cpumask_check(n);
+
+ /*
+ * Return the first available CPU when wrapping, or when starting before cpu0,
+ * since there is only one valid option.
+ */
+ if (wrap && n >= 0)
+ return nr_cpumask_bits;
+
+ return cpumask_first(mask);
+}
+#else
unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#endif
/**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location