author		Stephen Rothwell <sfr@canb.auug.org.au>	2014-10-09 15:50:15 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2014-10-09 15:50:19 +1100
commit		1e7bf38ec36c9b636b423be1fdb3015dcc9c4c01 (patch)
tree		bd4b853b501bf3559e7945c51ad2f89f721492d5 /include/linux
parent		e47bb4e2c4c63b2676ad9c202a88571028937636 (diff)
parent		0a15cbdaefcd2d44fa4b2933627012e5f2f250b1 (diff)
Merge remote-tracking branch 'percpu/for-next'
Conflicts:
	arch/s390/include/asm/cputime.h
	arch/s390/kernel/irq.c
	arch/s390/kernel/processor.c
	arch/s390/kernel/vtime.c
	fs/ext4/super.c
	kernel/irq_work.c
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blk-mq.h			  1
-rw-r--r--	include/linux/cpumask.h			 11
-rw-r--r--	include/linux/flex_proportions.h	  5
-rw-r--r--	include/linux/kernel_stat.h		  4
-rw-r--r--	include/linux/percpu-defs.h		  3
-rw-r--r--	include/linux/percpu-refcount.h		122
-rw-r--r--	include/linux/percpu.h			 13
-rw-r--r--	include/linux/percpu_counter.h		 10
-rw-r--r--	include/linux/proportions.h		  5
9 files changed, 115 insertions(+), 59 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 02c5d950f444..c9be1589415a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -150,6 +150,7 @@ enum {
};
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
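For context, a hedged sketch of where a blk-mq driver might call the new hook; the tag-set setup and disk registration around it are hypothetical and abbreviated:

	struct request_queue *q;

	q = blk_mq_init_queue(&driver->tag_set);	/* tag set already prepared */
	if (IS_ERR(q))
		return PTR_ERR(q);

	/* ... allocate the gendisk, point disk->queue at q, add_disk() ... */

	blk_mq_finish_init(q);				/* tell blk-mq that queue setup is complete */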
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 2997af6d2ccd..0a9a6da21e74 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -666,10 +666,19 @@ static inline size_t cpumask_size(void)
*
* This code makes NR_CPUS length memcopy and brings to a memory corruption.
* cpumask_copy() provide safe copy functionality.
+ *
+ * Note that there is another evil here: If you define a cpumask_var_t
+ * as a percpu variable then the way to obtain the address of the cpumask
+ * structure differently influences what this_cpu_* operation needs to be
+ * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
+ * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
+ * other type of cpumask_var_t implementation is configured.
*/
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;
+#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
+
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
@@ -681,6 +690,8 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
#else
typedef struct cpumask cpumask_var_t[1];
+#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
+
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
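As a usage sketch, this_cpu_cpumask_var_ptr() hides the representation difference when a cpumask_var_t is itself a per-CPU variable; the per-CPU variable and function below are hypothetical (with CONFIG_CPUMASK_OFFSTACK the mask still has to be allocated per CPU before use):

	static DEFINE_PER_CPU(cpumask_var_t, local_pending_mask);

	static void mark_this_cpu_pending(void)
	{
		/* correct for both the on-stack and off-stack cpumask_var_t layouts */
		struct cpumask *mask = this_cpu_cpumask_var_ptr(local_pending_mask);

		cpumask_set_cpu(smp_processor_id(), mask);
	}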
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 4ebc49fae391..0d348e011a6e 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -10,6 +10,7 @@
#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
+#include <linux/gfp.h>
/*
* When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
seqcount_t sequence;
};
-int fprop_global_init(struct fprop_global *p);
+int fprop_global_init(struct fprop_global *p, gfp_t gfp);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
raw_spinlock_t lock; /* Protect period and numerator */
};
-int fprop_local_init_percpu(struct fprop_local_percpu *pl);
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
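The gfp_t argument threads through to the percpu_counter allocations underneath; a minimal initialization sketch (variable names hypothetical):

	struct fprop_global fg;
	struct fprop_local_percpu fl;

	if (fprop_global_init(&fg, GFP_KERNEL))
		return -ENOMEM;
	if (fprop_local_init_percpu(&fl, GFP_KERNEL)) {
		fprop_global_destroy(&fg);
		return -ENOMEM;
	}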
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ecbc52f9ff77..8422b4ed6882 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
/* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu (&__get_cpu_var(kstat))
-#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_this_cpu this_cpu_ptr(&kstat)
+#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
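The accessor conversion does not change how callers use these macros; a sketch, assuming the softirqs[] member of struct kernel_stat:

	static unsigned int local_hi_softirq_count(void)
	{
		unsigned int n;

		preempt_disable();	/* kstat_this_cpu is only meaningful with preemption off */
		n = kstat_this_cpu->softirqs[HI_SOFTIRQ];
		preempt_enable();

		return n;
	}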
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index cfd56046ecec..420032d41d27 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -257,9 +257,6 @@ do { \
#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-/* keep until we have removed all uses of __this_cpu_ptr */
-#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
-
/*
* Must be an lvalue. Since @var must be a simple identifier,
* we force a syntax error here if it isn't.
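With the compatibility alias gone, any remaining __this_cpu_ptr() users convert directly to raw_cpu_ptr(); a before/after sketch on a hypothetical per-CPU variable:

	struct my_stats { unsigned long events; };	/* hypothetical example type */
	static DEFINE_PER_CPU(struct my_stats, my_stats);

	static void bump_events(void)
	{
		/* was: struct my_stats *s = __this_cpu_ptr(&my_stats); */
		struct my_stats *s = raw_cpu_ptr(&my_stats);

		s->events++;	/* caller is assumed to have preemption disabled */
	}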
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 68a64f11ce02..d5c89e0dd0e6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
*
* The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
* than an atomic_t - this is because of the way shutdown works, see
- * percpu_ref_kill()/PCPU_COUNT_BIAS.
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
*
* Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
* refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -49,29 +49,60 @@
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/gfp.h>
struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+ __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
+ __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */
+ __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+ __PERCPU_REF_FLAG_BITS = 2,
+};
+
+/* @flags for percpu_ref_init() */
+enum {
+ /*
+ * Start w/ ref == 1 in atomic mode. Can be switched to percpu
+ * operation using percpu_ref_switch_to_percpu(). If initialized
+ * with this flag, the ref will stay in atomic mode until
+ * percpu_ref_switch_to_percpu() is invoked on it.
+ */
+ PERCPU_REF_INIT_ATOMIC = 1 << 0,
+
+ /*
+ * Start dead w/ ref == 0 in atomic mode. Must be revived with
+ * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+ */
+ PERCPU_REF_INIT_DEAD = 1 << 1,
+};
+
struct percpu_ref {
- atomic_t count;
+ atomic_long_t count;
/*
* The low bit of the pointer indicates whether the ref is in percpu
* mode; if set, then get/put will manipulate the atomic_t.
*/
- unsigned long pcpu_count_ptr;
+ unsigned long percpu_count_ptr;
percpu_ref_func_t *release;
- percpu_ref_func_t *confirm_kill;
+ percpu_ref_func_t *confirm_switch;
+ bool force_atomic:1;
struct rcu_head rcu;
};
int __must_check percpu_ref_init(struct percpu_ref *ref,
- percpu_ref_func_t *release);
-void percpu_ref_reinit(struct percpu_ref *ref);
+ percpu_ref_func_t *release, unsigned int flags,
+ gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
-void __percpu_ref_kill_expedited(struct percpu_ref *ref);
+void percpu_ref_reinit(struct percpu_ref *ref);
/**
* percpu_ref_kill - drop the initial ref
@@ -88,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
return percpu_ref_kill_and_confirm(ref, NULL);
}
-#define PCPU_REF_DEAD 1
-
/*
* Internal helper. Don't use outside percpu-refcount proper. The
* function doesn't return the pointer and let the caller test it for NULL
* because doing so forces the compiler to generate two conditional
- * branches as it can't assume that @ref->pcpu_count is not NULL.
+ * branches as it can't assume that @ref->percpu_count is not NULL.
*/
-static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
- unsigned __percpu **pcpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+ unsigned long __percpu **percpu_countp)
{
- unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+ unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
/* paired with smp_store_release() in percpu_ref_reinit() */
smp_read_barrier_depends();
- if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
return false;
- *pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+ *percpu_countp = (unsigned long __percpu *)percpu_ptr;
return true;
}
@@ -115,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
* percpu_ref_get - increment a percpu refcount
* @ref: percpu_ref to get
*
- * Analagous to atomic_inc().
- */
+ * Analagous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count))
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count))
+ this_cpu_inc(*percpu_count);
else
- atomic_inc(&ref->count);
+ atomic_long_inc(&ref->count);
rcu_read_unlock_sched();
}
@@ -138,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
* Increment a percpu refcount unless its count already reached zero.
* Returns %true on success; %false on failure.
*
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
- int ret = false;
+ unsigned long __percpu *percpu_count;
+ int ret;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count)) {
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_inc(*percpu_count);
ret = true;
} else {
- ret = atomic_inc_not_zero(&ref->count);
+ ret = atomic_long_inc_not_zero(&ref->count);
}
rcu_read_unlock_sched();
@@ -166,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
* Increment a percpu refcount unless it has already been killed. Returns
* %true on success; %false on failure.
*
- * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
- * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
- * used. After the confirm_kill callback is invoked, it's guaranteed that
- * no new reference will be given out by percpu_ref_tryget().
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used. After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
*
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
int ret = false;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count)) {
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_inc(*percpu_count);
ret = true;
+ } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->count);
}
rcu_read_unlock_sched();
@@ -196,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
*
* Decrement the refcount, and if 0, call the release function (which was passed
* to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline void percpu_ref_put(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count))
- this_cpu_dec(*pcpu_count);
- else if (unlikely(atomic_dec_and_test(&ref->count)))
+ if (__ref_is_percpu(ref, &percpu_count))
+ this_cpu_dec(*percpu_count);
+ else if (unlikely(atomic_long_dec_and_test(&ref->count)))
ref->release(ref);
rcu_read_unlock_sched();
@@ -216,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
* @ref: percpu_ref to test
*
* Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
- if (__pcpu_ref_alive(ref, &pcpu_count))
+ if (__ref_is_percpu(ref, &percpu_count))
return false;
- return !atomic_read(&ref->count);
+ return !atomic_long_read(&ref->count);
}
#endif
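Putting the reworked interface together, a hedged lifecycle sketch; the containing object, its release function, and the choice of PERCPU_REF_INIT_ATOMIC are illustrative:

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct my_obj {
		struct percpu_ref ref;
		/* ... */
	};

	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		percpu_ref_exit(&obj->ref);	/* free the percpu counter before the object */
		kfree(obj);
	}

	static int my_obj_setup(struct my_obj *obj)
	{
		/* start with ref == 1 in atomic mode, switch to percpu once setup is done */
		int err = percpu_ref_init(&obj->ref, my_obj_release,
					  PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
		if (err)
			return err;

		percpu_ref_switch_to_percpu(&obj->ref);
		return 0;
	}

	static void my_obj_teardown(struct my_obj *obj)
	{
		/* drops the initial ref; my_obj_release() runs once all other refs are put */
		percpu_ref_kill(&obj->ref);
	}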
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 6f61b61b7996..a3aa63e47637 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,9 +48,9 @@
* intelligent way to determine this would be nice.
*/
#if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#define PERCPU_DYNAMIC_RESERVE (28 << 10)
#else
-#define PERCPU_DYNAMIC_RESERVE (12 << 10)
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
#endif
extern void *pcpu_base_addr;
@@ -122,11 +122,16 @@ extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
-#define alloc_percpu(type) \
- (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
+#define alloc_percpu_gfp(type, gfp) \
+ (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
+ __alignof__(type), gfp)
+#define alloc_percpu(type) \
+ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
+ __alignof__(type))
#endif /* __LINUX_PERCPU_H */
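The __alloc_percpu_gfp()/alloc_percpu_gfp() pair allows per-CPU allocation from contexts that cannot sleep; a minimal sketch:

	unsigned long __percpu *counters;

	/* GFP_NOWAIT: usable under locks or with IRQs off, but more likely to fail */
	counters = alloc_percpu_gfp(unsigned long, GFP_NOWAIT);
	if (!counters)
		return -ENOMEM;

	this_cpu_inc(*counters);
	/* ... */
	free_percpu(counters);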
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d5dd4657c8d6..50e50095c8d1 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -12,6 +12,7 @@
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
+#include <linux/gfp.h>
#ifdef CONFIG_SMP
@@ -26,14 +27,14 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key);
-#define percpu_counter_init(fbc, value) \
+#define percpu_counter_init(fbc, value, gfp) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, &__key); \
+ __percpu_counter_init(fbc, value, gfp, &__key); \
})
void percpu_counter_destroy(struct percpu_counter *fbc);
@@ -89,7 +90,8 @@ struct percpu_counter {
s64 count;
};
-static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp)
{
fbc->count = amount;
return 0;
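Both the SMP and UP variants of percpu_counter_init() now take the GFP mask; a usage sketch:

	struct percpu_counter nr_items;

	if (percpu_counter_init(&nr_items, 0, GFP_KERNEL))
		return -ENOMEM;

	percpu_counter_inc(&nr_items);
	/* ... */
	percpu_counter_destroy(&nr_items);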
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 26a8a4ed9b07..00e8e8fa7358 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -12,6 +12,7 @@
#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/gfp.h>
struct prop_global {
/*
@@ -40,7 +41,7 @@ struct prop_descriptor {
struct mutex mutex; /* serialize the prop_global switch */
};
-int prop_descriptor_init(struct prop_descriptor *pd, int shift);
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
/*
@@ -61,7 +62,7 @@ struct prop_local_percpu {
raw_spinlock_t lock; /* protect the snapshot state */
};
-int prop_local_init_percpu(struct prop_local_percpu *pl);
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
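The older proportions API gains the same explicit GFP argument; a sketch, with an illustrative shift value:

	struct prop_descriptor pd;
	struct prop_local_percpu pl;

	if (prop_descriptor_init(&pd, 10, GFP_KERNEL))	/* shift of 10 is illustrative */
		return -ENOMEM;
	if (prop_local_init_percpu(&pl, GFP_KERNEL))
		return -ENOMEM;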