author    Stephen Rothwell <sfr@canb.auug.org.au>    2009-06-30 13:43:28 +1000
committer Stephen Rothwell <sfr@canb.auug.org.au>    2009-06-30 13:43:28 +1000
commit    d6509f5fc03dd87af2b35af2502b18b5eeba82a6 (patch)
tree      1e6d8b33b4c0888b1675a15f56f3875cda181a03
parent    7ca24e14b42f9165310cb6274657ab243451bcb9 (diff)
parent    1e8c3fdac4486ee8dbc67e89df34328be1b724b3 (diff)
Merge branch 'quilt/rr'
Conflicts:
	arch/powerpc/platforms/powermac/setup.c
	kernel/cpu.c
-rw-r--r--  arch/alpha/include/asm/smp.h | 2
-rw-r--r--  arch/alpha/include/asm/topology.h | 18
-rw-r--r--  arch/alpha/kernel/smp.c | 14
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 8
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 7
-rw-r--r--  arch/arm/include/asm/smp.h | 1
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 4
-rw-r--r--  arch/arm/kernel/smp.c | 10
-rw-r--r--  arch/arm/mm/context.c | 2
-rw-r--r--  arch/arm/mm/flush.c | 10
-rw-r--r--  arch/frv/include/asm/gdb-stub.h | 1
-rw-r--r--  arch/ia64/include/asm/smp.h | 1
-rw-r--r--  arch/ia64/include/asm/topology.h | 3
-rw-r--r--  arch/ia64/kernel/smp.c | 2
-rw-r--r--  arch/m32r/include/asm/mmu_context.h | 4
-rw-r--r--  arch/m32r/include/asm/smp.h | 2
-rw-r--r--  arch/m32r/kernel/smp.c | 30
-rw-r--r--  arch/m32r/kernel/smpboot.c | 2
-rw-r--r--  arch/mips/alchemy/common/time.c | 2
-rw-r--r--  arch/mips/include/asm/mach-ip27/topology.h | 2
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 10
-rw-r--r--  arch/mips/include/asm/smp-ops.h | 2
-rw-r--r--  arch/mips/include/asm/smp.h | 2
-rw-r--r--  arch/mips/kernel/smp-cmp.c | 9
-rw-r--r--  arch/mips/kernel/smp-mt.c | 4
-rw-r--r--  arch/mips/kernel/smp-up.c | 3
-rw-r--r--  arch/mips/kernel/smp.c | 4
-rw-r--r--  arch/mips/mipssim/sim_smtc.c | 5
-rw-r--r--  arch/mips/mm/c-octeon.c | 2
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c | 4
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c | 4
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c | 4
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c | 5
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c | 5
-rw-r--r--  arch/mn10300/include/asm/gdb-stub.h | 1
-rw-r--r--  arch/mn10300/include/asm/mmu_context.h | 12
-rw-r--r--  arch/parisc/include/asm/smp.h | 1
-rw-r--r--  arch/powerpc/include/asm/smp.h | 2
-rw-r--r--  arch/powerpc/include/asm/topology.h | 12
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 4
-rw-r--r--  arch/powerpc/kernel/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 2
-rw-r--r--  arch/s390/include/asm/smp.h | 2
-rw-r--r--  arch/s390/include/asm/topology.h | 1
-rw-r--r--  arch/s390/kernel/smp.c | 4
-rw-r--r--  arch/sh/include/asm/smp.h | 1
-rw-r--r--  arch/sh/include/asm/topology.h | 1
-rw-r--r--  arch/sparc/include/asm/smp_64.h | 1
-rw-r--r--  arch/sparc/include/asm/topology_64.h | 16
-rw-r--r--  arch/um/include/asm/mmu_context.h | 4
-rw-r--r--  arch/um/kernel/smp.c | 2
-rw-r--r--  arch/x86/include/asm/lguest_hcall.h | 2
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 6
-rw-r--r--  arch/x86/include/asm/smp.h | 1
-rw-r--r--  arch/x86/kernel/acpi/cstate.c | 36
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 7
-rw-r--r--  arch/x86/kernel/ldt.c | 4
-rw-r--r--  arch/x86/kernel/process.c | 6
-rw-r--r--  arch/x86/kernel/setup.c | 1
-rw-r--r--  arch/x86/kernel/smpboot.c | 9
-rw-r--r--  arch/x86/lguest/boot.c | 2
-rw-r--r--  arch/x86/mm/tlb.c | 15
-rw-r--r--  arch/x86/xen/mmu.c | 4
-rw-r--r--  drivers/acpi/processor_perflib.c | 3
-rw-r--r--  drivers/acpi/processor_throttling.c | 85
-rw-r--r--  drivers/block/virtio_blk.c | 5
-rw-r--r--  drivers/firmware/dcdbas.c | 49
-rw-r--r--  drivers/net/sfc/efx.c | 3
-rw-r--r--  drivers/oprofile/buffer_sync.c | 3
-rw-r--r--  include/asm-generic/topology.h | 17
-rw-r--r--  include/linux/cpumask.h | 709
-rw-r--r--  include/linux/interrupt.h | 2
-rw-r--r--  include/linux/lguest.h | 2
-rw-r--r--  include/linux/sched.h | 3
-rw-r--r--  include/linux/smp.h | 11
-rw-r--r--  include/linux/topology.h | 6
-rw-r--r--  init/main.c | 5
-rw-r--r--  kernel/kthread.c | 10
-rw-r--r--  kernel/module.c | 3
-rw-r--r--  kernel/smp.c | 7
-rw-r--r--  kernel/trace/trace.c | 7
-rw-r--r--  mm/quicklist.c | 3
-rw-r--r--  virt/kvm/kvm_main.c | 3
84 files changed, 489 insertions, 800 deletions
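The bulk of this merge is a mechanical conversion from the old by-value cpumask_t helpers (cpu_set, cpu_isset, cpu_clear, for_each_cpu_mask, direct mm->cpu_vm_mask access) to the pointer-based struct cpumask API (cpumask_set_cpu, cpumask_test_cpu, cpumask_clear_cpu, for_each_cpu, mm_cpumask()). As a reading aid, a minimal sketch of the recurring pattern follows; it is illustrative only, not a hunk from this merge, and do_send_ipi() is a hypothetical placeholder.

/* Illustrative sketch only -- not part of the merge. */
#include <linux/cpumask.h>

static void do_send_ipi(int cpu, unsigned int action)
{
	/* platform-specific IPI raise would go here */
}

/* Old style, removed throughout the hunks below: the mask is a full
 * cpumask_t passed by value. */
static void send_ipi_old(cpumask_t mask, unsigned int action)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		do_send_ipi(cpu, action);
}

/* New style, added throughout: the mask travels as a const pointer, so
 * the same code keeps working if cpumasks later move off-stack. */
static void send_ipi_new(const struct cpumask *mask, unsigned int action)
{
	int cpu;

	for_each_cpu(cpu, mask)
		do_send_ipi(cpu, action);
}

/* Call sites change accordingly, e.g.
 *	send_ipi_old(cpumask_of_cpu(cpu), action)  becomes
 *	send_ipi_new(cpumask_of(cpu), action),
 * and direct mm->cpu_vm_mask accesses become mm_cpumask(mm). */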
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 547e90951cec..3f390e8cc0b3 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -47,7 +47,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
extern int smp_num_cpus;
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#else /* CONFIG_SMP */
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index b4f284c72ff3..36b3a30ba0e5 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -22,23 +22,6 @@ static inline int cpu_to_node(int cpu)
return node;
}
-static inline cpumask_t node_to_cpumask(int node)
-{
- cpumask_t node_cpu_mask = CPU_MASK_NONE;
- int cpu;
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) == node)
- cpu_set(cpu, node_cpu_mask);
- }
-
-#ifdef DEBUG_NUMA
- printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
-#endif
-
- return node_cpu_mask;
-}
-
extern struct cpumask node_to_cpumask_map[];
/* FIXME: This is dumb, recalculating every time. But simple. */
static const struct cpumask *cpumask_of_node(int node)
@@ -55,7 +38,6 @@ static const struct cpumask *cpumask_of_node(int node)
return &node_to_cpumask_map[node];
}
-#define pcibus_to_cpumask(bus) (cpu_online_map)
#define cpumask_of_pcibus(bus) (cpu_online_mask)
#endif /* !CONFIG_NUMA */
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index b1fe5674c3a1..42aa078a5e4d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -548,16 +548,16 @@ setup_profiling_timer(unsigned int multiplier)
static void
-send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
+send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
int i;
mb();
- for_each_cpu_mask(i, to_whom)
+ for_each_cpu(i, to_whom)
set_bit(operation, &ipi_data[i].bits);
mb();
- for_each_cpu_mask(i, to_whom)
+ for_each_cpu(i, to_whom)
wripir(i);
}
@@ -624,7 +624,7 @@ smp_send_reschedule(int cpu)
printk(KERN_WARNING
"smp_send_reschedule: Sending IPI to self.\n");
#endif
- send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void
@@ -636,17 +636,17 @@ smp_send_stop(void)
if (hard_smp_processor_id() != boot_cpu_id)
printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
- send_ipi_message(to_whom, IPI_CPU_STOP);
+ send_ipi_message(&to_whom, IPI_CPU_STOP);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
static void
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1a711ea8418b..fd03fb63a332 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
}
static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len, int write)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 263fed05ea33..c3fe09f8fd77 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -101,14 +101,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#ifdef CONFIG_SMP
/* check for possible thread migration */
- if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+ if (!cpumask_empty(mm_cpumask(next)) &&
+ !cpumask_test_cpu(cpu, mm_cpumask(next)))
__flush_icache_all();
#endif
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
check_context(next);
cpu_switch_mm(next->pgd, next);
if (cache_is_vivt())
- cpu_clear(cpu, prev->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
}
#endif
}
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a06e735b262a..e0d763be1846 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -93,7 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* show local interrupt info
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c964f3fc3bc5..a45ab5dd8255 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB))
dsb();
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
if (tlb_flag(TLB_V3_FULL))
asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
if (tlb_flag(TLB_WB))
dsb();
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
if (tlb_flag(TLB_V3_PAGE))
asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
if (tlb_flag(TLB_V4_U_PAGE))
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index de885fd256c5..e0d32770bb3d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
read_lock(&tasklist_lock);
for_each_process(p) {
if (p->mm)
- cpu_clear(cpu, p->mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
}
read_unlock(&tasklist_lock);
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- cpu_set(cpu, mm->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu_switch_mm(mm->pgd, mm);
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
void flush_tlb_mm(struct mm_struct *mm)
{
if (tlb_ops_need_broadcast())
- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
else
local_flush_tlb_mm(mm);
}
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = uaddr;
- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_page(vma, uaddr);
}
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
ta.ta_vma = vma;
ta.ta_start = start;
ta.ta_end = end;
- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_range(vma, start, end);
}
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc74380..6bda76a43199 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
}
spin_unlock(&cpu_asid_lock);
- mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+ cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
mm->context.id = asid;
}
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c07222eb5ce0..d9773a63e151 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
return;
}
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long len, int write)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
}
/* VIPT non-aliasing cache */
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
vma->vm_flags & VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
/* only flushing the kernel mapping on non-aliasing VIPT */
diff --git a/arch/frv/include/asm/gdb-stub.h b/arch/frv/include/asm/gdb-stub.h
index 24f9738670bd..2da716407ff2 100644
--- a/arch/frv/include/asm/gdb-stub.h
+++ b/arch/frv/include/asm/gdb-stub.h
@@ -90,7 +90,6 @@ extern void gdbstub_do_rx(void);
extern asmlinkage void __debug_stub_init_break(void);
extern asmlinkage void __break_hijack_kernel_event(void);
extern asmlinkage void __break_hijack_kernel_event_breaks_here(void);
-extern asmlinkage void start_kernel(void);
extern asmlinkage void gdbstub_rx_handler(void);
extern asmlinkage void gdbstub_rx_irq(void);
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index d217d1d4e051..0b3b3997decd 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else /* CONFIG_SMP */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 7b4c8c70b2d1..9ac48c34daac 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,6 @@
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
#define cpumask_of_node(node) (&node_to_cpu_mask[node])
/*
@@ -103,8 +102,6 @@ void build_cpu_to_node_map(void);
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define smt_capable() (smp_num_siblings > 1)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f0c521b0ba4c..75d75c9f31f3 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -301,7 +301,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
return;
}
- smp_call_function_mask(mm->cpu_vm_mask,
+ smp_call_function_many(mm_cpumask(mm),
(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
local_irq_disable();
local_finish_flush_tlb_mm(mm);
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d0..a735cc567931 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
if (prev != next) {
#ifdef CONFIG_SMP
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
#endif /* CONFIG_SMP */
/* Set MPTB = next->pgd */
*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
}
#ifdef CONFIG_SMP
else
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
activate_context(next);
#endif /* CONFIG_SMP */
}
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc3..e67ded1aab91 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif /* not __ASSEMBLY__ */
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..e6ec3cc12854 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -85,7 +85,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);
static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +113,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
void smp_send_reschedule(int cpu_id)
{
WARN_ON(cpu_is_offline(cpu_id));
- send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+ send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}
/*==========================================================================*
@@ -168,7 +168,7 @@ void smp_flush_cache_all(void)
spin_lock(&flushcache_lock);
mask=cpus_addr(cpumask);
atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
- send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+ send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
while (flushcache_cpumask)
mb();
@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = mm->cpu_vm_mask;
+ cpu_mask = *mm_cpumask(mm);
cpu_clear(cpu_id, cpu_mask);
if (*mmc != NO_CONTEXT) {
@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
if (mm == current->mm)
activate_context(mm);
else
- cpu_clear(cpu_id, mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
local_irq_restore(flags);
}
if (!cpus_empty(cpu_mask))
@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = mm->cpu_vm_mask;
+ cpu_mask = *mm_cpumask(mm);
cpu_clear(cpu_id, cpu_mask);
#ifdef DEBUG_SMP
@@ -424,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
* We have to send the IPI only to
* CPUs affected.
*/
- send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+ send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
while (!cpus_empty(flush_cpumask)) {
/* nothing. lockup detection does not belong here */
@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
if (flush_mm == current->active_mm)
activate_context(flush_mm);
else
- cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
} else {
unsigned long va = flush_va;
@@ -546,14 +546,14 @@ static void stop_this_cpu(void *dummy)
for ( ; ; );
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+ send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}
/*==========================================================================*
@@ -729,7 +729,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
cpumask = cpu_online_map;
cpu_clear(smp_processor_id(), cpumask);
- send_IPI_mask(cpumask, ipi_num, try);
+ send_IPI_mask(&cpumask, ipi_num, try);
}
/*==========================================================================*
@@ -752,7 +752,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
* ---------- --- --------------------------------------------------------
*
*==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
cpumask_t physid_mask, tmp;
int cpu_id, phys_id;
@@ -761,11 +761,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
if (num_cpus <= 1) /* NO MP */
return;
- cpus_and(tmp, cpumask, cpu_online_map);
- BUG_ON(!cpus_equal(cpumask, tmp));
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
+ BUG_ON(!cpumask_equal(cpumask, &tmp));
physid_mask = CPU_MASK_NONE;
- for_each_cpu_mask(cpu_id, cpumask){
+ for_each_cpu(cpu_id, cpumask) {
if ((phys_id = cpu_to_physid(cpu_id)) != -1)
cpu_set(phys_id, physid_mask);
}
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a827..ffc3bbcfd3bb 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(&cpu_possible_map);
#endif
show_mp_info(nr_cpu);
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 33fbae79af5e..7e94a49224d7 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -89,7 +89,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
.irq = AU1000_RTC_MATCH2_INT,
.set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
- .cpumask = CPU_MASK_ALL_PTR,
+ .cpumask = cpu_all_mask,
};
static struct irqaction au1x_rtcmatch2_irqaction = {
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 07547231e078..59f28e9412c4 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -24,12 +24,10 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
#define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid)
#define parent_node(node) (node)
-#define node_to_cpumask(node) (hub_data(node)->h_cpus)
#define cpumask_of_node(node) (&hub_data(node)->h_cpus)
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
-#define pcibus_to_cpumask(bus) (cpu_online_map)
#define cpumask_of_pcibus(bus) (cpu_online_mask)
extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index d3bea88d8744..d9743536a621 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -178,8 +178,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Mark current->active_mm as not "active" anymore.
* We don't want to mislead possible IPI tlb flush routines.
*/
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
local_irq_restore(flags);
}
@@ -235,8 +235,8 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* mark mmu ownership change */
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
local_irq_restore(flags);
}
@@ -258,7 +258,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
local_irq_save(flags);
- if (cpu_isset(cpu, mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index fd545547b8aa..9e09af34c8a8 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -19,7 +19,7 @@ struct task_struct;
struct plat_smp_ops {
void (*send_ipi_single)(int cpu, unsigned int action);
- void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
+ void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
void (*init_secondary)(void);
void (*smp_finish)(void);
void (*cpus_done)(void);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index aaa2d4ab26dc..e15f11a09311 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -78,6 +78,6 @@ extern void play_dead(void);
extern asmlinkage void smp_call_function_interrupt(void);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif /* __ASM_SMP_H */
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 653be061b9ec..31a470e7ac99 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -54,7 +54,10 @@ static int __init allowcpus(char *str)
cpus_clear(cpu_allow_map);
if (cpulist_parse(str, &cpu_allow_map) == 0) {
cpu_set(0, cpu_allow_map);
- cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
+ unsigned int i;
+ for (i = 1; i < nr_cpu_ids; i++)
+ if (!cpumask_test_cpu(i, &cpu_allow_map))
+ set_cpu_possible(i, false);
len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map);
buf[len] = '\0';
pr_debug("Allowable CPUs: %s\n", buf);
@@ -136,11 +139,11 @@ void cmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
cmp_send_ipi_single(i, action);
}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 6f7ee5ac46ee..9538ca42e008 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -141,11 +141,11 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
vsmp_send_ipi_single(i, action);
}
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 2508d55d68fd..00500fea2750 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -18,7 +18,8 @@ static void up_send_ipi_single(int cpu, unsigned int action)
panic(KERN_ERR "%s called", __func__);
}
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
panic(KERN_ERR "%s called", __func__);
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index bc7d9b05e2f4..b2a7b81cba02 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -129,7 +129,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_idle();
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
@@ -184,7 +184,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(&cpu_possible_map);
#endif
}
diff --git a/arch/mips/mipssim/sim_smtc.c b/arch/mips/mipssim/sim_smtc.c
index d6e4f656ad14..5da30b6a65b7 100644
--- a/arch/mips/mipssim/sim_smtc.c
+++ b/arch/mips/mipssim/sim_smtc.c
@@ -43,11 +43,12 @@ static void ssmtc_send_ipi_single(int cpu, unsigned int action)
/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
}
-static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
ssmtc_send_ipi_single(i, action);
}
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index b165cdcb2818..d3914f2335de 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -79,7 +79,7 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
* cores it has been used on
*/
if (vma)
- mask = vma->vm_mm->cpu_vm_mask;
+ mask = *mm_cpumask(vma->vm_mm);
else
mask = cpu_online_map;
cpu_clear(cpu, mask);
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 499ffe5475df..192cfd2a539c 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -21,11 +21,11 @@ static void msmtc_send_ipi_single(int cpu, unsigned int action)
smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}
-static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
msmtc_send_ipi_single(i, action);
}
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 8ace27716232..326fe7a392e8 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -97,11 +97,11 @@ static void yos_send_ipi_single(int cpu, unsigned int action)
}
}
-static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
yos_send_ipi_single(i, action);
}
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 060d853d7b35..f61c164d1e67 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -421,7 +421,7 @@ static void __init node_mem_init(cnodeid_t node)
/*
* A node with nothing. We use it to avoid any special casing in
- * node_to_cpumask
+ * cpumask_of_node
*/
static struct node_data null_node = {
.hub = {
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index cbcd7eb83bd1..9aa8f2951df6 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -165,11 +165,11 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
}
-static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
ip27_send_ipi_single(i, action);
}
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 314691648c97..47b347c992ea 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -82,11 +82,12 @@ static void bcm1480_send_ipi_single(int cpu, unsigned int action)
__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
}
-static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
bcm1480_send_ipi_single(i, action);
}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index cad14003b84f..c00a5cb1128d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -70,11 +70,12 @@ static void sb1250_send_ipi_single(int cpu, unsigned int action)
__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
}
-static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
sb1250_send_ipi_single(i, action);
}
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index e5a6368559af..556cce992548 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -109,7 +109,6 @@ extern asmlinkage int gdbstub_intercept(struct pt_regs *, enum exception_code);
extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void __gdbstub_bug_trap(void);
extern asmlinkage void __gdbstub_pause(void);
-extern asmlinkage void start_kernel(void);
#ifndef CONFIG_MN10300_CACHE_DISABLED
extern asmlinkage void gdbstub_purge_cache(void);
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index a9e2e34f69b0..cb294c244de3 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -38,13 +38,13 @@ extern unsigned long mmu_context_cache[NR_CPUS];
#define enter_lazy_tlb(mm, tsk) do {} while (0)
#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, task) \
- cpu_set((cpu), (task)->cpu_vm_mask)
-#define cpu_maybe_ran_vm(cpu, task) \
- cpu_test_and_set((cpu), (task)->cpu_vm_mask)
+#define cpu_ran_vm(cpu, mm) \
+ cpumask_set_cpu((cpu), mm_cpumask(mm))
+#define cpu_maybe_ran_vm(cpu, mm) \
+ cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
#else
-#define cpu_ran_vm(cpu, task) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, task) true
+#define cpu_ran_vm(cpu, mm) do {} while (0)
+#define cpu_maybe_ran_vm(cpu, mm) true
#endif /* CONFIG_SMP */
/*
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 21eb45a52629..2e73623feb6b 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -30,7 +30,6 @@ extern void smp_send_all_nop(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#endif /* !ASSEMBLY */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index c25f73d1d842..435b58a848da 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -146,7 +146,7 @@ extern void smp_generic_take_timebase(void);
extern struct smp_ops_t *smp_ops;
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 054a16d68082..0b3a0183df3a 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -17,11 +17,6 @@ static inline int cpu_to_node(int cpu)
#define parent_node(node) (node)
-static inline cpumask_t node_to_cpumask(int node)
-{
- return numa_cpumask_lookup_table[node];
-}
-
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
int of_node_to_nid(struct device_node *device);
@@ -36,11 +31,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
}
#endif
-#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
- CPU_MASK_ALL : \
- node_to_cpumask(pcibus_to_node(bus)) \
- )
-
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
@@ -105,8 +95,6 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#ifdef CONFIG_PPC64
#include <asm/smp.h>
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
-#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 02fed27af7f6..a5e0bc727f03 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -434,7 +434,7 @@ void __init smp_setup_cpu_maps(void)
j, cpu, intserv[j]);
cpu_set(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, intserv[j]);
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_possible(cpu, true);
cpu++;
}
}
@@ -480,7 +480,7 @@ void __init smp_setup_cpu_maps(void)
maxcpus);
for (cpu = 0; cpu < maxcpus; cpu++)
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_possible(cpu, true);
out:
of_node_put(dn);
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0b47de07302d..d4f318721e65 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -189,11 +189,11 @@ void arch_send_call_function_single_ipi(int cpu)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
unsigned int cpu;
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu(cpu, mask)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6d4da7b46b41..da12c48ef1d0 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -952,7 +952,7 @@ void __init pmac_setup_smp(void)
int cpu;
for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_possible(cpu, true);
smp_ops = &psurge_smp_ops;
}
#endif /* CONFIG_PPC32 */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 72137bc907ac..8ebcf8836aec 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -87,7 +87,7 @@ extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[];
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 5e0ad618dc45..6e7211abd950 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,7 +9,6 @@ const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
extern cpumask_t cpu_core_map[NR_CPUS];
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
int topology_set_cpu_management(int fc);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2270730f5354..c058a4d1edfb 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -129,11 +129,11 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
udelay(10);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
int cpu;
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu(cpu, mask)
smp_ext_bitcall(cpu, ec_call_function);
}
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index ca64f43abe67..53ef26ced75f 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -44,7 +44,6 @@ void plat_send_ipi(unsigned int cpu, unsigned int message);
void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index b69ee850906d..b2180f8752ce 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -31,7 +31,6 @@
#define cpu_to_node(cpu) ((void)(cpu),0)
#define parent_node(node) ((void)(node),0)
-#define node_to_cpumask(node) ((void)node, cpu_online_map)
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index becb6bf353a9..f49e11cd4ded 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -36,7 +36,6 @@ extern int sparc64_multi_core;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* General functions that each host system must provide.
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index e5ea8d332421..0c35499efe36 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -12,22 +12,8 @@ static inline int cpu_to_node(int cpu)
#define parent_node(node) (node)
-static inline cpumask_t node_to_cpumask(int node)
-{
- return numa_cpumask_lookup_table[node];
-}
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
-/*
- * Returns a pointer to the cpumask of CPUs on Node 'node'.
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node) \
- cpumask_t *v = &(numa_cpumask_lookup_table[node])
-
-#define node_to_cpumask_ptr_next(v, node) \
- v = &(numa_cpumask_lookup_table[node])
-
struct pci_bus;
#ifdef CONFIG_PCI
extern int pcibus_to_node(struct pci_bus *pbus);
@@ -72,8 +58,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define mc_capable() (sparc64_multi_core)
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 54f42e8b0105..34d813011b7a 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id();
if(prev != next){
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
if(next != &init_mm)
__switch_mm(&next->context.id);
}
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 98351c78bc81..106bf27e2a9a 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -111,7 +111,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
int i;
for (i = 0; i < ncpus; ++i)
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
cpu_clear(me, cpu_online_map);
cpu_set(me, cpu_online_map);
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index d31c4a684078..33600a66755f 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -30,7 +30,7 @@
#include <asm/hw_irq.h>
#include <asm/kvm_para.h>
-/*G:031 But first, how does our Guest contact the Host to ask for privileged
+/*G:030 But first, how does our Guest contact the Host to ask for privileged
* operations? There are two ways: the direct way is to make a "hypercall",
* to make requests of the Host Itself.
*
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f923203dc39a..4a2d4e0c18d9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
- cpu_clear(cpu, prev->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
#ifdef CONFIG_SMP
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
percpu_write(cpu_tlbstate.active_mm, next);
#endif
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
/* Re-load page tables */
load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 6a84ed166aec..1e796782cd7b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -121,7 +121,6 @@ static inline void arch_send_call_function_single_ipi(int cpu)
smp_ops.send_call_func_single_ipi(cpu);
}
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_ops.send_call_func_ipi(mask);
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 8c44c232efcb..827b5207523e 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -63,6 +63,12 @@ struct cstate_entry {
};
static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
+/* Used for the cross-CPU calls */
+struct acpi_processor_cx_cross_cpu {
+ struct acpi_processor_cx *cx;
+ long retval;
+};
+
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
#define MWAIT_SUBSTATE_MASK (0xf)
@@ -77,10 +83,10 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
#define NATIVE_CSTATE_BEYOND_HALT (2)
-static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
+static void acpi_processor_ffh_cstate_probe_cpu(void *_cxcc)
{
- struct acpi_processor_cx *cx = _cx;
- long retval;
+ struct acpi_processor_cx_cross_cpu *cxcc = _cxcc;
+ struct acpi_processor_cx *cx = cxcc->cx;
unsigned int eax, ebx, ecx, edx;
unsigned int edx_part;
unsigned int cstate_type; /* C-state type and not ACPI C-state type */
@@ -94,16 +100,16 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
- retval = 0;
+ cxcc->retval = 0;
if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
- retval = -1;
+ cxcc->retval = -1;
goto out;
}
/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
- retval = -1;
+ cxcc->retval = -1;
goto out;
}
@@ -117,7 +123,7 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
cx->address);
out:
- return retval;
+ return;
}
int acpi_processor_ffh_cstate_probe(unsigned int cpu,
@@ -125,6 +131,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
{
struct cstate_entry *percpu_entry;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct acpi_processor_cx_cross_cpu cxcc = { .cx = cx, };
long retval;
if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
@@ -137,13 +144,18 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
percpu_entry->states[cx->index].eax = 0;
percpu_entry->states[cx->index].ecx = 0;
- /* Make sure we are running on right CPU */
+ /* Run acpi_processor_ffh_cstate_probe_cpu() on the target CPU */
- retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
+ retval = smp_call_function_single(cpu,
+ acpi_processor_ffh_cstate_probe_cpu, &cxcc, 1);
if (retval == 0) {
- /* Use the hint in CST */
- percpu_entry->states[cx->index].eax = cx->address;
- percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
+ retval = cxcc.retval;
+ if (retval == 0) {
+ /* Use the hint in CST */
+ percpu_entry->states[cx->index].eax = cx->address;
+ percpu_entry->states[cx->index].ecx =
+ MWAIT_ECX_INTERRUPT_BREAK;
+ }
}
return retval;
}
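Beyond the cpumask change, the cstate.c hunk above switches from work_on_cpu() to smp_call_function_single(). work_on_cpu() returned the callback's long result directly, whereas smp_call_function_single() handlers are void functions, so the result now rides back in the argument structure (acpi_processor_cx_cross_cpu). A hedged sketch of that pattern, with hypothetical names (probe_ctx, probe_on_cpu, probe_on):

/* Illustrative sketch of the "return a value through the IPI argument"
 * pattern; names here are hypothetical, not from the patch. */
#include <linux/smp.h>

struct probe_ctx {
	int input;
	long retval;
};

static void probe_on_cpu(void *_ctx)
{
	struct probe_ctx *ctx = _ctx;

	/* Runs on the target CPU; the handler cannot return a value,
	 * so it reports through the shared structure instead. */
	ctx->retval = ctx->input ? 0 : -1;
}

static long probe_on(unsigned int cpu, int input)
{
	struct probe_ctx ctx = { .input = input };
	int err;

	/* wait=1: block until probe_on_cpu() has finished on 'cpu'. */
	err = smp_call_function_single(cpu, probe_on_cpu, &ctx, 1);
	return err ? err : ctx.retval;
}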
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4d0216fcb36c..41d79f77a1d6 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -215,17 +215,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
if (cfg) {
- if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+ if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
kfree(cfg);
cfg = NULL;
- } else if (!alloc_cpumask_var_node(&cfg->old_domain,
+ } else if (!zalloc_cpumask_var_node(&cfg->old_domain,
GFP_ATOMIC, node)) {
free_cpumask_var(cfg->domain);
kfree(cfg);
cfg = NULL;
- } else {
- cpumask_clear(cfg->domain);
- cpumask_clear(cfg->old_domain);
}
}
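The io_apic.c hunk above (and the process.c and smpboot.c hunks further down) replaces the alloc_cpumask_var()/cpumask_clear() pair with the zalloc_cpumask_var() variants, which hand back an already-zeroed mask. A small sketch under the assumption of a hypothetical mask called tracked_cpus:

/* Sketch only; tracked_cpus and tracked_cpus_init() are hypothetical. */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>

static cpumask_var_t tracked_cpus;

static int __init tracked_cpus_init(void)
{
	/* Previously: alloc_cpumask_var() followed by cpumask_clear(),
	 * because an off-stack allocation may contain garbage.
	 * zalloc_cpumask_var() returns the mask already cleared. */
	if (!zalloc_cpumask_var(&tracked_cpus, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}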
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 71f1d99a635d..ec6ef60cbd17 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
#ifdef CONFIG_SMP
preempt_disable();
load_LDT(pc);
- if (!cpus_equal(current->mm->cpu_vm_mask,
- cpumask_of_cpu(smp_processor_id())))
+ if (!cpumask_equal(mm_cpumask(current->mm),
+ cpumask_of(smp_processor_id())))
smp_call_function(flush_ldt, current->mm, 1);
preempt_enable();
#else
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 994dd6a4a2a0..b8143acf30f3 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -572,10 +572,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
void __init init_c1e_mask(void)
{
/* If we're using c1e_idle, we need to allocate c1e_mask. */
- if (pm_idle == c1e_idle) {
- alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
- cpumask_clear(c1e_mask);
- }
+ if (pm_idle == c1e_idle)
+ zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}
static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index de2cab132844..d1d9b0066a4a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1088,7 +1088,6 @@ void __init x86_quirk_time_init(void)
return;
}
- irq0.mask = cpumask_of_cpu(0);
setup_irq(0, &irq0);
}
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2fecda69ee64..70bd79be115e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1057,12 +1057,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
#endif
current_thread_info()->cpu = 0; /* needed? */
for_each_possible_cpu(i) {
- alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
- alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
- alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
- cpumask_clear(per_cpu(cpu_core_map, i));
- cpumask_clear(per_cpu(cpu_sibling_map, i));
- cpumask_clear(cpu_data(i).llc_shared_map);
+ zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
}
set_cpu_sibling_map(0);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7bc65f0f62c4..0188fd37b6c0 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1079,7 +1079,7 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
return insn_len;
}
-/*G:030 Once we get to lguest_init(), we know we're a Guest. The various
+/*G:029 Once we get to lguest_init(), we know we're a Guest. The various
* pv_ops structures in the kernel provide points for (almost) every routine we
* have to override to avoid privileged instructions. */
__init void lguest_init(void)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e97017e95..010b2e575002 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
{
if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
BUG();
- cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+ cpumask_clear_cpu(cpu,
+ mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
@@ -235,8 +236,8 @@ void flush_tlb_current_task(void)
preempt_disable();
local_flush_tlb();
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
preempt_enable();
}
@@ -250,8 +251,8 @@ void flush_tlb_mm(struct mm_struct *mm)
else
leave_mm(smp_processor_id());
}
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
preempt_enable();
}
@@ -269,8 +270,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
leave_mm(smp_processor_id());
}
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, va);
preempt_enable();
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 4ceb28581652..6fa64cb1b01e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+ if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
&& per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
}
return;
}
- cpumask_copy(mask, &mm->cpu_vm_mask);
+ cpumask_copy(mask, mm_cpumask(mm));
/* It's possible that a vcpu may have a stale reference to our
cr3, because its in lazy mode, and it hasn't yet flushed
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 60e543d3234e..ac7f6b306460 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -509,7 +509,7 @@ int acpi_processor_preregister_performance(
struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain;
- if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&performance_mutex);
@@ -556,7 +556,6 @@ int acpi_processor_preregister_performance(
* Now that we have _PSD data from all CPUs, lets setup P-state
* domain info.
*/
- cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 16560014f7bd..aa1dd6a75a73 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -74,7 +74,7 @@ static int acpi_processor_update_tsd_coord(void)
struct acpi_tsd_package *pdomain, *match_pdomain;
struct acpi_processor_throttling *pthrottling, *match_pthrottling;
- if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
/*
@@ -102,7 +102,6 @@ static int acpi_processor_update_tsd_coord(void)
if (retval)
goto err_ret;
- cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
@@ -852,10 +851,21 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
return 0;
}
+struct get_throttling {
+ struct acpi_processor *pr;
+ int ret;
+};
+
+static void get_throttling(void *_gt)
+{
+ struct get_throttling *gt = _gt;
+
+ gt->ret = gt->pr->throttling.acpi_processor_get_throttling(gt->pr);
+}
+
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
- cpumask_var_t saved_mask;
- int ret;
+ struct get_throttling gt;
if (!pr)
return -EINVAL;
@@ -863,21 +873,9 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
if (!pr->flags.throttling)
return -ENODEV;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
- /*
- * Migrate task to the cpu pointed by pr.
- */
- cpumask_copy(saved_mask, &current->cpus_allowed);
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(pr->id));
- ret = pr->throttling.acpi_processor_get_throttling(pr);
- /* restore the previous state */
- set_cpus_allowed_ptr(current, saved_mask);
- free_cpumask_var(saved_mask);
-
- return ret;
+ gt.pr = pr;
+ smp_call_function_single(pr->id, get_throttling, &gt, 1);
+ return gt.ret;
}
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1018,9 +1016,23 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}
+struct set_throttling_info {
+ struct acpi_processor *pr;
+ struct acpi_processor_throttling *p_throttling;
+ int target_state;
+ int ret;
+};
+
+static void set_throttling(void *_sti)
+{
+ struct set_throttling_info *s = _sti;
+
+ s->ret = s->p_throttling->acpi_processor_set_throttling(s->pr,
+ s->target_state);
+}
+
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
- cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
@@ -1037,15 +1049,9 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL))
return -ENOMEM;
- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
- free_cpumask_var(saved_mask);
- return -ENOMEM;
- }
-
- cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
cpumask_and(online_throttling_cpus, cpu_online_mask,
@@ -1067,10 +1073,10 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(pr->id));
- ret = p_throttling->acpi_processor_set_throttling(pr,
- t_state.target_state);
+ struct set_throttling_info sti
+ = { pr, p_throttling, t_state.target_state };
+ smp_call_function_single(pr->id, set_throttling, &sti, 1);
+ ret = sti.ret;
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
@@ -1078,6 +1084,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* cpus.
*/
for_each_cpu(i, online_throttling_cpus) {
+ struct set_throttling_info sti;
+
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1099,11 +1107,11 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue;
}
t_state.cpu = i;
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(i));
- ret = match_pr->throttling.
- acpi_processor_set_throttling(
- match_pr, t_state.target_state);
+ sti.pr = match_pr;
+ sti.p_throttling = &match_pr->throttling;
+ sti.target_state = t_state.target_state;
+ smp_call_function_single(i, set_throttling, &sti, 1);
+ ret = sti.ret;
}
}
/*
@@ -1117,11 +1125,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
- /* restore the previous state */
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, saved_mask);
- free_cpumask_var(online_throttling_cpus);
- free_cpumask_var(saved_mask);
return ret;
}
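
Instead of temporarily rebinding the current task with set_cpus_allowed_ptr(), the throttling get/set paths above now pack their arguments and result into a small struct and run the work on the target processor via smp_call_function_single() with wait set to 1. A condensed sketch of that pattern follows (struct and function names are illustrative); note that the callback executes with interrupts disabled on the target CPU and therefore must not sleep. The dcdbas.c hunk below uses the same call, pinned to CPU 0, to issue its SMI.

#include <linux/smp.h>

struct remote_work {
	int input;
	int ret;
};

static void do_remote_work(void *_rw)
{
	struct remote_work *rw = _rw;

	/* Runs on the CPU handed to smp_call_function_single(). */
	rw->ret = rw->input;	/* stand-in for the real per-CPU operation */
}

static int run_on_cpu(int cpu, int input)
{
	struct remote_work rw = { .input = input };

	/*
	 * wait=1 blocks until do_remote_work() has finished on 'cpu',
	 * so reading rw.ret afterwards is safe.
	 */
	smp_call_function_single(cpu, do_remote_work, &rw, 1);
	return rw.ret;
}
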
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 43db3ea15b54..fbeefb68a31f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -213,7 +213,7 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
- return -ENOIOCTLCMD;
+ return -ENOTTY;
return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
}
@@ -360,6 +360,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+ /* No need to bounce any requests */
+ blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+
/* No real sector limit. */
blk_queue_max_sectors(vblk->disk->queue, -1U);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 18d65fb42ee7..b24ccb546e59 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -237,6 +237,22 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
return count;
}
+static void generate_smi(void *_smi_cmd)
+{
+ struct smi_cmd *smi_cmd = _smi_cmd;
+
+ /* generate SMI */
+ asm volatile (
+ "outb %b0,%w1"
+ : /* no output args */
+ : "a" (smi_cmd->command_code),
+ "d" (smi_cmd->command_address),
+ "b" (smi_cmd->ebx),
+ "c" (smi_cmd->ecx)
+ : "memory"
+ );
+}
+
/**
* dcdbas_smi_request: generate SMI request
*
@@ -244,9 +260,6 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
*/
int dcdbas_smi_request(struct smi_cmd *smi_cmd)
{
- cpumask_var_t old_mask;
- int ret = 0;
-
if (smi_cmd->magic != SMI_CMD_MAGIC) {
dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
__func__);
@@ -254,35 +267,9 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
}
/* SMI requires CPU 0 */
- if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_copy(old_mask, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpumask_of(0));
- if (smp_processor_id() != 0) {
- dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
- __func__);
- ret = -EBUSY;
- goto out;
- }
-
- /* generate SMI */
- asm volatile (
- "outb %b0,%w1"
- : /* no output args */
- : "a" (smi_cmd->command_code),
- "d" (smi_cmd->command_address),
- "b" (smi_cmd->ebx),
- "c" (smi_cmd->ecx)
- : "memory"
- );
-
-out:
- set_cpus_allowed_ptr(current, old_mask);
- free_cpumask_var(old_mask);
- return ret;
+ smp_call_function_single(0, generate_smi, smi_cmd, 1);
+ return 0;
}
-
/**
* smi_request_store:
*
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 343e8da1fa30..1f1757118a5c 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
int count;
int cpu;
- if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+ if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
return 1;
}
- cpumask_clear(core_mask);
count = 0;
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8574622e36a5..c9e2ae90f195 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -154,9 +154,8 @@ int sync_start(void)
{
int err;
- if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- cpumask_clear(marked_cpus);
start_cpu_work();
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 88bada2ebc4b..510df36dd5d4 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -37,9 +37,6 @@
#ifndef parent_node
#define parent_node(node) ((void)(node),0)
#endif
-#ifndef node_to_cpumask
-#define node_to_cpumask(node) ((void)node, cpu_online_map)
-#endif
#ifndef cpumask_of_node
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#endif
@@ -55,18 +52,4 @@
#endif /* CONFIG_NUMA */
-/*
- * returns pointer to cpumask for specified node
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#ifndef node_to_cpumask_ptr
-
-#define node_to_cpumask_ptr(v, node) \
- cpumask_t _##v = node_to_cpumask(node); \
- const cpumask_t *v = &_##v
-
-#define node_to_cpumask_ptr_next(v, node) \
- _##v = node_to_cpumask(node)
-#endif
-
#endif /* _ASM_GENERIC_TOPOLOGY_H */
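
With node_to_cpumask() and the node_to_cpumask_ptr() helpers removed from the generic header, callers take a const pointer from cpumask_of_node() instead of copying a whole cpumask_t by value, as the mm/quicklist.c hunk near the end of this patch does. A brief sketch under that assumption; the helper name is illustrative.

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Weight of a NUMA node's CPU mask (mirrors the mm/quicklist.c change). */
static unsigned int node_cpu_count(int node)
{
	const struct cpumask *mask = cpumask_of_node(node);

	return cpumask_weight(mask);
}
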
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c5ac87ca7bc6..5b44e9fe510d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -3,444 +3,37 @@
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number.
- *
- * The new cpumask_ ops take a "struct cpumask *"; the old ones
- * use cpumask_t.
- *
- * See detailed comments in the file linux/bitmap.h describing the
- * data type on which these cpumasks are based.
- *
- * For details of cpumask_scnprintf() and cpumask_parse_user(),
- * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
- * For details of cpulist_scnprintf() and cpulist_parse(), see
- * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
- * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
- * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
- * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
- * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
- *
- * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
- * Note: The alternate operations with the suffix "_nr" are used
- * to limit the range of the loop to nr_cpu_ids instead of
- * NR_CPUS when NR_CPUS > 64 for performance reasons.
- * If NR_CPUS is <= 64 then most assembler bitmask
- * operators execute faster with a constant range, so
- * the operator will continue to use NR_CPUS.
- *
- * Another consideration is that nr_cpu_ids is initialized
- * to NR_CPUS and isn't lowered until the possible cpus are
- * discovered (including any disabled cpus). So early uses
- * will span the entire range of NR_CPUS.
- * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
- *
- * The obsolescent cpumask operations are:
- *
- * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
- * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
- * void cpus_setall(mask) set all bits
- * void cpus_clear(mask) clear all bits
- * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
- * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
- *
- * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
- * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
- * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
- * void cpus_complement(dst, src) dst = ~src
- *
- * int cpus_equal(mask1, mask2) Does mask1 == mask2?
- * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
- * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
- * int cpus_empty(mask) Is mask empty (no bits sets)?
- * int cpus_full(mask) Is mask full (all bits sets)?
- * int cpus_weight(mask) Hamming weigh - number of set bits
- * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS
- *
- * void cpus_shift_right(dst, src, n) Shift right
- * void cpus_shift_left(dst, src, n) Shift left
- *
- * int first_cpu(mask) Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
- * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
- *
- * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
- * (can be used as an lvalue)
- * CPU_MASK_ALL Initializer - all bits set
- * CPU_MASK_NONE Initializer - no bits set
- * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
- *
- * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
- * variables, and CPUMASK_PTR provides pointers to each field.
- *
- * The structure should be defined something like this:
- * struct my_cpumasks {
- * cpumask_t mask1;
- * cpumask_t mask2;
- * };
- *
- * Usage is then:
- * CPUMASK_ALLOC(my_cpumasks);
- * CPUMASK_PTR(mask1, my_cpumasks);
- * CPUMASK_PTR(mask2, my_cpumasks);
- *
- * --- DO NOT reference cpumask_t pointers until this check ---
- * if (my_cpumasks == NULL)
- * "kmalloc failed"...
- *
- * References are now pointers to the cpumask_t variables (*mask1, ...)
- *
- *if NR_CPUS > BITS_PER_LONG
- * CPUMASK_ALLOC(m) Declares and allocates struct m *m =
- * kmalloc(sizeof(*m), GFP_KERNEL)
- * CPUMASK_FREE(m) Macro for kfree(m)
- *else
- * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m
- * CPUMASK_FREE(m) Nop
- *endif
- * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v)
- * ------------------------------------------------------------------------
- *
- * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
- * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
- * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
- * int cpulist_parse(buf, map) Parse ascii string as cpulist
- * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
- * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
- * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
- * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
- *
- * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS
- * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids
- *
- * int num_online_cpus() Number of online CPUs
- * int num_possible_cpus() Number of all possible CPUs
- * int num_present_cpus() Number of present CPUs
- *
- * int cpu_online(cpu) Is some cpu online?
- * int cpu_possible(cpu) Is some cpu possible?
- * int cpu_present(cpu) Is some cpu present (can schedule)?
- *
- * int any_online_cpu(mask) First online cpu in mask
- *
- * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
- * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
- * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
- *
- * Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
- * to generate slightly worse code. Note for example the additional
- * 40 lines of assembly code compiling the "for each possible cpu"
- * loops buried in the disk_stat_read() macros calls when compiling
- * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
- * one-line #define for cpu_isset(), instead of wrapping an inline
- * inside a macro, the way we do the other calls.
+ * set of CPU's in a system, one bit position per CPU number. In general,
+ * only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
-
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-extern cpumask_t _unused_cpumask_arg_;
-
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
- set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
- clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
-{
- bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
-{
- bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
- return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
- __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
-static inline void __cpus_complement(cpumask_t *dstp,
- const cpumask_t *srcp, int nbits)
-{
- bitmap_complement(dstp->bits, srcp->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
-{
- return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
-static inline int __cpus_full(const cpumask_t *srcp, int nbits)
-{
- return bitmap_full(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
-{
- return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_right(dst, src, n) \
- __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_right(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
- __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
- * @bitmap: the bitmap
- *
- * There are a few places where cpumask_var_t isn't appropriate and
- * static cpumasks must be used (eg. very early boot), yet we don't
- * expose the definition of 'struct cpumask'.
- *
- * This does the conversion, and can be used as a constant initializer.
- */
-#define to_cpumask(bitmap) \
- ((struct cpumask *)(1 ? (bitmap) \
- : (void *)sizeof(__check_is_bitmap(bitmap))))
-
-static inline int __check_is_bitmap(const unsigned long *bitmap)
-{
- return 1;
-}
-
-/*
- * Special-case data structure for "single bit set only" constant CPU masks.
+ * cpumask_bits - get the bits in a cpumask
+ * @maskp: the struct cpumask *
*
- * We pre-generate all the 64 (or 32) possible bit positions, with enough
- * padding to the left and the right, and return the constant pointer
- * appropriately offset.
- */
-extern const unsigned long
- cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
-{
- const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
- p -= cpu / BITS_PER_LONG;
- return to_cpumask(p);
-}
-
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-/*
- * In cases where we take the address of the cpumask immediately,
- * gcc optimizes it out (it's a constant) and there's no huge stack
- * variable created:
+ * You should only assume nr_cpu_ids bits of this mask are valid. This is
+ * a macro so it's const-correct.
*/
-#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
-
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
-#if NR_CPUS <= BITS_PER_LONG
-
-#define CPU_MASK_ALL \
-(cpumask_t) { { \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-} }
-
-#define CPU_MASK_ALL_PTR (&CPU_MASK_ALL)
-
-#else
-
-#define CPU_MASK_ALL \
-(cpumask_t) { { \
- [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-} }
-
-/* cpu_mask_all is in init/main.c */
-extern cpumask_t cpu_mask_all;
-#define CPU_MASK_ALL_PTR (&cpu_mask_all)
-
-#endif
-
-#define CPU_MASK_NONE \
-(cpumask_t) { { \
- [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
-} }
-
-#define CPU_MASK_CPU0 \
-(cpumask_t) { { \
- [0] = 1UL \
-} }
-
-#define cpus_addr(src) ((src).bits)
-
-#if NR_CPUS > BITS_PER_LONG
-#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
-#define CPUMASK_FREE(m) kfree(m)
-#else
-#define CPUMASK_ALLOC(m) struct m _m, *m = &_m
-#define CPUMASK_FREE(m)
-#endif
-#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
-
-#define cpu_remap(oldbit, old, new) \
- __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
-static inline int __cpu_remap(int oldbit,
- const cpumask_t *oldp, const cpumask_t *newp, int nbits)
-{
- return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
-}
-
-#define cpus_remap(dst, src, old, new) \
- __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
-static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
- const cpumask_t *oldp, const cpumask_t *newp, int nbits)
-{
- bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
-}
-
-#define cpus_onto(dst, orig, relmap) \
- __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
-static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
- const cpumask_t *relmapp, int nbits)
-{
- bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
-}
-
-#define cpus_fold(dst, orig, sz) \
- __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
-static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
- int sz, int nbits)
-{
- bitmap_fold(dstp->bits, origp->bits, sz, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+#define cpumask_bits(maskp) ((maskp)->bits)
#if NR_CPUS == 1
-
#define nr_cpu_ids 1
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define first_cpu(src) ({ (void)(src); 0; })
-#define next_cpu(n, src) ({ (void)(src); 1; })
-#define any_online_cpu(mask) 0
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-#else /* NR_CPUS > 1 */
-
+#else
extern int nr_cpu_ids;
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-int __first_cpu(const cpumask_t *srcp);
-int __next_cpu(int n, const cpumask_t *srcp);
-int __any_online_cpu(const cpumask_t *mask);
-
-#define first_cpu(src) __first_cpu(&(src))
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#define any_online_cpu(mask) __any_online_cpu(&(mask))
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu((cpu), (mask)), \
- (cpu) < NR_CPUS; )
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
#endif
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#if NR_CPUS <= 64
-
-#define next_cpu_nr(n, src) next_cpu(n, src)
-#define cpus_weight_nr(cpumask) cpus_weight(cpumask)
-#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
-
-#else /* NR_CPUS > 64 */
-
-int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src))
-#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
-#define for_each_cpu_mask_nr(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu_nr((cpu), (mask)), \
- (cpu) < nr_cpu_ids; )
-
-#endif /* NR_CPUS > 64 */
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
+ * not all bits may be allocated. */
+#define nr_cpumask_bits nr_cpu_ids
+#else
+#define nr_cpumask_bits NR_CPUS
+#endif
/*
* The following particular system cpumasks and operations manage
@@ -487,12 +80,6 @@ extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;
-/* These strip const, as traditionally they weren't const. */
-#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
-#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
-#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
-#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
-
#if NR_CPUS > 1
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
@@ -511,35 +98,6 @@ extern const struct cpumask *const cpu_active_mask;
#define cpu_active(cpu) ((cpu) == 0)
#endif
-#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-
-/* These are the new versions of the cpumask operators: passed by pointer.
- * The older versions will be implemented in terms of these, then deleted. */
-#define cpumask_bits(maskp) ((maskp)->bits)
-
-#if NR_CPUS <= BITS_PER_LONG
-#define CPU_BITS_ALL \
-{ \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-}
-
-#else /* NR_CPUS > BITS_PER_LONG */
-
-#define CPU_BITS_ALL \
-{ \
- [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-}
-#endif /* NR_CPUS > BITS_PER_LONG */
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
-#else
-#define nr_cpumask_bits NR_CPUS
-#endif
-
/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
@@ -1088,4 +646,241 @@ void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
+
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap) \
+ ((struct cpumask *)(1 ? (bitmap) \
+ : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+ return 1;
+}
+
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+ cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+{
+ const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+ p -= cpu / BITS_PER_LONG;
+ return to_cpumask(p);
+}
+
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_BITS_ALL \
+{ \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+
+#else /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_BITS_ALL \
+{ \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+/*
+ *
+ * From here down, all obsolete. Use cpumask_ variants!
+ *
+ */
+#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
+/* These strip const, as traditionally they weren't const. */
+#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
+#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
+#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
+#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
+
+#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
+
+#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
+
+#if NR_CPUS <= BITS_PER_LONG
+
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+} }
+
+#else
+
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+} }
+
+#endif
+
+#define CPU_MASK_NONE \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
+} }
+
+#define CPU_MASK_CPU0 \
+(cpumask_t) { { \
+ [0] = 1UL \
+} }
+
+#if NR_CPUS == 1
+#define first_cpu(src) ({ (void)(src); 0; })
+#define next_cpu(n, src) ({ (void)(src); 1; })
+#define any_online_cpu(mask) 0
+#define for_each_cpu_mask(cpu, mask) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#else /* NR_CPUS > 1 */
+int __first_cpu(const cpumask_t *srcp);
+int __next_cpu(int n, const cpumask_t *srcp);
+int __any_online_cpu(const cpumask_t *mask);
+
+#define first_cpu(src) __first_cpu(&(src))
+#define next_cpu(n, src) __next_cpu((n), &(src))
+#define any_online_cpu(mask) __any_online_cpu(&(mask))
+#define for_each_cpu_mask(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = next_cpu((cpu), (mask)), \
+ (cpu) < NR_CPUS; )
+#endif /* SMP */
+
+#if NR_CPUS <= 64
+
+#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
+
+#else /* NR_CPUS > 64 */
+
+int __next_cpu_nr(int n, const cpumask_t *srcp);
+#define for_each_cpu_mask_nr(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = __next_cpu_nr((cpu), &(mask)), \
+ (cpu) < nr_cpu_ids; )
+
+#endif /* NR_CPUS > 64 */
+
+#define cpus_addr(src) ((src).bits)
+
+#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
+static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
+{
+ set_bit(cpu, dstp->bits);
+}
+
+#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
+static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
+{
+ clear_bit(cpu, dstp->bits);
+}
+
+#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
+static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
+static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - see Subtlety (1) above. */
+#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
+
+#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
+static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+{
+ return test_and_set_bit(cpu, addr->bits);
+}
+
+#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_andnot(dst, src1, src2) \
+ __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_equal(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_intersects(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_subset(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
+static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+#define cpus_shift_left(dst, src, n) \
+ __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_left(cpumask_t *dstp,
+ const cpumask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+
#endif /* __LINUX_CPUMASK_H */
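
The rewritten linux/cpumask.h keeps the pointer-based cpumask_* operators as the primary interface and moves every value-based cpus_*/cpu_* helper behind CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS, explicitly marked obsolete. The intended style is struct cpumask pointers plus cpumask_var_t for masks that may live off-stack; a short sketch of that style follows, assuming process context (names are illustrative).

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int count_online_in(const struct cpumask *interesting)
{
	cpumask_var_t tmp;
	int cpu, n = 0;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	/* Pointer-based operators replace the old cpus_and()/cpus_weight(). */
	cpumask_and(tmp, interesting, cpu_online_mask);
	for_each_cpu(cpu, tmp)
		n++;

	free_cpumask_var(tmp);
	return n;
}
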
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2721f07e9354..80b776871a9b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -77,7 +77,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* struct irqaction - per interrupt action descriptor
* @handler: interrupt handler function
* @flags: flags (see IRQF_* above)
- * @mask: no comment as it is useless and about to be removed
* @name: name of the device
* @dev_id: cookie to identify the device
* @next: pointer to the next irqaction for shared interrupts
@@ -90,7 +89,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
struct irqaction {
irq_handler_t handler;
unsigned long flags;
- cpumask_t mask;
const char *name;
void *dev_id;
struct irqaction *next;
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 7bc1440fc473..dbf2479e808e 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -11,7 +11,7 @@
#define LG_CLOCK_MIN_DELTA 100UL
#define LG_CLOCK_MAX_DELTA ULONG_MAX
-/*G:032 The second method of communicating with the Host is to via "struct
+/*G:031 The second method of communicating with the Host is to via "struct
* lguest_data". Once the Guest's initialization hypercall tells the Host where
* this is, the Guest and Host both publish information in it. :*/
struct lguest_data
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4d0754269884..692060d27316 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1713,10 +1713,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return 0;
}
#endif
+
+#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
return set_cpus_allowed_ptr(p, &new_mask);
}
+#endif
/*
* Architectures can set this to 1 if they have specified
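
The sched.h hunk above keeps set_cpus_allowed_ptr() as the primary interface and compiles the by-value set_cpus_allowed() wrapper only when cpumasks are known to be small (!CONFIG_CPUMASK_OFFSTACK), because passing a cpumask_t by value copies the entire bitmap onto the stack on large-NR_CPUS configurations. An illustrative use of the pointer form, in the spirit of the task-migration code this patch removes elsewhere (the helper name is made up):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Restrict the calling task to one CPU without copying a cpumask_t by value. */
static int pin_current_to(int cpu)
{
	return set_cpus_allowed_ptr(current, cpumask_of(cpu));
}
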
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9e3d8af09207..39c64bae776d 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -73,15 +73,6 @@ int smp_call_function(void(*func)(void *info), void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
void (*func)(void *info), void *info, bool wait);
-/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
-static inline int
-smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
- int wait)
-{
- smp_call_function_many(&mask, func, info, wait);
- return 0;
-}
-
void __smp_call_function_single(int cpuid, struct call_single_data *data,
int wait);
@@ -144,8 +135,6 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
-#define smp_call_function_mask(mask, func, info, wait) \
- (up_smp_call_function(func, info))
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void init_call_single_data(void)
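
With smp_call_function_mask() deleted, callers hand the mask to smp_call_function_many() by pointer; it runs the function on the online CPUs in the mask other than the calling CPU. A short sketch of the replacement pattern, assuming the caller already holds a mask (function names are illustrative):

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static void ping(void *info)
{
	/* Executes on each selected remote CPU, in IPI context. */
}

static void ping_cpus(const struct cpumask *mask)
{
	/*
	 * Pointer-based replacement for the removed smp_call_function_mask();
	 * wait=true returns only after every targeted CPU has run ping().
	 * smp_call_function_many() must be called with preemption disabled.
	 */
	preempt_disable();
	smp_call_function_many(mask, ping, NULL, true);
	preempt_enable();
}
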
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7402c1a27c4f..b8dd079e7deb 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -183,12 +183,6 @@ int arch_update_cpu_topology(void);
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
-#ifndef topology_thread_siblings
-#define topology_thread_siblings(cpu) cpumask_of_cpu(cpu)
-#endif
-#ifndef topology_core_siblings
-#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
-#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu) cpumask_of(cpu)
#endif
diff --git a/init/main.c b/init/main.c
index 2c5ade79eb81..e341b4b06cc6 100644
--- a/init/main.c
+++ b/init/main.c
@@ -359,11 +359,6 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#else
-#if NR_CPUS > BITS_PER_LONG
-cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_mask_all);
-#endif
-
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9b1a7de26979..eb8751aa0418 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -180,10 +180,12 @@ EXPORT_SYMBOL(kthread_bind);
* @k: thread created by kthread_create().
*
* Sets kthread_should_stop() for @k to return true, wakes it, and
- * waits for it to exit. Your threadfn() must not call do_exit()
- * itself if you use this function! This can also be called after
- * kthread_create() instead of calling wake_up_process(): the thread
- * will exit without calling threadfn().
+ * waits for it to exit. This can also be called after kthread_create()
+ * instead of calling wake_up_process(): the thread will exit without
+ * calling threadfn().
+ *
+ * If threadfn() may call do_exit() itself, the caller must ensure
+ * task_struct can't go away.
*
* Returns the result of threadfn(), or %-EINTR if wake_up_process()
* was never called.
diff --git a/kernel/module.c b/kernel/module.c
index 38928fcaff2b..6350abfd2098 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1068,7 +1068,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
{
const unsigned long *crc;
- if (!find_symbol("module_layout", NULL, &crc, true, false))
+ if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
+ &crc, true, false))
BUG();
return check_version(sechdrs, versindex, "module_layout", mod, crc);
}
diff --git a/kernel/smp.c b/kernel/smp.c
index ad63d8501207..415ae238753b 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -335,13 +335,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
generic_exec_single(cpu, data, wait);
}
-/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
-
-#ifndef arch_send_call_function_ipi_mask
-# define arch_send_call_function_ipi_mask(maskp) \
- arch_send_call_function_ipi(*(maskp))
-#endif
-
/**
* smp_call_function_many(): Run a function on a set of other CPUs.
* @mask: The set of cpus to run on (only runs on online subset).
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3aa0a0dfdfa8..ffe3a951bb32 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1918,11 +1918,9 @@ __tracing_open(struct inode *inode, struct file *file)
if (current_trace)
*iter->trace = *current_trace;
- if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
- cpumask_clear(iter->started);
-
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
@@ -4278,7 +4276,7 @@ __init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
- if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
goto out_free_tracing_cpumask;
/* To save memory, keep the ring buffer size to its minimum */
@@ -4289,7 +4287,6 @@ __init static int tracer_alloc_buffers(void)
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
- cpumask_clear(tracing_reader_cpumask);
/* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(ring_buf_size,
diff --git a/mm/quicklist.c b/mm/quicklist.c
index e66d07d1b4ff..92db0f96a0cc 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,6 @@ static unsigned long max_pages(unsigned long min_pages)
int node = numa_node_id();
struct zone *zones = NODE_DATA(node)->node_zones;
int num_cpus_on_node;
- const struct cpumask *cpumask_on_node = cpumask_of_node(node);
node_free_pages =
#ifdef CONFIG_ZONE_DMA
@@ -42,7 +41,7 @@ static unsigned long max_pages(unsigned long min_pages)
max = node_free_pages / FRACTION_OF_NODE_MEM;
- num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+ num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
max /= num_cpus_on_node;
return max(max, min_pages);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 947af6fb3f21..361f0cd311bd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
bool called = true;
struct kvm_vcpu *vcpu;
- if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
- cpumask_clear(cpus);
+ zalloc_cpumask_var(&cpus, GFP_ATOMIC);
me = get_cpu();
spin_lock(&kvm->requests_lock);