Diffstat (limited to 'arch/s390/kernel')
 arch/s390/kernel/dumpstack.c |  4 ++--
 arch/s390/kernel/irq.c       |  2 +-
 arch/s390/kernel/setup.c     | 12 ++++++------
 arch/s390/kernel/smp.c       |  6 +++---
 4 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 301b945de77b..ef85a00442cd 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -77,11 +77,11 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
sp = __dump_trace(func, data, sp,
- S390_lowcore.nodat_stack + frame_size - STACK_SIZE,
+ S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
S390_lowcore.nodat_stack + frame_size);
#endif
sp = __dump_trace(func, data, sp,
- S390_lowcore.async_stack + frame_size - STACK_SIZE,
+ S390_lowcore.async_stack + frame_size - THREAD_SIZE,
S390_lowcore.async_stack + frame_size);
task = task ?: current;
__dump_trace(func, data, sp,
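
As an aside on what these bounds feed: __dump_trace() follows the frame back-chain and stops once it leaves the given window, which for the nodat and async stacks now spans exactly THREAD_SIZE bytes. A rough user-space sketch of that idea; the frame layout and sizes here are toy assumptions, not the real s390 struct stack_frame:

#include <stdio.h>

/* Toy model of an s390 back-chained stack frame: the first slot of each
 * frame points to the caller's frame, and 0 terminates the chain. */
struct frame {
	unsigned long back_chain;
};

/* Walk frames only while they stay inside [low, high).  In the hunk above
 * that window is
 *     low  = <lowcore stack pointer> + frame_size - THREAD_SIZE
 *     high = <lowcore stack pointer> + frame_size
 * i.e. exactly one THREAD_SIZE-sized stack. */
static void walk(unsigned long sp, unsigned long low, unsigned long high)
{
	while (sp >= low && sp < high) {
		printf("frame at %#lx\n", sp);
		sp = ((struct frame *)sp)->back_chain;
		if (!sp)
			break;
	}
}

int main(void)
{
	static struct frame frames[8];	/* stand-in for a kernel stack */
	unsigned long low  = (unsigned long)&frames[0];
	unsigned long high = (unsigned long)&frames[8];

	frames[2].back_chain = 0;				/* outermost frame */
	frames[5].back_chain = (unsigned long)&frames[2];
	frames[7].back_chain = (unsigned long)&frames[5];	/* innermost frame */

	walk((unsigned long)&frames[7], low, high);
	return 0;
}
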
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b2bc0eb1ca7a..0e8d68bac82c 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -171,7 +171,7 @@ void do_softirq_own_stack(void)
old = current_stack_pointer();
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
- if (((new - old) >> (PAGE_SHIFT + STACK_ORDER)) != 0) {
+ if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
CALL_ON_STACK(__do_softirq, new, 0);
} else {
/* We are already on the async stack. */
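
The shift above decides whether the softirq handler can stay on the current stack. A small stand-alone sketch of that test, with assumed constants (4 KiB pages, order-2 stacks) rather than the real asm headers:

#include <stdio.h>

/* Assumed values: 4 KiB pages and order-2 (16 KiB) kernel stacks. */
#define PAGE_SHIFT        12
#define THREAD_SIZE_ORDER 2
#define THREAD_SIZE       (1UL << (PAGE_SHIFT + THREAD_SIZE_ORDER))

/* Nonzero when the current stack pointer "old" lies outside the
 * THREAD_SIZE window just below the async stack pointer "new": the shift
 * discards the low PAGE_SHIFT + THREAD_SIZE_ORDER bits, so the result is
 * zero only when new - old < THREAD_SIZE, i.e. we are already on the
 * async stack and no switch is needed. */
static int needs_stack_switch(unsigned long new, unsigned long old)
{
	return ((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0;
}

int main(void)
{
	unsigned long top = 0x100000UL;	/* pretend S390_lowcore.async_stack */

	/* 0: SP a few hundred bytes below the async stack pointer. */
	printf("%d\n", needs_stack_switch(top, top - 0x200UL));
	/* 1: SP on some other stack, two stack sizes away. */
	printf("%d\n", needs_stack_switch(top, top - 2 * THREAD_SIZE));
	return 0;
}
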
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index eca51c485d09..67fa7cb8ae80 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -308,13 +308,13 @@ unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
return (unsigned long)
- __vmalloc_node_range(STACK_SIZE, STACK_SIZE,
+ __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
VMALLOC_START, VMALLOC_END,
THREADINFO_GFP,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
#else
- return __get_free_pages(GFP_KERNEL, STACK_ORDER);
+ return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}
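
Both branches of stack_alloc() are sized by the same quantity: THREAD_SIZE bytes, which is PAGE_SIZE shifted left by THREAD_SIZE_ORDER. A hedged stand-alone check of that identity, using assumed constants; the stack_free() hunk below mirrors the pairing (vfree() for vmap stacks, free_pages() with the same order otherwise):

#include <assert.h>
#include <stdio.h>

/* Assumed values; the real ones live in asm/page.h and asm/thread_info.h. */
#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define THREAD_SIZE_ORDER 2
#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)

int main(void)
{
	/* __get_free_pages(..., order) hands back 2^order contiguous pages,
	 * so passing THREAD_SIZE_ORDER there yields the same number of
	 * bytes that __vmalloc_node_range(THREAD_SIZE, ...) maps in the
	 * CONFIG_VMAP_STACK branch. */
	assert((PAGE_SIZE << THREAD_SIZE_ORDER) == THREAD_SIZE);
	printf("stack size: %lu bytes = %lu pages\n",
	       THREAD_SIZE, THREAD_SIZE / PAGE_SIZE);
	return 0;
}
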
@@ -323,7 +323,7 @@ void stack_free(unsigned long stack)
#ifdef CONFIG_VMAP_STACK
vfree((void *) stack);
#else
- free_pages(stack, STACK_ORDER);
+ free_pages(stack, THREAD_SIZE_ORDER);
#endif
}
@@ -331,7 +331,7 @@ int __init arch_early_irq_init(void)
{
unsigned long stack;
- stack = __get_free_pages(GFP_KERNEL, STACK_ORDER);
+ stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (!stack)
panic("Couldn't allocate async stack");
S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
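
Note that the lowcore field is set to stack + STACK_INIT_OFFSET, i.e. near the top of the allocation rather than its base. A layout sketch under assumed sizes; STACK_FRAME_OVERHEAD and the pt_regs size below are illustrative stand-ins:

#include <stdio.h>

/* Assumed sizes; STACK_INIT_OFFSET models "top of the stack minus room
 * for one frame and a pt_regs area". */
#define PAGE_SHIFT           12
#define THREAD_SIZE_ORDER    2
#define THREAD_SIZE          (1UL << (PAGE_SHIFT + THREAD_SIZE_ORDER))
#define STACK_FRAME_OVERHEAD 160UL	/* assumed */
#define PT_REGS_SIZE         336UL	/* stand-in for sizeof(struct pt_regs) */
#define STACK_INIT_OFFSET    (THREAD_SIZE - STACK_FRAME_OVERHEAD - PT_REGS_SIZE)

int main(void)
{
	unsigned long stack = 0x300000UL;	/* pretend __get_free_pages() result */
	unsigned long sp    = stack + STACK_INIT_OFFSET;

	printf("stack memory: [%#lx, %#lx)\n", stack, stack + THREAD_SIZE);
	printf("initial SP:   %#lx (%lu bytes below the top)\n",
	       sp, stack + THREAD_SIZE - sp);
	return 0;
}
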
@@ -347,7 +347,7 @@ static int __init async_stack_realloc(void)
if (!new)
panic("Couldn't allocate async stack");
S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
- free_pages(old, STACK_ORDER);
+ free_pages(old, THREAD_SIZE_ORDER);
return 0;
}
early_initcall(async_stack_realloc);
@@ -428,7 +428,7 @@ static void __init setup_lowcore(void)
* Allocate the global restart stack which is the same for
* all CPUs in case *one* of them does a PSW restart.
*/
- restart_stack = memblock_virt_alloc(STACK_SIZE, STACK_SIZE);
+ restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE);
restart_stack += STACK_INIT_OFFSET;
/*
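
memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE) requests a stack-sized block that is also stack-aligned. As a hedged aside, not something this hunk itself spells out, natural alignment lets the owning stack's base be recovered from any pointer into it by masking:

#include <stdio.h>

#define THREAD_SIZE 16384UL	/* assumed: 4 pages of 4 KiB */

int main(void)
{
	/* Pretend the allocator returned a THREAD_SIZE-aligned region... */
	unsigned long base = 32 * THREAD_SIZE;			/* naturally aligned */
	unsigned long sp   = base + THREAD_SIZE - 0x1a0UL;	/* an SP inside it */

	/* ...then the containing stack can be found from any pointer into
	 * it by masking off the low bits. */
	printf("recovered base: %#lx\n", sp & ~(THREAD_SIZE - 1));
	return 0;
}
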
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index fccdb96a04cb..032d98bfc60a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -194,7 +194,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- nodat_stack = __get_free_pages(GFP_KERNEL, STACK_ORDER);
+ nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (!pcpu->lowcore || !nodat_stack)
goto out;
} else {
@@ -226,7 +226,7 @@ out_async:
stack_free(async_stack);
out:
if (pcpu != &pcpu_devices[0]) {
- free_pages(nodat_stack, STACK_ORDER);
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
return -ENOMEM;
@@ -249,7 +249,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
stack_free(async_stack);
if (pcpu == &pcpu_devices[0])
return;
- free_pages(nodat_stack, STACK_ORDER);
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages(lowcore, LC_ORDER);
}
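
The cleanup paths above free the nodat stack and lowcore only for secondary CPUs; pcpu_devices[0] keeps the structures set up at boot. A minimal user-space sketch of that pattern, with made-up types and malloc/free standing in for the page allocator:

#include <stdio.h>
#include <stdlib.h>

/* Struct and names modeled loosely on the hunks above; "boot_cpu" stands
 * in for pcpu_devices[0]. */
struct pcpu {
	void *lowcore;
	void *nodat_stack;
};

static struct pcpu boot_cpu;

static void pcpu_teardown(struct pcpu *pcpu)
{
	if (pcpu == &boot_cpu)
		return;			/* the boot CPU keeps its early setup */
	free(pcpu->nodat_stack);	/* kernel: free_pages(..., THREAD_SIZE_ORDER) */
	free(pcpu->lowcore);		/* kernel: free_pages(..., LC_ORDER) */
}

int main(void)
{
	struct pcpu secondary = {
		.lowcore     = malloc(8192),
		.nodat_stack = malloc(16384),
	};

	pcpu_teardown(&secondary);	/* frees both resources */
	pcpu_teardown(&boot_cpu);	/* no-op for the boot CPU */
	puts("done");
	return 0;
}
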