author     Linus Torvalds <torvalds@linux-foundation.org>  2021-11-04 11:30:48 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-11-04 11:30:48 -0700
commit     c1e2e0350ce37f633b5ce3ce1cdf4428513fc2a2 (patch)
tree       5f67e368e57fbbffc9a6b251409f968fe0c3ac83 /arch
parent     7e113d01f5f9fe6ad018d8289239d0bbb41311d7 (diff)
parent     2a2e8202c7a16a85a881ad2b6e32ccbebdc01dda (diff)
Merge tag 'for-5.16/parisc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull more parisc architecture fixes and updates from Helge Deller:
 "One build error fix and two optimizations:

  - Fix build error by moving the CPU field back into thread_info
    struct (Ard Biesheuvel)

  - Do not enable IRQs unconditionally at start of interrupt handler
    if they were disabled before (Sven Schnelle)

  - Keep interrupts enabled during cmpxchg and futex operations
    (Dave Anglin)"

* tag 'for-5.16/parisc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: move CPU field back into thread_info
  parisc: Don't disable interrupts in cmpxchg and futex operations
  parisc: don't enable irqs unconditionally in handle_interruption()
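For the futex/cmpxchg change, the helpers in the diff below now take the hashed LWS lock with preemption disabled instead of turning local interrupts off. A rough user-space analogue of that hashed-lock cmpxchg pattern follows; it is a sketch only, the names futex_locks, NUM_LOCKS, lock_for and sw_cmpxchg are invented here, and the kernel variant uses arch_spinlock_t under preempt_disable() and accesses the user word via get_user()/put_user() rather than using pthread mutexes.

/*
 * User-space sketch of the hashed-lock cmpxchg pattern used by the
 * parisc futex helpers.  All names here are illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

#define NUM_LOCKS 16

static pthread_mutex_t futex_locks[NUM_LOCKS] = {
	[0 ... NUM_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER
};

/* Pick a lock by hashing the word address, as futex.h hashes uaddr. */
static pthread_mutex_t *lock_for(uint32_t *addr)
{
	return &futex_locks[((uintptr_t)addr >> 2) % NUM_LOCKS];
}

/* Software cmpxchg: only callers that hash to the same lock serialize
 * against each other, mirroring the LWS lock array. */
static int sw_cmpxchg(uint32_t *addr, uint32_t old, uint32_t new,
		      uint32_t *cur)
{
	pthread_mutex_t *l = lock_for(addr);

	pthread_mutex_lock(l);
	*cur = *addr;
	if (*cur == old)
		*addr = new;
	pthread_mutex_unlock(l);
	return 0;
}

int main(void)
{
	uint32_t word = 1, cur;

	sw_cmpxchg(&word, 1, 2, &cur);
	printf("old=%u new=%u\n", cur, word);
	return 0;
}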
Diffstat (limited to 'arch')
-rw-r--r--  arch/parisc/include/asm/futex.h        | 24
-rw-r--r--  arch/parisc/include/asm/smp.h          | 19
-rw-r--r--  arch/parisc/include/asm/thread_info.h  |  3
-rw-r--r--  arch/parisc/kernel/asm-offsets.c       |  5
-rw-r--r--  arch/parisc/kernel/smp.c               |  2
-rw-r--r--  arch/parisc/kernel/syscall.S           | 10
-rw-r--r--  arch/parisc/kernel/traps.c             |  2
7 files changed, 17 insertions, 48 deletions
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index e38a118cf65d..70cf8f0a7617 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -11,35 +11,34 @@
sixteen four-word locks. */
static inline void
-_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+_futex_spin_lock(u32 __user *uaddr)
{
extern u32 lws_lock_start[];
long index = ((long)uaddr & 0x3f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
- local_irq_save(*flags);
+ preempt_disable();
arch_spin_lock(s);
}
static inline void
-_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+_futex_spin_unlock(u32 __user *uaddr)
{
extern u32 lws_lock_start[];
long index = ((long)uaddr & 0x3f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
arch_spin_unlock(s);
- local_irq_restore(*flags);
+ preempt_enable();
}
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- unsigned long int flags;
int oldval, ret;
u32 tmp;
- _futex_spin_lock_irqsave(uaddr, &flags);
-
ret = -EFAULT;
+
+ _futex_spin_lock(uaddr);
if (unlikely(get_user(oldval, uaddr) != 0))
goto out_pagefault_enable;
@@ -70,7 +69,7 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
ret = -EFAULT;
out_pagefault_enable:
- _futex_spin_unlock_irqrestore(uaddr, &flags);
+ _futex_spin_unlock(uaddr);
if (!ret)
*oval = oldval;
@@ -83,7 +82,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
u32 val;
- unsigned long flags;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@@ -100,19 +98,19 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
* address. This should scale to a couple of CPUs.
*/
- _futex_spin_lock_irqsave(uaddr, &flags);
+ _futex_spin_lock(uaddr);
if (unlikely(get_user(val, uaddr) != 0)) {
- _futex_spin_unlock_irqrestore(uaddr, &flags);
+ _futex_spin_unlock(uaddr);
return -EFAULT;
}
if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
- _futex_spin_unlock_irqrestore(uaddr, &flags);
+ _futex_spin_unlock(uaddr);
return -EFAULT;
}
*uval = val;
- _futex_spin_unlock_irqrestore(uaddr, &flags);
+ _futex_spin_unlock(uaddr);
return 0;
}
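The lock chosen by _futex_spin_lock() is derived purely from bits of the user address, so unrelated futexes usually land on different locks while addresses that differ only above bit 9 alias to the same one (hence the comment that the scheme "should scale to a couple of CPUs"). A tiny standalone demonstration of that indexing, with hash_index as an invented helper and the mask and shift taken verbatim from the diff:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as _futex_spin_lock(): keep address bits 3..9 and
 * shift right by one, giving an offset into the lws_lock_start[] array. */
static long hash_index(uintptr_t uaddr)
{
	return ((long)uaddr & 0x3f8) >> 1;
}

int main(void)
{
	uintptr_t samples[] = { 0x10000, 0x10008, 0x10010, 0x20008 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("uaddr %#lx -> lock index %ld\n",
		       (unsigned long)samples[i], hash_index(samples[i]));
	return 0;
}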
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 16d41127500e..2279ebe5e2da 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -32,25 +32,10 @@ extern void smp_send_all_nop(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
#endif /* !ASSEMBLY */
-/*
- * This is particularly ugly: it appears we can't actually get the definition
- * of task_struct here, but we need access to the CPU this task is running on.
- * Instead of using task_struct we're using TASK_CPU which is extracted from
- * asm-offsets.h by kbuild to get the current processor ID.
- *
- * This also needs to be safeguarded when building asm-offsets.s because at
- * that time TASK_CPU is not defined yet. It could have been guarded by
- * TASK_CPU itself, but we want the build to fail if TASK_CPU is missing
- * when building something else than asm-offsets.s
- */
-#ifdef GENERATING_ASM_OFFSETS
-#define raw_smp_processor_id() (0)
-#else
-#include <asm/asm-offsets.h>
-#define raw_smp_processor_id() (*(unsigned int *)((void *)current + TASK_CPU))
-#endif
#else /* CONFIG_SMP */
static inline void smp_send_all_nop(void) { return; }
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 75657c2c54e1..1a58795f785c 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -9,6 +9,9 @@
struct thread_info {
unsigned long flags; /* thread_info flags (see TIF_*) */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
+#ifdef CONFIG_SMP
+ unsigned int cpu;
+#endif
};
#define INIT_THREAD_INFO(tsk) \
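With thread_info embedded at the start of task_struct (THREAD_INFO_IN_TASK), the new raw_smp_processor_id() in smp.h can simply read current_thread_info()->cpu instead of going through the removed asm-offsets constant TASK_CPU. A simplified standalone model of that layout is sketched below; the structures and my_raw_smp_processor_id are illustrative stand-ins, not the real kernel definitions.

#include <stdio.h>

/* Simplified model: thread_info embedded at offset 0 of the task
 * structure, as with CONFIG_THREAD_INFO_IN_TASK. */
struct thread_info {
	unsigned long flags;
	int preempt_count;
	unsigned int cpu;		/* field restored by this merge */
};

struct task_struct {
	struct thread_info thread_info;	/* must stay first */
	/* ... rest of the task state ... */
};

static struct task_struct *current_task;	/* stand-in for 'current' */

static inline struct thread_info *current_thread_info(void)
{
	return &current_task->thread_info;
}

/* Equivalent of the new raw_smp_processor_id() definition. */
static inline unsigned int my_raw_smp_processor_id(void)
{
	return current_thread_info()->cpu;
}

int main(void)
{
	struct task_struct t = { .thread_info = { .cpu = 3 } };

	current_task = &t;
	printf("running on cpu %u\n", my_raw_smp_processor_id());
	return 0;
}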
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index e35154035441..55c1c5189c6a 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -14,8 +14,6 @@
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
*/
-#define GENERATING_ASM_OFFSETS /* asm/smp.h */
-
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
@@ -39,9 +37,6 @@ int main(void)
{
DEFINE(TASK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
-#ifdef CONFIG_SMP
- DEFINE(TASK_CPU, offsetof(struct task_struct, cpu));
-#endif
BLANK();
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 171925285f3e..a32a882a2d58 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -339,8 +339,6 @@ int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
long timeout;
- idle->cpu = cpuid;
-
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index ec9675f58435..4fb3b6a993bf 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -597,13 +597,11 @@ cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */
- rsm PSW_SM_I, %r0 /* Disable interrupts */
/* COW breaks can cause contention on UP systems */
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
cas_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
- ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
@@ -639,8 +637,6 @@ cas_action:
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
#endif
- /* Enable interrupts */
- ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
@@ -652,7 +648,6 @@ cas_action:
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
- ssm PSW_SM_I, %r0
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
@@ -764,13 +759,11 @@ cas2_lock_start:
shlw %r20, 4, %r20
add %r20, %r28, %r20
- rsm PSW_SM_I, %r0 /* Disable interrupts */
/* COW breaks can cause contention on UP systems */
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */
cas2_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
- ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
@@ -850,8 +843,6 @@ cas2_action:
cas2_end:
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
- /* Enable interrupts */
- ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
@@ -860,7 +851,6 @@ cas2_end:
/* Error occurred on load or store */
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
- ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 690e6abcaf22..b11fb26ce299 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -481,7 +481,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (code == 1)
pdc_console_restart(); /* switch back to pdc if HPMC */
- else
+ else if (!irqs_disabled_flags(regs->gr[0]))
local_irq_enable();
/* Security check:
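The traps.c hunk makes the IRQ enable conditional on the interrupted context's saved PSW (passed in regs->gr[0]), so an interruption taken with interrupts off no longer turns them back on. A minimal model of that guard is sketched below; the PSW_I value and the model_* names are illustrative only, and the real check is irqs_disabled_flags() from the kernel headers.

#include <stdio.h>
#include <stdbool.h>

#define PSW_I 0x00000001UL	/* illustrative I-bit; see asm/psw.h for the real layout */

/* Model of irqs_disabled_flags(): the interrupted context had IRQs
 * disabled if the I bit is clear in its saved PSW. */
static bool model_irqs_disabled_flags(unsigned long psw)
{
	return !(psw & PSW_I);
}

static void model_handle_interruption(unsigned long saved_psw)
{
	/* Mirror the new logic: only re-enable IRQs if the code we
	 * interrupted was running with them enabled. */
	if (!model_irqs_disabled_flags(saved_psw))
		printf("PSW %#lx: local_irq_enable()\n", saved_psw);
	else
		printf("PSW %#lx: leave interrupts disabled\n", saved_psw);
}

int main(void)
{
	model_handle_interruption(PSW_I);	/* interrupted code had IRQs on */
	model_handle_interruption(0);		/* interrupted code had IRQs off */
	return 0;
}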