author     Linus Torvalds <torvalds@linux-foundation.org>  2022-06-03 13:57:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-06-03 13:57:50 -0700
commit     4ab6cfc4ad9f867a107b0ef029088dd4c0a8f83c (patch)
tree       3f30f7307c893160c3a00662f63c1a92285838bf /arch/s390/include
parent     93ce7948e38ffe6f9b4fd403c94c098bd892a5ff (diff)
parent     e0ffcf3fe18e0310221461c08969edec2cc7628c (diff)
Merge tag 's390-5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Heiko Carstens:
 "Just a couple of small improvements, bug fixes and cleanups:

   - Add Eric Farman as maintainer for s390 virtio drivers.

   - Improve machine check handling, and avoid incorrectly injecting a
     machine check into a kvm guest.

   - Add cond_resched() call to gmap page table walker in order to avoid
     possible huge latencies. Also use non-quiescing sske instruction to
     speed up storage key handling.

   - Add __GFP_NORETRY to KEXEC_CONTROL_MEMORY_GFP so s390 behaves
     similarly to common code.

   - Get sie control block address from correct stack slot in perf event
     code. This fixes potential random memory accesses.

   - Change uaccess code so that the exception handler sets the result
     of get_user() and __get_kernel_nofault() to zero in case of a
     fault. Until now this was done via input parameters for inline
     assemblies. Doing it via fault handling is what most or even all
     other architectures are doing.

   - A couple of other small cleanups and fixes"

* tag 's390-5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/stack: add union to reflect kvm stack slot usages
  s390/stack: merge empty stack frame slots
  s390/uaccess: whitespace cleanup
  s390/uaccess: use __noreturn instead of __attribute__((noreturn))
  s390/uaccess: use exception handler to zero result on get_user() failure
  s390/uaccess: use symbolic names for inline assembler operands
  s390/mcck: isolate SIE instruction when setting CIF_MCCK_GUEST flag
  s390/mm: use non-quiescing sske for KVM switch to keyed guest
  s390/gmap: voluntarily schedule during key setting
  MAINTAINERS: Update s390 virtio-ccw
  s390/kexec: add __GFP_NORETRY to KEXEC_CONTROL_MEMORY_GFP
  s390/Kconfig.debug: fix indentation
  s390/Kconfig: fix indentation
  s390/perf: obtain sie_block from the right address
  s390: generate register offsets into pt_regs automatically
  s390: simplify early program check handler
  s390/crypto: fix scatterwalk_unmap() callers in AES-GCM
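The uaccess change above means a faulting get_user() now returns -EFAULT and leaves the destination zeroed by the exception handler itself, instead of routing the zeroing through extra inline assembly input operands. A minimal usage sketch under that assumption; the helper name read_flag() and its arguments are illustrative only, not part of this merge:

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	/* Illustrative only: read one u32 from user space. */
	static int read_flag(const u32 __user *uptr, u32 *flag)
	{
		u32 val;

		if (get_user(val, uptr)) {
			/*
			 * On a fault, val has been zeroed by the exception
			 * handler and get_user() returns -EFAULT; no caller
			 * side pre-initialization is needed.
			 */
			return -EFAULT;
		}
		*flag = val;
		return 0;
	}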
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/asm-extable.h    91
-rw-r--r--  arch/s390/include/asm/kexec.h           2
-rw-r--r--  arch/s390/include/asm/processor.h       6
-rw-r--r--  arch/s390/include/asm/stacktrace.h     11
-rw-r--r--  arch/s390/include/asm/uaccess.h       217
5 files changed, 191 insertions, 136 deletions
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index f24d9591aaed..b74f1070ddb2 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -3,12 +3,24 @@
#define __ASM_EXTABLE_H
#include <linux/stringify.h>
+#include <linux/bits.h>
#include <asm/asm-const.h>
-#define EX_TYPE_NONE 0
-#define EX_TYPE_FIXUP 1
-#define EX_TYPE_BPF 2
-#define EX_TYPE_UACCESS 3
+#define EX_TYPE_NONE 0
+#define EX_TYPE_FIXUP 1
+#define EX_TYPE_BPF 2
+#define EX_TYPE_UA_STORE 3
+#define EX_TYPE_UA_LOAD_MEM 4
+#define EX_TYPE_UA_LOAD_REG 5
+
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(3, 0)
+
+#define EX_DATA_REG_ADDR_SHIFT 4
+#define EX_DATA_REG_ADDR GENMASK(7, 4)
+
+#define EX_DATA_LEN_SHIFT 8
+#define EX_DATA_LEN GENMASK(11, 8)
#define __EX_TABLE(_section, _fault, _target, _type) \
stringify_in_c(.section _section,"a";) \
@@ -19,35 +31,58 @@
stringify_in_c(.short 0;) \
stringify_in_c(.previous)
-#define __EX_TABLE_UA(_section, _fault, _target, _type, _reg) \
- stringify_in_c(.section _section,"a";) \
- stringify_in_c(.align 4;) \
- stringify_in_c(.long (_fault) - .;) \
- stringify_in_c(.long (_target) - .;) \
- stringify_in_c(.short (_type);) \
- stringify_in_c(.macro extable_reg reg;) \
- stringify_in_c(.set .Lfound, 0;) \
- stringify_in_c(.set .Lregnr, 0;) \
- stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \
- stringify_in_c(.ifc "\reg", "%%\rs";) \
- stringify_in_c(.set .Lfound, 1;) \
- stringify_in_c(.short .Lregnr;) \
- stringify_in_c(.endif;) \
- stringify_in_c(.set .Lregnr, .Lregnr+1;) \
- stringify_in_c(.endr;) \
- stringify_in_c(.ifne (.Lfound != 1);) \
- stringify_in_c(.error "extable_reg: bad register argument";) \
- stringify_in_c(.endif;) \
- stringify_in_c(.endm;) \
- stringify_in_c(extable_reg _reg;) \
- stringify_in_c(.purgem extable_reg;) \
+#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
+ stringify_in_c(.section _section,"a";) \
+ stringify_in_c(.align 4;) \
+ stringify_in_c(.long (_fault) - .;) \
+ stringify_in_c(.long (_target) - .;) \
+ stringify_in_c(.short (_type);) \
+ stringify_in_c(.macro extable_reg regerr, regaddr;) \
+ stringify_in_c(.set .Lfound, 0;) \
+ stringify_in_c(.set .Lcurr, 0;) \
+ stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \
+ stringify_in_c( .ifc "\regerr", "%%r\rs";) \
+ stringify_in_c( .set .Lfound, 1;) \
+ stringify_in_c( .set .Lregerr, .Lcurr;) \
+ stringify_in_c( .endif;) \
+ stringify_in_c( .set .Lcurr, .Lcurr+1;) \
+ stringify_in_c(.endr;) \
+ stringify_in_c(.ifne (.Lfound != 1);) \
+ stringify_in_c( .error "extable_reg: bad register argument1";) \
+ stringify_in_c(.endif;) \
+ stringify_in_c(.set .Lfound, 0;) \
+ stringify_in_c(.set .Lcurr, 0;) \
+ stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \
+ stringify_in_c( .ifc "\regaddr", "%%r\rs";) \
+ stringify_in_c( .set .Lfound, 1;) \
+ stringify_in_c( .set .Lregaddr, .Lcurr;) \
+ stringify_in_c( .endif;) \
+ stringify_in_c( .set .Lcurr, .Lcurr+1;) \
+ stringify_in_c(.endr;) \
+ stringify_in_c(.ifne (.Lfound != 1);) \
+ stringify_in_c( .error "extable_reg: bad register argument2";) \
+ stringify_in_c(.endif;) \
+ stringify_in_c(.short .Lregerr << EX_DATA_REG_ERR_SHIFT | \
+ .Lregaddr << EX_DATA_REG_ADDR_SHIFT | \
+ _len << EX_DATA_LEN_SHIFT;) \
+ stringify_in_c(.endm;) \
+ stringify_in_c(extable_reg _regerr,_regaddr;) \
+ stringify_in_c(.purgem extable_reg;) \
stringify_in_c(.previous)
#define EX_TABLE(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP)
+
#define EX_TABLE_AMODE31(_fault, _target) \
__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP)
-#define EX_TABLE_UA(_fault, _target, _reg) \
- __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UACCESS, _reg)
+
+#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
+ __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
+
+#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
+ __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+
+#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
+ __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
#endif /* __ASM_EXTABLE_H */
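The EX_TABLE_UA_* variants above pack an error register, a target register and an access length into the 16-bit data word of each exception table entry, using the EX_DATA_* fields. As a hedged sketch of the consuming side, a fixup handler for EX_TYPE_UA_LOAD_REG could decode that word roughly as below; the handler name, the ex->data member access and the use of FIELD_GET() are assumptions for illustration, not necessarily the exact in-tree implementation:

	#include <linux/bitfield.h>
	#include <linux/errno.h>
	#include <asm/asm-extable.h>
	#include <asm/extable.h>
	#include <asm/ptrace.h>

	/* Hypothetical fixup for EX_TYPE_UA_LOAD_REG entries. */
	static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex,
					   struct pt_regs *regs)
	{
		unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
		unsigned int reg_zero = FIELD_GET(EX_DATA_REG_ADDR, ex->data);

		regs->gprs[reg_err] = -EFAULT;	/* report the fault to the caller */
		regs->gprs[reg_zero] = 0;	/* zero the would-be load target */
		regs->psw.addr = extable_fixup(ex);
		return true;
	}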
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 63098df81c9f..649ecdcc8734 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -31,7 +31,7 @@
#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
/* Allocate control page with GFP_DMA */
-#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
+#define KEXEC_CONTROL_MEMORY_GFP (GFP_DMA | __GFP_NORETRY)
/* Maximum address we can use for the crash control pages */
#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index add764a2be8c..bd66f8e34949 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -304,12 +304,6 @@ static __always_inline void __noreturn disabled_wait(void)
while (1);
}
-/*
- * Basic Program Check Handler.
- */
-extern void s390_base_pgm_handler(void);
-extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
-
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
extern int memcpy_real(void *, unsigned long, size_t);
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index f8500191993d..b23c658dce77 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -39,8 +39,15 @@ static inline bool on_stack(struct stack_info *info,
* Kernel uses the packed stack layout (-mpacked-stack).
*/
struct stack_frame {
- unsigned long empty1[5];
- unsigned int empty2[8];
+ union {
+ unsigned long empty[9];
+ struct {
+ unsigned long sie_control_block;
+ unsigned long sie_savearea;
+ unsigned long sie_reason;
+ unsigned long sie_flags;
+ };
+ };
unsigned long gprs[10];
unsigned long back_chain;
};
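The union above replaces the old empty1[5] plus empty2[8] filler (5 * 8 + 8 * 4 = 72 bytes) with a single nine-slot array that the four KVM/SIE slots overlay, so the packed stack frame layout and the offset of gprs[] stay unchanged. A hedged sketch of compile-time checks that express this invariant; the helper name is illustrative and not part of this patch:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>
	#include <asm/stacktrace.h>

	/* Illustrative only: the merged union must keep the old 72-byte area. */
	static inline void stack_frame_layout_checks(void)
	{
		BUILD_BUG_ON(sizeof(((struct stack_frame *)0)->empty) !=
			     5 * sizeof(unsigned long) + 8 * sizeof(unsigned int));
		BUILD_BUG_ON(offsetof(struct stack_frame, gprs) !=
			     offsetof(struct stack_frame, empty) + 72);
	}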
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 1f150a7cfb3d..f4511e21d646 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -3,7 +3,7 @@
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com),
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/uaccess.h"
*/
@@ -55,9 +55,6 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
return n;
}
-int __put_user_bad(void) __attribute__((noreturn));
-int __get_user_bad(void) __attribute__((noreturn));
-
union oac {
unsigned int val;
struct {
@@ -80,8 +77,14 @@ union oac {
};
};
-#define __put_get_user_asm(to, from, size, oac_spec) \
+int __noreturn __put_user_bad(void);
+
+#define __put_user_asm(to, from, size) \
({ \
+ union oac __oac_spec = { \
+ .oac1.as = PSW_BITS_AS_SECONDARY, \
+ .oac1.a = 1, \
+ }; \
int __rc; \
\
asm volatile( \
@@ -89,26 +92,15 @@ union oac {
"0: mvcos %[_to],%[_from],%[_size]\n" \
"1: xr %[rc],%[rc]\n" \
"2:\n" \
- EX_TABLE_UA(0b,2b,%[rc]) EX_TABLE_UA(1b,2b,%[rc]) \
+ EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
+ EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
: [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
: [_size] "d" (size), [_from] "Q" (*(from)), \
- [spec] "d" (oac_spec.val) \
+ [spec] "d" (__oac_spec.val) \
: "cc", "0"); \
__rc; \
})
-#define __put_user_asm(to, from, size) \
- __put_get_user_asm(to, from, size, ((union oac) { \
- .oac1.as = PSW_BITS_AS_SECONDARY, \
- .oac1.a = 1 \
- }))
-
-#define __get_user_asm(to, from, size) \
- __put_get_user_asm(to, from, size, ((union oac) { \
- .oac2.as = PSW_BITS_AS_SECONDARY, \
- .oac2.a = 1 \
- })) \
-
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
int rc;
@@ -141,6 +133,31 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
return rc;
}
+int __noreturn __get_user_bad(void);
+
+#define __get_user_asm(to, from, size) \
+({ \
+ union oac __oac_spec = { \
+ .oac2.as = PSW_BITS_AS_SECONDARY, \
+ .oac2.a = 1, \
+ }; \
+ int __rc; \
+ \
+ asm volatile( \
+ " lr 0,%[spec]\n" \
+ "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \
+ "1: xr %[rc],%[rc]\n" \
+ "2:\n" \
+ EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \
+ EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \
+ : [rc] "=&d" (__rc), "=Q" (*(to)) \
+ : [_size] "d" (size), [_from] "Q" (*(from)), \
+ [spec] "d" (__oac_spec.val), [_to] "a" (to), \
+ [_ksize] "K" (size) \
+ : "cc", "0"); \
+ __rc; \
+})
+
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
int rc;
@@ -177,77 +194,77 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*/
-#define __put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __x = (x); \
- int __pu_err = -EFAULT; \
- __chk_user_ptr(ptr); \
- switch (sizeof (*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- case 8: \
- __pu_err = __put_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- break; \
- default: \
- __put_user_bad(); \
- break; \
- } \
- __builtin_expect(__pu_err, 0); \
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __pu_err = -EFAULT; \
+ \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ break; \
+ } \
+ __builtin_expect(__pu_err, 0); \
})
-#define put_user(x, ptr) \
-({ \
- might_fault(); \
- __put_user(x, ptr); \
+#define put_user(x, ptr) \
+({ \
+ might_fault(); \
+ __put_user(x, ptr); \
})
-
-#define __get_user(x, ptr) \
-({ \
- int __gu_err = -EFAULT; \
- __chk_user_ptr(ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: { \
- unsigned char __x = 0; \
- __gu_err = __get_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 2: { \
- unsigned short __x = 0; \
- __gu_err = __get_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 4: { \
- unsigned int __x = 0; \
- __gu_err = __get_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- case 8: { \
- unsigned long long __x = 0; \
- __gu_err = __get_user_fn(&__x, ptr, \
- sizeof(*(ptr))); \
- (x) = *(__force __typeof__(*(ptr)) *) &__x; \
- break; \
- }; \
- default: \
- __get_user_bad(); \
- break; \
- } \
- __builtin_expect(__gu_err, 0); \
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = -EFAULT; \
+ \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ unsigned char __x; \
+ \
+ __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 2: { \
+ unsigned short __x; \
+ \
+ __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 4: { \
+ unsigned int __x; \
+ \
+ __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ case 8: { \
+ unsigned long __x; \
+ \
+ __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ break; \
+ }; \
+ default: \
+ __get_user_bad(); \
+ break; \
+ } \
+ __builtin_expect(__gu_err, 0); \
})
-#define get_user(x, ptr) \
-({ \
- might_fault(); \
- __get_user(x, ptr); \
+#define get_user(x, ptr) \
+({ \
+ might_fault(); \
+ __get_user(x, ptr); \
})
/*
@@ -278,19 +295,20 @@ int __noreturn __put_kernel_bad(void);
int __rc; \
\
asm volatile( \
- "0: " insn " %2,%1\n" \
- "1: xr %0,%0\n" \
+ "0: " insn " %[_val],%[_to]\n" \
+ "1: xr %[rc],%[rc]\n" \
"2:\n" \
- EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \
- : "=d" (__rc), "+Q" (*(to)) \
- : "d" (val) \
+ EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
+ EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
+ : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \
+ : [_val] "d" (val) \
: "cc"); \
__rc; \
})
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
- u64 __x = (u64)(*((type *)(src))); \
+ unsigned long __x = (unsigned long)(*((type *)(src))); \
int __pk_err; \
\
switch (sizeof(type)) { \
@@ -321,12 +339,13 @@ int __noreturn __get_kernel_bad(void);
int __rc; \
\
asm volatile( \
- "0: " insn " %1,%2\n" \
- "1: xr %0,%0\n" \
+ "0: " insn " %[_val],%[_from]\n" \
+ "1: xr %[rc],%[rc]\n" \
"2:\n" \
- EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \
- : "=d" (__rc), "+d" (val) \
- : "Q" (*(from)) \
+ EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \
+ EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \
+ : [rc] "=d" (__rc), [_val] "=d" (val) \
+ : [_from] "Q" (*(from)) \
: "cc"); \
__rc; \
})
@@ -337,28 +356,28 @@ do { \
\
switch (sizeof(type)) { \
case 1: { \
- u8 __x = 0; \
+ unsigned char __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 2: { \
- u16 __x = 0; \
+ unsigned short __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 4: { \
- u32 __x = 0; \
+ unsigned int __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 8: { \
- u64 __x = 0; \
+ unsigned long __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \
*((type *)(dst)) = (type)__x; \