From 2253e8d79237c69086ded391e6767afe16972527 Mon Sep 17 00:00:00 2001
From: Sebastian Ott
Date: Mon, 27 Jan 2014 13:26:10 +0100
Subject: s390/cio: fix driver callback initialization for ccw consoles

ccw consoles are in use before they can be properly registered with
the driver core. For devices which are in use by a device driver we
rely on the ccw_device's pointer to the driver callbacks to be valid.
For ccw consoles this pointer is NULL until they are registered later
during boot and we dereferenced this pointer. This worked by chance on
64 bit builds (cdev->drv was NULL but the optional callback
cdev->drv->path_event was also NULL by coincidence) and was unnoticed
until we received reports about boot failures on 31 bit systems.

Fix it by initializing the driver pointer for ccw consoles.

Cc: # 3.10+
Reported-by: Mike Frysinger
Reported-by: Heiko Carstens
Reviewed-by: Peter Oberparleiter
Signed-off-by: Sebastian Ott
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/ccwdev.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f201af8be580..31b5ca8f8c3d 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -219,7 +219,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);

 #define to_ccwdev(n) container_of(n, struct ccw_device, dev)
 #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)

-extern struct ccw_device *ccw_device_probe_console(void);
+extern struct ccw_device *ccw_device_probe_console(struct ccw_driver *);
 extern void ccw_device_wait_idle(struct ccw_device *);
 extern int ccw_device_force_console(struct ccw_device *);
-- cgit v1.2.3

From 1e5320960510d6d6f2cbdc7ed33df9791283b7ea Mon Sep 17 00:00:00 2001
From: Sebastian Ott
Date: Mon, 27 Jan 2014 13:28:10 +0100
Subject: s390/cio: reorder initialization of ccw consoles

Drivers for ccw consoles use ccw_device_probe_console to receive an
initialized ccw device which is already enabled for interrupts. After
that the device driver does the initialization of its private data.
This can race with unsolicited interrupts which can happen once the
device is enabled for interrupts.

Split ccw_device_probe_console into ccw_device_create_console and
ccw_device_enable_console and reorder the initialization of the ccw
console drivers. While at it mark these functions as __init.
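Roughly, a ccw console driver's init path after this split is expected to
look like the sketch below; it is modeled on the con3215 hunks in the diff
that follows, and the my_console_* names are placeholders rather than
kernel symbols:

#include <linux/init.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

/* Placeholder driver object and interrupt handler, for illustration only. */
static struct ccw_driver my_console_ccw_driver;

static void my_console_irq(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	/* handle interrupts for the console device */
}

static int __init my_console_init(void)
{
	struct ccw_device *cdev;

	cdev = ccw_device_create_console(&my_console_ccw_driver);
	if (IS_ERR(cdev))
		return -ENODEV;

	/* The device is not yet enabled for interrupts, so private data
	 * and the interrupt handler can be set up without racing against
	 * unsolicited interrupts. */
	cdev->handler = my_console_irq;

	/* Enable the device for interrupts only after initialization. */
	if (ccw_device_enable_console(cdev)) {
		ccw_device_destroy_console(cdev);
		return -ENODEV;
	}
	return 0;
}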
Reviewed-by: Peter Oberparleiter Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ccwdev.h | 4 +++- drivers/s390/char/con3215.c | 8 +++++++- drivers/s390/char/raw3270.c | 9 ++++++++- drivers/s390/cio/device.c | 29 +++++++++++++++++------------ 4 files changed, 35 insertions(+), 15 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index 31b5ca8f8c3d..a9c2c0686177 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -219,7 +219,9 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *); #define to_ccwdev(n) container_of(n, struct ccw_device, dev) #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) -extern struct ccw_device *ccw_device_probe_console(struct ccw_driver *); +extern struct ccw_device *ccw_device_create_console(struct ccw_driver *); +extern void ccw_device_destroy_console(struct ccw_device *); +extern int ccw_device_enable_console(struct ccw_device *); extern void ccw_device_wait_idle(struct ccw_device *); extern int ccw_device_force_console(struct ccw_device *); diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index bb86494e2b7b..5af7f0bd6125 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -922,7 +922,7 @@ static int __init con3215_init(void) raw3215_freelist = req; } - cdev = ccw_device_probe_console(&raw3215_ccw_driver); + cdev = ccw_device_create_console(&raw3215_ccw_driver); if (IS_ERR(cdev)) return -ENODEV; @@ -932,6 +932,12 @@ static int __init con3215_init(void) cdev->handler = raw3215_irq; raw->flags |= RAW3215_FIXED; + if (ccw_device_enable_console(cdev)) { + ccw_device_destroy_console(cdev); + raw3215_free_info(raw); + raw3215[0] = NULL; + return -ENODEV; + } /* Request the console irq */ if (raw3215_startup(raw) != 0) { diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index de2c0483949f..041c65bc7bb1 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -790,7 +790,7 @@ struct raw3270 __init *raw3270_setup_console(void) char *ascebc; int rc; - cdev = ccw_device_probe_console(&raw3270_ccw_driver); + cdev = ccw_device_create_console(&raw3270_ccw_driver); if (IS_ERR(cdev)) return ERR_CAST(cdev); @@ -800,6 +800,13 @@ struct raw3270 __init *raw3270_setup_console(void) if (rc) return ERR_PTR(rc); set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags); + + rc = ccw_device_enable_console(cdev); + if (rc) { + ccw_device_destroy_console(cdev); + return ERR_PTR(rc); + } + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); do { __raw3270_reset_device(rp); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 4283dd3cdd49..da431992fd8e 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1572,11 +1572,14 @@ out: } #ifdef CONFIG_CCW_CONSOLE -static int ccw_device_console_enable(struct ccw_device *cdev, - struct subchannel *sch) +int __init ccw_device_enable_console(struct ccw_device *cdev) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); int rc; + if (!cdev->drv || !cdev->handler) + return -EINVAL; + io_subchannel_init_fields(sch); rc = cio_commit_config(sch); if (rc) @@ -1609,12 +1612,11 @@ out_unlock: return rc; } -struct ccw_device *ccw_device_probe_console(struct ccw_driver *drv) +struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv) { struct io_subchannel_private *io_priv; struct ccw_device *cdev; struct subchannel *sch; - int ret; sch = 
cio_probe_console(); if (IS_ERR(sch)) @@ -1633,17 +1635,20 @@ struct ccw_device *ccw_device_probe_console(struct ccw_driver *drv) } cdev->drv = drv; set_io_private(sch, io_priv); - ret = ccw_device_console_enable(cdev, sch); - if (ret) { - set_io_private(sch, NULL); - put_device(&sch->dev); - put_device(&cdev->dev); - kfree(io_priv); - return ERR_PTR(ret); - } return cdev; } +void __init ccw_device_destroy_console(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct io_subchannel_private *io_priv = to_io_private(sch); + + set_io_private(sch, NULL); + put_device(&sch->dev); + put_device(&cdev->dev); + kfree(io_priv); +} + /** * ccw_device_wait_idle() - busy wait for device to become idle * @cdev: ccw device -- cgit v1.2.3 From cfa785e623577cdad2aa721acb23bd3a95eced9a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 22 Jan 2014 14:49:30 +0100 Subject: s390/uaccess: normalize order of parameters of indirect uaccess function calls For some unknown reason the indirect uaccess functions on s390 implement a different parameter order than what is usual. e.g.: unsigned long copy_to_user(void *to, const void *from, unsigned long n); vs. size_t (*copy_to_user)(size_t n, void __user * to, const void *from); Let's get rid of this confusing parameter reordering. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/uaccess.h | 54 ++++++++++++++++++++--------------------- arch/s390/lib/uaccess_mvcos.c | 20 +++++++-------- arch/s390/lib/uaccess_pt.c | 34 +++++++++++++------------- 3 files changed, 54 insertions(+), 54 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 79330af9a5f8..73199636ba98 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -93,12 +93,12 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x) #define ARCH_HAS_SEARCH_EXTABLE struct uaccess_ops { - size_t (*copy_from_user)(size_t, const void __user *, void *); - size_t (*copy_to_user)(size_t, void __user *, const void *); - size_t (*copy_in_user)(size_t, void __user *, const void __user *); - size_t (*clear_user)(size_t, void __user *); - size_t (*strnlen_user)(size_t, const char __user *); - size_t (*strncpy_from_user)(size_t, const char __user *, char *); + size_t (*copy_from_user)(void *, const void __user *, size_t); + size_t (*copy_to_user)(void __user *, const void *, size_t); + size_t (*copy_in_user)(void __user *, const void __user *, size_t); + size_t (*clear_user)(void __user *, size_t); + size_t (*strnlen_user)(const char __user *, size_t); + size_t (*strncpy_from_user)(char *, const char __user *, size_t); int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old); int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new); }; @@ -109,15 +109,15 @@ extern struct uaccess_ops uaccess_pt; extern int __handle_fault(unsigned long, unsigned long, int); -static inline int __put_user_fn(size_t size, void __user *ptr, void *x) +static inline int __put_user_fn(void *x, void __user *ptr, size_t size) { - size = uaccess.copy_to_user(size, ptr, x); + size = uaccess.copy_to_user(ptr, x, size); return size ? -EFAULT : size; } -static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +static inline int __get_user_fn(void *x, const void __user *ptr, size_t size) { - size = uaccess.copy_from_user(size, ptr, x); + size = uaccess.copy_from_user(x, ptr, size); return size ? 
-EFAULT : size; } @@ -135,8 +135,8 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) case 2: \ case 4: \ case 8: \ - __pu_err = __put_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __pu_err = __put_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ break; \ default: \ __put_user_bad(); \ @@ -161,29 +161,29 @@ extern int __put_user_bad(void) __attribute__((noreturn)); switch (sizeof(*(ptr))) { \ case 1: { \ unsigned char __x; \ - __gu_err = __get_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __gu_err = __get_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ unsigned short __x; \ - __gu_err = __get_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __gu_err = __get_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ unsigned int __x; \ - __gu_err = __get_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __gu_err = __get_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ unsigned long long __x; \ - __gu_err = __get_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __gu_err = __get_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ @@ -222,7 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn)); static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) { - return uaccess.copy_to_user(n, to, from); + return uaccess.copy_to_user(to, from, n); } #define __copy_to_user_inatomic __copy_to_user @@ -268,7 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) { - return uaccess.copy_from_user(n, from, to); + return uaccess.copy_from_user(to, from, n); } extern void copy_from_user_overflow(void) @@ -309,7 +309,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n) { - return uaccess.copy_in_user(n, to, from); + return uaccess.copy_in_user(to, from, n); } static inline unsigned long __must_check @@ -326,14 +326,14 @@ static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) { might_fault(); - return uaccess.strncpy_from_user(count, src, dst); + return uaccess.strncpy_from_user(dst, src, count); } static inline unsigned long strnlen_user(const char __user * src, unsigned long n) { might_fault(); - return uaccess.strnlen_user(n, src); + return uaccess.strnlen_user(src, n); } /** @@ -359,14 +359,14 @@ strnlen_user(const char __user * src, unsigned long n) static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { - return uaccess.clear_user(n, to); + return uaccess.clear_user(to, n); } static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); - return uaccess.clear_user(n, to); + return uaccess.clear_user(to, n); } extern int copy_to_user_real(void __user *dest, void *src, size_t count); diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index 4b7993bf69b9..95123f57aaf8 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c @@ -26,7 +26,7 @@ #define SLR "slgr" #endif -static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) +static size_t copy_from_user_mvcos(void 
*x, const void __user *ptr, size_t size) { register unsigned long reg0 asm("0") = 0x81UL; unsigned long tmp1, tmp2; @@ -65,7 +65,7 @@ static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) return size; } -static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) +static size_t copy_to_user_mvcos(void __user *ptr, const void *x, size_t size) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; @@ -94,8 +94,8 @@ static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) return size; } -static size_t copy_in_user_mvcos(size_t size, void __user *to, - const void __user *from) +static size_t copy_in_user_mvcos(void __user *to, const void __user *from, + size_t size) { register unsigned long reg0 asm("0") = 0x810081UL; unsigned long tmp1, tmp2; @@ -117,7 +117,7 @@ static size_t copy_in_user_mvcos(size_t size, void __user *to, return size; } -static size_t clear_user_mvcos(size_t size, void __user *to) +static size_t clear_user_mvcos(void __user *to, size_t size) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; @@ -145,7 +145,7 @@ static size_t clear_user_mvcos(size_t size, void __user *to) return size; } -static size_t strnlen_user_mvcos(size_t count, const char __user *src) +static size_t strnlen_user_mvcos(const char __user *src, size_t count) { size_t done, len, offset, len_str; char buf[256]; @@ -155,7 +155,7 @@ static size_t strnlen_user_mvcos(size_t count, const char __user *src) offset = (size_t)src & ~PAGE_MASK; len = min(256UL, PAGE_SIZE - offset); len = min(count - done, len); - if (copy_from_user_mvcos(len, src, buf)) + if (copy_from_user_mvcos(buf, src, len)) return 0; len_str = strnlen(buf, len); done += len_str; @@ -164,8 +164,8 @@ static size_t strnlen_user_mvcos(size_t count, const char __user *src) return done + 1; } -static size_t strncpy_from_user_mvcos(size_t count, const char __user *src, - char *dst) +static size_t strncpy_from_user_mvcos(char *dst, const char __user *src, + size_t count) { size_t done, len, offset, len_str; @@ -175,7 +175,7 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src, do { offset = (size_t)src & ~PAGE_MASK; len = min(count - done, PAGE_SIZE - offset); - if (copy_from_user_mvcos(len, src, dst)) + if (copy_from_user_mvcos(dst, src, len)) return -EFAULT; len_str = strnlen(dst, len); done += len_str; diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 61ebcc9ccb34..2fa696b39b56 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -22,7 +22,7 @@ #define SLR "slgr" #endif -static size_t strnlen_kernel(size_t count, const char __user *src) +static size_t strnlen_kernel(const char __user *src, size_t count) { register unsigned long reg0 asm("0") = 0UL; unsigned long tmp1, tmp2; @@ -42,8 +42,8 @@ static size_t strnlen_kernel(size_t count, const char __user *src) return count; } -static size_t copy_in_kernel(size_t count, void __user *to, - const void __user *from) +static size_t copy_in_kernel(void __user *to, const void __user *from, + size_t count) { unsigned long tmp1; @@ -211,26 +211,26 @@ fault: return 0; } -static size_t copy_from_user_pt(size_t n, const void __user *from, void *to) +static size_t copy_from_user_pt(void *to, const void __user *from, size_t n) { size_t rc; if (segment_eq(get_fs(), KERNEL_DS)) - return copy_in_kernel(n, (void __user *) to, from); + return copy_in_kernel((void __user *) to, from, n); rc = __user_copy_pt((unsigned long) from, to, 
n, 0); if (unlikely(rc)) memset(to + n - rc, 0, rc); return rc; } -static size_t copy_to_user_pt(size_t n, void __user *to, const void *from) +static size_t copy_to_user_pt(void __user *to, const void *from, size_t n) { if (segment_eq(get_fs(), KERNEL_DS)) - return copy_in_kernel(n, to, (void __user *) from); + return copy_in_kernel(to, (void __user *) from, n); return __user_copy_pt((unsigned long) to, (void *) from, n, 1); } -static size_t clear_user_pt(size_t n, void __user *to) +static size_t clear_user_pt(void __user *to, size_t n) { void *zpage = (void *) empty_zero_page; long done, size, ret; @@ -242,7 +242,7 @@ static size_t clear_user_pt(size_t n, void __user *to) else size = n - done; if (segment_eq(get_fs(), KERNEL_DS)) - ret = copy_in_kernel(n, to, (void __user *) zpage); + ret = copy_in_kernel(to, (void __user *) zpage, n); else ret = __user_copy_pt((unsigned long) to, zpage, size, 1); done += size; @@ -253,7 +253,7 @@ static size_t clear_user_pt(size_t n, void __user *to) return 0; } -static size_t strnlen_user_pt(size_t count, const char __user *src) +static size_t strnlen_user_pt(const char __user *src, size_t count) { unsigned long uaddr = (unsigned long) src; struct mm_struct *mm = current->mm; @@ -263,7 +263,7 @@ static size_t strnlen_user_pt(size_t count, const char __user *src) if (unlikely(!count)) return 0; if (segment_eq(get_fs(), KERNEL_DS)) - return strnlen_kernel(count, src); + return strnlen_kernel(src, count); if (!mm) return 0; done = 0; @@ -289,8 +289,8 @@ fault: goto retry; } -static size_t strncpy_from_user_pt(size_t count, const char __user *src, - char *dst) +static size_t strncpy_from_user_pt(char *dst, const char __user *src, + size_t count) { size_t done, len, offset, len_str; @@ -301,7 +301,7 @@ static size_t strncpy_from_user_pt(size_t count, const char __user *src, offset = (size_t)src & ~PAGE_MASK; len = min(count - done, PAGE_SIZE - offset); if (segment_eq(get_fs(), KERNEL_DS)) { - if (copy_in_kernel(len, (void __user *) dst, src)) + if (copy_in_kernel((void __user *) dst, src, len)) return -EFAULT; } else { if (__user_copy_pt((unsigned long) src, dst, len, 0)) @@ -315,8 +315,8 @@ static size_t strncpy_from_user_pt(size_t count, const char __user *src, return done; } -static size_t copy_in_user_pt(size_t n, void __user *to, - const void __user *from) +static size_t copy_in_user_pt(void __user *to, const void __user *from, + size_t n) { struct mm_struct *mm = current->mm; unsigned long offset_max, uaddr, done, size, error_code; @@ -326,7 +326,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to, int write_user; if (segment_eq(get_fs(), KERNEL_DS)) - return copy_in_kernel(n, to, from); + return copy_in_kernel(to, from, n); if (!mm) return n; done = 0; -- cgit v1.2.3 From 4f41c2b4567dbfb7ff93e5c552b869e2865bcd9d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 23 Jan 2014 11:18:36 +0100 Subject: s390/uaccess: get rid of indirect function calls There are only two uaccess variants on s390 left: the version that is used if the mvcos instruction is available, and the page table walk variant. So there is no need for expensive indirect function calls. By default the mvcos variant will be called. If the mvcos instruction is not available it will call the page table walk variant. For minimal performance impact the "if (mvcos_is_available)" is implemented with a jump label, which will be a six byte nop on machines with mvcos. 
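Condensed from the uaccess_mvcos.c hunks in the diff below, the dispatch
pattern this introduces looks as follows; copy_from_user_mvcos() and
copy_from_user_pt() keep their existing implementations, and the other
uaccess entry points are wired up the same way:

#include <linux/jump_label.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include "uaccess.h"

/* Default-true key: with mvcos the test compiles to a six byte nop and
 * the mvcos path is a straight fall-through. */
static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE;

size_t __copy_from_user(void *to, const void __user *from, size_t n)
{
	if (static_key_true(&have_mvcos))
		return copy_from_user_mvcos(to, from, n);	/* fast path */
	return copy_from_user_pt(to, from, n);	/* page table walk fallback */
}
EXPORT_SYMBOL(__copy_from_user);

static int __init uaccess_init(void)
{
	if (!MACHINE_HAS_MVCOS)
		/* No mvcos facility: patch the branch to the pt variant. */
		static_key_slow_dec(&have_mvcos);
	return 0;
}
early_initcall(uaccess_init);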
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/futex.h | 13 ++-- arch/s390/include/asm/uaccess.h | 148 +++++++++++++++++----------------------- arch/s390/kernel/setup.c | 9 --- arch/s390/lib/Makefile | 3 +- arch/s390/lib/uaccess.h | 8 ++- arch/s390/lib/uaccess_mvcos.c | 89 ++++++++++++++++++------ arch/s390/lib/uaccess_pt.c | 31 +++------ 7 files changed, 152 insertions(+), 149 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index 51bcaa0fdeef..fda46bd38c99 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -5,7 +5,10 @@ #include #include -static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval); +int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old); + +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -17,7 +20,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) oparg = 1 << oparg; pagefault_disable(); - ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval); + ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval); pagefault_enable(); if (!ret) { @@ -34,10 +37,4 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) -{ - return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); -} - #endif /* _ASM_S390_FUTEX_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 73199636ba98..49885a518e5e 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -92,33 +92,58 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x) #define ARCH_HAS_SORT_EXTABLE #define ARCH_HAS_SEARCH_EXTABLE -struct uaccess_ops { - size_t (*copy_from_user)(void *, const void __user *, size_t); - size_t (*copy_to_user)(void __user *, const void *, size_t); - size_t (*copy_in_user)(void __user *, const void __user *, size_t); - size_t (*clear_user)(void __user *, size_t); - size_t (*strnlen_user)(const char __user *, size_t); - size_t (*strncpy_from_user)(char *, const char __user *, size_t); - int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old); - int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new); -}; +int __handle_fault(unsigned long, unsigned long, int); -extern struct uaccess_ops uaccess; -extern struct uaccess_ops uaccess_mvcos; -extern struct uaccess_ops uaccess_pt; +/** + * __copy_from_user: - Copy a block of data from user space, with less checking. + * @to: Destination address, in kernel space. + * @from: Source address, in user space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from user space to kernel space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + * + * If some data could not be copied, this function will pad the copied + * data to the requested size using zero bytes. 
+ */ +size_t __must_check __copy_from_user(void *to, const void __user *from, + size_t n); + +/** + * __copy_to_user: - Copy a block of data into user space, with less checking. + * @to: Destination address, in user space. + * @from: Source address, in kernel space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from kernel space to user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + */ +unsigned long __must_check __copy_to_user(void __user *to, const void *from, + unsigned long n); -extern int __handle_fault(unsigned long, unsigned long, int); +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user static inline int __put_user_fn(void *x, void __user *ptr, size_t size) { - size = uaccess.copy_to_user(ptr, x, size); - return size ? -EFAULT : size; + size = __copy_to_user(ptr, x, size); + return size ? -EFAULT : 0; } static inline int __get_user_fn(void *x, const void __user *ptr, size_t size) { - size = uaccess.copy_from_user(x, ptr, size); - return size ? -EFAULT : size; + size = __copy_from_user(x, ptr, size); + return size ? -EFAULT : 0; } /* @@ -152,7 +177,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, size_t size) }) -extern int __put_user_bad(void) __attribute__((noreturn)); +int __put_user_bad(void) __attribute__((noreturn)); #define __get_user(x, ptr) \ ({ \ @@ -200,34 +225,11 @@ extern int __put_user_bad(void) __attribute__((noreturn)); __get_user(x, ptr); \ }) -extern int __get_user_bad(void) __attribute__((noreturn)); +int __get_user_bad(void) __attribute__((noreturn)); #define __put_user_unaligned __put_user #define __get_user_unaligned __get_user -/** - * __copy_to_user: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -static inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - return uaccess.copy_to_user(to, from, n); -} - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - /** * copy_to_user: - Copy a block of data into user space. * @to: Destination address, in user space. @@ -248,30 +250,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n) return __copy_to_user(to, from, n); } -/** - * __copy_from_user: - Copy a block of data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from user space to kernel space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. 
- */ -static inline unsigned long __must_check -__copy_from_user(void *to, const void __user *from, unsigned long n) -{ - return uaccess.copy_from_user(to, from, n); -} - -extern void copy_from_user_overflow(void) +void copy_from_user_overflow(void) #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS __compiletime_warning("copy_from_user() buffer size is not provably correct") #endif @@ -306,11 +285,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n) return __copy_from_user(to, from, n); } -static inline unsigned long __must_check -__copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - return uaccess.copy_in_user(to, from, n); -} +unsigned long __must_check +__copy_in_user(void __user *to, const void __user *from, unsigned long n); static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n) @@ -322,18 +298,22 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n) /* * Copy a null terminated string from userspace. */ + +long __strncpy_from_user(char *dst, const char __user *src, long count); + static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) { might_fault(); - return uaccess.strncpy_from_user(dst, src, count); + return __strncpy_from_user(dst, src, count); } -static inline unsigned long -strnlen_user(const char __user * src, unsigned long n) +size_t __must_check __strnlen_user(const char __user *src, size_t count); + +static inline size_t strnlen_user(const char __user *src, size_t n) { might_fault(); - return uaccess.strnlen_user(src, n); + return __strnlen_user(src, n); } /** @@ -355,21 +335,15 @@ strnlen_user(const char __user * src, unsigned long n) /* * Zero Userspace */ +size_t __must_check __clear_user(void __user *to, size_t size); -static inline unsigned long __must_check -__clear_user(void __user *to, unsigned long n) -{ - return uaccess.clear_user(to, n); -} - -static inline unsigned long __must_check -clear_user(void __user *to, unsigned long n) +static inline size_t __must_check clear_user(void __user *to, size_t n) { might_fault(); - return uaccess.clear_user(to, n); + return __clear_user(to, n); } -extern int copy_to_user_real(void __user *dest, void *src, size_t count); -extern int copy_from_user_real(void *dest, void __user *src, size_t count); +int copy_to_user_real(void __user *dest, void *src, size_t count); +int copy_from_user_real(void *dest, void __user *src, size_t count); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 09e2f468f48b..91ea00955db7 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -47,7 +47,6 @@ #include #include -#include #include #include #include @@ -64,12 +63,6 @@ #include #include "entry.h" -/* - * User copy operations. - */ -struct uaccess_ops uaccess; -EXPORT_SYMBOL(uaccess); - /* * Machine setup.. */ @@ -1009,8 +1002,6 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; - uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt; - parse_early_param(); detect_memory_layout(memory_chunk, memory_end); os_info_init(); diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index b068729e50ac..e3fffe1dff51 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -2,8 +2,7 @@ # Makefile for s390-specific library files.. 
# -lib-y += delay.o string.o uaccess_pt.o find.o +lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o obj-$(CONFIG_64BIT) += mem64.o -lib-$(CONFIG_64BIT) += uaccess_mvcos.o lib-$(CONFIG_SMP) += spinlock.o diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h index b1a22173d027..e5b9c924b733 100644 --- a/arch/s390/lib/uaccess.h +++ b/arch/s390/lib/uaccess.h @@ -6,7 +6,11 @@ #ifndef __ARCH_S390_LIB_UACCESS_H #define __ARCH_S390_LIB_UACCESS_H -extern int futex_atomic_op_pt(int, u32 __user *, int, int *); -extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32); +size_t copy_from_user_pt(void *to, const void __user *from, size_t n); +size_t copy_to_user_pt(void __user *to, const void *from, size_t n); +size_t copy_in_user_pt(void __user *to, const void __user *from, size_t n); +size_t clear_user_pt(void __user *to, size_t n); +size_t strnlen_user_pt(const char __user *src, size_t count); +size_t strncpy_from_user_pt(char *dst, const char __user *src, size_t count); #endif /* __ARCH_S390_LIB_UACCESS_H */ diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index 95123f57aaf8..66f35e15db2d 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c @@ -6,7 +6,9 @@ * Gerald Schaefer (gerald.schaefer@de.ibm.com) */ +#include #include +#include #include #include #include @@ -26,7 +28,10 @@ #define SLR "slgr" #endif -static size_t copy_from_user_mvcos(void *x, const void __user *ptr, size_t size) +static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE; + +static inline size_t copy_from_user_mvcos(void *x, const void __user *ptr, + size_t size) { register unsigned long reg0 asm("0") = 0x81UL; unsigned long tmp1, tmp2; @@ -65,7 +70,16 @@ static size_t copy_from_user_mvcos(void *x, const void __user *ptr, size_t size) return size; } -static size_t copy_to_user_mvcos(void __user *ptr, const void *x, size_t size) +size_t __copy_from_user(void *to, const void __user *from, size_t n) +{ + if (static_key_true(&have_mvcos)) + return copy_from_user_mvcos(to, from, n); + return copy_from_user_pt(to, from, n); +} +EXPORT_SYMBOL(__copy_from_user); + +static inline size_t copy_to_user_mvcos(void __user *ptr, const void *x, + size_t size) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; @@ -94,8 +108,16 @@ static size_t copy_to_user_mvcos(void __user *ptr, const void *x, size_t size) return size; } -static size_t copy_in_user_mvcos(void __user *to, const void __user *from, - size_t size) +size_t __copy_to_user(void __user *to, const void *from, size_t n) +{ + if (static_key_true(&have_mvcos)) + return copy_to_user_mvcos(to, from, n); + return copy_to_user_pt(to, from, n); +} +EXPORT_SYMBOL(__copy_to_user); + +static inline size_t copy_in_user_mvcos(void __user *to, const void __user *from, + size_t size) { register unsigned long reg0 asm("0") = 0x810081UL; unsigned long tmp1, tmp2; @@ -117,7 +139,15 @@ static size_t copy_in_user_mvcos(void __user *to, const void __user *from, return size; } -static size_t clear_user_mvcos(void __user *to, size_t size) +size_t __copy_in_user(void __user *to, const void __user *from, size_t n) +{ + if (static_key_true(&have_mvcos)) + return copy_in_user_mvcos(to, from, n); + return copy_in_user_pt(to, from, n); +} +EXPORT_SYMBOL(__copy_in_user); + +static inline size_t clear_user_mvcos(void __user *to, size_t size) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; @@ -145,7 +175,15 @@ static 
size_t clear_user_mvcos(void __user *to, size_t size) return size; } -static size_t strnlen_user_mvcos(const char __user *src, size_t count) +size_t __clear_user(void __user *to, size_t size) +{ + if (static_key_true(&have_mvcos)) + return clear_user_mvcos(to, size); + return clear_user_pt(to, size); +} +EXPORT_SYMBOL(__clear_user); + +static inline size_t strnlen_user_mvcos(const char __user *src, size_t count) { size_t done, len, offset, len_str; char buf[256]; @@ -164,10 +202,18 @@ static size_t strnlen_user_mvcos(const char __user *src, size_t count) return done + 1; } -static size_t strncpy_from_user_mvcos(char *dst, const char __user *src, - size_t count) +size_t __strnlen_user(const char __user *src, size_t count) { - size_t done, len, offset, len_str; + if (static_key_true(&have_mvcos)) + return strnlen_user_mvcos(src, count); + return strnlen_user_pt(src, count); +} +EXPORT_SYMBOL(__strnlen_user); + +static inline size_t strncpy_from_user_mvcos(char *dst, const char __user *src, + size_t count) +{ + unsigned long done, len, offset, len_str; if (unlikely(!count)) return 0; @@ -185,13 +231,18 @@ static size_t strncpy_from_user_mvcos(char *dst, const char __user *src, return done; } -struct uaccess_ops uaccess_mvcos = { - .copy_from_user = copy_from_user_mvcos, - .copy_to_user = copy_to_user_mvcos, - .copy_in_user = copy_in_user_mvcos, - .clear_user = clear_user_mvcos, - .strnlen_user = strnlen_user_mvcos, - .strncpy_from_user = strncpy_from_user_mvcos, - .futex_atomic_op = futex_atomic_op_pt, - .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, -}; +long __strncpy_from_user(char *dst, const char __user *src, long count) +{ + if (static_key_true(&have_mvcos)) + return strncpy_from_user_mvcos(dst, src, count); + return strncpy_from_user_pt(dst, src, count); +} +EXPORT_SYMBOL(__strncpy_from_user); + +static int __init uaccess_init(void) +{ + if (!MACHINE_HAS_MVCOS) + static_key_slow_dec(&have_mvcos); + return 0; +} +early_initcall(uaccess_init); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 2fa696b39b56..b49c3a440a24 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -211,7 +211,7 @@ fault: return 0; } -static size_t copy_from_user_pt(void *to, const void __user *from, size_t n) +size_t copy_from_user_pt(void *to, const void __user *from, size_t n) { size_t rc; @@ -223,14 +223,14 @@ static size_t copy_from_user_pt(void *to, const void __user *from, size_t n) return rc; } -static size_t copy_to_user_pt(void __user *to, const void *from, size_t n) +size_t copy_to_user_pt(void __user *to, const void *from, size_t n) { if (segment_eq(get_fs(), KERNEL_DS)) return copy_in_kernel(to, (void __user *) from, n); return __user_copy_pt((unsigned long) to, (void *) from, n, 1); } -static size_t clear_user_pt(void __user *to, size_t n) +size_t clear_user_pt(void __user *to, size_t n) { void *zpage = (void *) empty_zero_page; long done, size, ret; @@ -253,7 +253,7 @@ static size_t clear_user_pt(void __user *to, size_t n) return 0; } -static size_t strnlen_user_pt(const char __user *src, size_t count) +size_t strnlen_user_pt(const char __user *src, size_t count) { unsigned long uaddr = (unsigned long) src; struct mm_struct *mm = current->mm; @@ -289,8 +289,7 @@ fault: goto retry; } -static size_t strncpy_from_user_pt(char *dst, const char __user *src, - size_t count) +size_t strncpy_from_user_pt(char *dst, const char __user *src, size_t count) { size_t done, len, offset, len_str; @@ -315,8 +314,7 @@ static size_t strncpy_from_user_pt(char *dst, const 
char __user *src, return done; } -static size_t copy_in_user_pt(void __user *to, const void __user *from, - size_t n) +size_t copy_in_user_pt(void __user *to, const void __user *from, size_t n) { struct mm_struct *mm = current->mm; unsigned long offset_max, uaddr, done, size, error_code; @@ -411,7 +409,7 @@ static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) return ret; } -int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) +int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old) { int ret; @@ -449,8 +447,8 @@ static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, return ret; } -int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { int ret; @@ -471,14 +469,3 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, put_page(virt_to_page(uaddr)); return ret; } - -struct uaccess_ops uaccess_pt = { - .copy_from_user = copy_from_user_pt, - .copy_to_user = copy_to_user_pt, - .copy_in_user = copy_in_user_pt, - .clear_user = clear_user_pt, - .strnlen_user = strnlen_user_pt, - .strncpy_from_user = strncpy_from_user_pt, - .futex_atomic_op = futex_atomic_op_pt, - .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, -}; -- cgit v1.2.3 From 211deca6bf413560b562d69748ebc4df5d80d65e Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 24 Jan 2014 12:51:27 +0100 Subject: s390/uaccess: consistent types The types 'size_t' and 'unsigned long' have been used randomly for the uaccess functions. This looks rather confusing. So let's change all functions to use unsigned long instead and get rid of size_t in order to have a consistent interface. The only exception is strncpy_from_user() which uses 'long' since it may return a signed value (-EFAULT). Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/uaccess.h | 20 ++++++++++---------- arch/s390/lib/uaccess.h | 12 ++++++------ arch/s390/lib/uaccess_mvcos.c | 39 ++++++++++++++++++++------------------- arch/s390/lib/uaccess_pt.c | 38 +++++++++++++++++++------------------- arch/s390/mm/maccess.c | 4 ++-- 5 files changed, 57 insertions(+), 56 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 49885a518e5e..2710b41eed22 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -111,8 +111,8 @@ int __handle_fault(unsigned long, unsigned long, int); * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. */ -size_t __must_check __copy_from_user(void *to, const void __user *from, - size_t n); +unsigned long __must_check __copy_from_user(void *to, const void __user *from, + unsigned long n); /** * __copy_to_user: - Copy a block of data into user space, with less checking. @@ -134,13 +134,13 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from, #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user -static inline int __put_user_fn(void *x, void __user *ptr, size_t size) +static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) { size = __copy_to_user(ptr, x, size); return size ? 
-EFAULT : 0; } -static inline int __get_user_fn(void *x, const void __user *ptr, size_t size) +static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) { size = __copy_from_user(x, ptr, size); return size ? -EFAULT : 0; @@ -308,9 +308,9 @@ strncpy_from_user(char *dst, const char __user *src, long count) return __strncpy_from_user(dst, src, count); } -size_t __must_check __strnlen_user(const char __user *src, size_t count); +unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count); -static inline size_t strnlen_user(const char __user *src, size_t n) +static inline unsigned long strnlen_user(const char __user *src, unsigned long n) { might_fault(); return __strnlen_user(src, n); @@ -335,15 +335,15 @@ static inline size_t strnlen_user(const char __user *src, size_t n) /* * Zero Userspace */ -size_t __must_check __clear_user(void __user *to, size_t size); +unsigned long __must_check __clear_user(void __user *to, unsigned long size); -static inline size_t __must_check clear_user(void __user *to, size_t n) +static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); return __clear_user(to, n); } -int copy_to_user_real(void __user *dest, void *src, size_t count); -int copy_from_user_real(void *dest, void __user *src, size_t count); +int copy_to_user_real(void __user *dest, void *src, unsigned long count); +int copy_from_user_real(void *dest, void __user *src, unsigned long count); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h index e5b9c924b733..c7e0e81f4b4e 100644 --- a/arch/s390/lib/uaccess.h +++ b/arch/s390/lib/uaccess.h @@ -6,11 +6,11 @@ #ifndef __ARCH_S390_LIB_UACCESS_H #define __ARCH_S390_LIB_UACCESS_H -size_t copy_from_user_pt(void *to, const void __user *from, size_t n); -size_t copy_to_user_pt(void __user *to, const void *from, size_t n); -size_t copy_in_user_pt(void __user *to, const void __user *from, size_t n); -size_t clear_user_pt(void __user *to, size_t n); -size_t strnlen_user_pt(const char __user *src, size_t count); -size_t strncpy_from_user_pt(char *dst, const char __user *src, size_t count); +unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n); +unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n); +unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n); +unsigned long clear_user_pt(void __user *to, unsigned long n); +unsigned long strnlen_user_pt(const char __user *src, unsigned long count); +long strncpy_from_user_pt(char *dst, const char __user *src, long count); #endif /* __ARCH_S390_LIB_UACCESS_H */ diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index 66f35e15db2d..8c01f3aaf95c 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c @@ -30,8 +30,8 @@ static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE; -static inline size_t copy_from_user_mvcos(void *x, const void __user *ptr, - size_t size) +static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, + unsigned long size) { register unsigned long reg0 asm("0") = 0x81UL; unsigned long tmp1, tmp2; @@ -70,7 +70,7 @@ static inline size_t copy_from_user_mvcos(void *x, const void __user *ptr, return size; } -size_t __copy_from_user(void *to, const void __user *from, size_t n) +unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { if (static_key_true(&have_mvcos)) return 
copy_from_user_mvcos(to, from, n); @@ -78,8 +78,8 @@ size_t __copy_from_user(void *to, const void __user *from, size_t n) } EXPORT_SYMBOL(__copy_from_user); -static inline size_t copy_to_user_mvcos(void __user *ptr, const void *x, - size_t size) +static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, + unsigned long size) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; @@ -108,7 +108,7 @@ static inline size_t copy_to_user_mvcos(void __user *ptr, const void *x, return size; } -size_t __copy_to_user(void __user *to, const void *from, size_t n) +unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { if (static_key_true(&have_mvcos)) return copy_to_user_mvcos(to, from, n); @@ -116,8 +116,8 @@ size_t __copy_to_user(void __user *to, const void *from, size_t n) } EXPORT_SYMBOL(__copy_to_user); -static inline size_t copy_in_user_mvcos(void __user *to, const void __user *from, - size_t size) +static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from, + unsigned long size) { register unsigned long reg0 asm("0") = 0x810081UL; unsigned long tmp1, tmp2; @@ -139,7 +139,7 @@ static inline size_t copy_in_user_mvcos(void __user *to, const void __user *from return size; } -size_t __copy_in_user(void __user *to, const void __user *from, size_t n) +unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n) { if (static_key_true(&have_mvcos)) return copy_in_user_mvcos(to, from, n); @@ -147,7 +147,7 @@ size_t __copy_in_user(void __user *to, const void __user *from, size_t n) } EXPORT_SYMBOL(__copy_in_user); -static inline size_t clear_user_mvcos(void __user *to, size_t size) +static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; @@ -175,7 +175,7 @@ static inline size_t clear_user_mvcos(void __user *to, size_t size) return size; } -size_t __clear_user(void __user *to, size_t size) +unsigned long __clear_user(void __user *to, unsigned long size) { if (static_key_true(&have_mvcos)) return clear_user_mvcos(to, size); @@ -183,14 +183,15 @@ size_t __clear_user(void __user *to, size_t size) } EXPORT_SYMBOL(__clear_user); -static inline size_t strnlen_user_mvcos(const char __user *src, size_t count) +static inline unsigned long strnlen_user_mvcos(const char __user *src, + unsigned long count) { - size_t done, len, offset, len_str; + unsigned long done, len, offset, len_str; char buf[256]; done = 0; do { - offset = (size_t)src & ~PAGE_MASK; + offset = (unsigned long)src & ~PAGE_MASK; len = min(256UL, PAGE_SIZE - offset); len = min(count - done, len); if (copy_from_user_mvcos(buf, src, len)) @@ -202,7 +203,7 @@ static inline size_t strnlen_user_mvcos(const char __user *src, size_t count) return done + 1; } -size_t __strnlen_user(const char __user *src, size_t count) +unsigned long __strnlen_user(const char __user *src, unsigned long count) { if (static_key_true(&have_mvcos)) return strnlen_user_mvcos(src, count); @@ -210,16 +211,16 @@ size_t __strnlen_user(const char __user *src, size_t count) } EXPORT_SYMBOL(__strnlen_user); -static inline size_t strncpy_from_user_mvcos(char *dst, const char __user *src, - size_t count) +static inline long strncpy_from_user_mvcos(char *dst, const char __user *src, + long count) { unsigned long done, len, offset, len_str; - if (unlikely(!count)) + if (unlikely(count <= 0)) return 0; done = 0; do { - offset = (size_t)src & ~PAGE_MASK; + offset 
= (unsigned long)src & ~PAGE_MASK; len = min(count - done, PAGE_SIZE - offset); if (copy_from_user_mvcos(dst, src, len)) return -EFAULT; diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index b49c3a440a24..8d39760bae68 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -22,7 +22,7 @@ #define SLR "slgr" #endif -static size_t strnlen_kernel(const char __user *src, size_t count) +static unsigned long strnlen_kernel(const char __user *src, unsigned long count) { register unsigned long reg0 asm("0") = 0UL; unsigned long tmp1, tmp2; @@ -42,8 +42,8 @@ static size_t strnlen_kernel(const char __user *src, size_t count) return count; } -static size_t copy_in_kernel(void __user *to, const void __user *from, - size_t count) +static unsigned long copy_in_kernel(void __user *to, const void __user *from, + unsigned long count) { unsigned long tmp1; @@ -146,8 +146,8 @@ static unsigned long follow_table(struct mm_struct *mm, #endif /* CONFIG_64BIT */ -static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, - size_t n, int write_user) +static inline unsigned long __user_copy_pt(unsigned long uaddr, void *kptr, + unsigned long n, int write_user) { struct mm_struct *mm = current->mm; unsigned long offset, done, size, kaddr; @@ -189,8 +189,7 @@ fault: * Do DAT for user address by page table walk, return kernel address. * This function needs to be called with current->mm->page_table_lock held. */ -static __always_inline unsigned long __dat_user_addr(unsigned long uaddr, - int write) +static inline unsigned long __dat_user_addr(unsigned long uaddr, int write) { struct mm_struct *mm = current->mm; unsigned long kaddr; @@ -211,9 +210,9 @@ fault: return 0; } -size_t copy_from_user_pt(void *to, const void __user *from, size_t n) +unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n) { - size_t rc; + unsigned long rc; if (segment_eq(get_fs(), KERNEL_DS)) return copy_in_kernel((void __user *) to, from, n); @@ -223,17 +222,17 @@ size_t copy_from_user_pt(void *to, const void __user *from, size_t n) return rc; } -size_t copy_to_user_pt(void __user *to, const void *from, size_t n) +unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n) { if (segment_eq(get_fs(), KERNEL_DS)) return copy_in_kernel(to, (void __user *) from, n); return __user_copy_pt((unsigned long) to, (void *) from, n, 1); } -size_t clear_user_pt(void __user *to, size_t n) +unsigned long clear_user_pt(void __user *to, unsigned long n) { void *zpage = (void *) empty_zero_page; - long done, size, ret; + unsigned long done, size, ret; done = 0; do { @@ -253,12 +252,12 @@ size_t clear_user_pt(void __user *to, size_t n) return 0; } -size_t strnlen_user_pt(const char __user *src, size_t count) +unsigned long strnlen_user_pt(const char __user *src, unsigned long count) { unsigned long uaddr = (unsigned long) src; struct mm_struct *mm = current->mm; unsigned long offset, done, len, kaddr; - size_t len_str; + unsigned long len_str; if (unlikely(!count)) return 0; @@ -289,15 +288,15 @@ fault: goto retry; } -size_t strncpy_from_user_pt(char *dst, const char __user *src, size_t count) +long strncpy_from_user_pt(char *dst, const char __user *src, long count) { - size_t done, len, offset, len_str; + unsigned long done, len, offset, len_str; - if (unlikely(!count)) + if (unlikely(count <= 0)) return 0; done = 0; do { - offset = (size_t)src & ~PAGE_MASK; + offset = (unsigned long)src & ~PAGE_MASK; len = min(count - done, PAGE_SIZE - offset); if 
(segment_eq(get_fs(), KERNEL_DS)) { if (copy_in_kernel((void __user *) dst, src, len)) @@ -314,7 +313,8 @@ size_t strncpy_from_user_pt(char *dst, const char __user *src, size_t count) return done; } -size_t copy_in_user_pt(void __user *to, const void __user *from, size_t n) +unsigned long copy_in_user_pt(void __user *to, const void __user *from, + unsigned long n) { struct mm_struct *mm = current->mm; unsigned long offset_max, uaddr, done, size, error_code; diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index d1e0e0c7a7e2..efe8ad045c7b 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -128,7 +128,7 @@ void memcpy_absolute(void *dest, void *src, size_t count) /* * Copy memory from kernel (real) to user (virtual) */ -int copy_to_user_real(void __user *dest, void *src, size_t count) +int copy_to_user_real(void __user *dest, void *src, unsigned long count) { int offs = 0, size, rc; char *buf; @@ -154,7 +154,7 @@ out: /* * Copy memory from user (virtual) to kernel (real) */ -int copy_from_user_real(void *dest, void __user *src, size_t count) +int copy_from_user_real(void *dest, void __user *src, unsigned long count) { int offs = 0, size, rc; char *buf; -- cgit v1.2.3 From ca04ddbf537d30b25f6e240b70f19be35fac4313 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 24 Jan 2014 13:03:42 +0100 Subject: s390/setup: get rid of MACHINE_HAS_MVCOS machine flag MACHINE_HAS_MVCOS is used exactly once when the machine is brought up. There is no need to cache the flag in the machine_flags. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/setup.h | 3 --- arch/s390/kernel/early.c | 2 -- arch/s390/lib/uaccess_mvcos.c | 3 ++- 3 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 94cfbe442f12..406f3a1e63ef 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -59,7 +59,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, #define MACHINE_FLAG_DIAG44 (1UL << 4) #define MACHINE_FLAG_IDTE (1UL << 5) #define MACHINE_FLAG_DIAG9C (1UL << 6) -#define MACHINE_FLAG_MVCOS (1UL << 7) #define MACHINE_FLAG_KVM (1UL << 8) #define MACHINE_FLAG_ESOP (1UL << 9) #define MACHINE_FLAG_EDAT1 (1UL << 10) @@ -85,7 +84,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, #define MACHINE_HAS_IDTE (0) #define MACHINE_HAS_DIAG44 (1) #define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG) -#define MACHINE_HAS_MVCOS (0) #define MACHINE_HAS_EDAT1 (0) #define MACHINE_HAS_EDAT2 (0) #define MACHINE_HAS_LPP (0) @@ -98,7 +96,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, #define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) #define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) #define MACHINE_HAS_MVPG (1) -#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS) #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) #define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index fca20b5fe79e..6b594439cca5 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -380,8 +380,6 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2; if (test_facility(3)) 
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; - if (test_facility(27)) - S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; if (test_facility(40)) S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; if (test_facility(50) && test_facility(73)) diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index 8c01f3aaf95c..e2685ff2ec4b 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include "uaccess.h" @@ -242,7 +243,7 @@ EXPORT_SYMBOL(__strncpy_from_user); static int __init uaccess_init(void) { - if (!MACHINE_HAS_MVCOS) + if (IS_ENABLED(CONFIG_32BIT) || !test_facility(27)) static_key_slow_dec(&have_mvcos); return 0; } -- cgit v1.2.3 From 7385d0a550813c912094c5df22aeeb463712300e Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 27 Jan 2014 10:09:11 +0100 Subject: s390/uaccess: remove dead kernel parameter 'user_mode=' Remove another leftover from the time when we supported running user space in either home or primary address space. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/setup.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 91ea00955db7..f70f2489fa5f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -287,14 +287,6 @@ static int __init parse_vmalloc(char *arg) } early_param("vmalloc", parse_vmalloc); -static int __init early_parse_user_mode(char *p) -{ - if (!p || strcmp(p, "primary") == 0) - return 0; - return 1; -} -early_param("user_mode", early_parse_user_mode); - void *restart_stack __attribute__((__section__(".data"))); static void __init setup_lowcore(void) -- cgit v1.2.3 From 56f15e518cfdc732bd4e4da90e0c9cf2fc4e7c1b Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 27 Jan 2014 10:25:39 +0100 Subject: s390/uaccess: introduce 'uaccesspt' kernel parameter The uaccesspt kernel parameter allows to enforce using the uaccess page table walk variant. This is mainly for debugging purposes, so this mode can also be enabled on machines which support the mvcos instruction. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/lib/uaccess_mvcos.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index e2685ff2ec4b..ae97b8df11aa 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c @@ -241,9 +241,22 @@ long __strncpy_from_user(char *dst, const char __user *src, long count) } EXPORT_SYMBOL(__strncpy_from_user); +/* + * The uaccess page tabe walk variant can be enforced with the "uaccesspt" + * kernel parameter. This is mainly for debugging purposes. + */ +static int force_uaccess_pt __initdata; + +static int __init parse_uaccess_pt(char *__unused) +{ + force_uaccess_pt = 1; + return 0; +} +early_param("uaccesspt", parse_uaccess_pt); + static int __init uaccess_init(void) { - if (IS_ENABLED(CONFIG_32BIT) || !test_facility(27)) + if (IS_ENABLED(CONFIG_32BIT) || force_uaccess_pt || !test_facility(27)) static_key_slow_dec(&have_mvcos); return 0; } -- cgit v1.2.3 From 53e857f30867918b3618d8e18902e63291946ef4 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 10 Sep 2012 13:00:09 +0200 Subject: s390/mm,tlb: race of lazy TLB flush vs. recreation of TLB entries Git commit 050eef364ad70059 "[S390] fix tlb flushing vs. 
concurrent /proc accesses" introduced the attach counter to avoid using the mm_users value to decide between IPTE for every PTE and lazy TLB flushing with IDTE. That fixed the problem with mm_users but it introduced another subtle race, fortunately one that is very hard to hit. The background is the requirement of the architecture that a valid PTE may not be changed while it can be used concurrently by another cpu. The decision between IPTE and lazy TLB flushing needs to be done while the PTE is still valid. Now if the virtual cpu is temporarily stopped after the decision to use lazy TLB flushing but before the invalid bit of the PTE has been set, another cpu can attach the mm, find that flush_mm is set, do the IDTE, return to userspace, and recreate a TLB that uses the PTE in question. When the first, stopped cpu continues it will change the PTE while it is attached on another cpu. The first cpu will do another IDTE shortly after the modification of the PTE which makes the race window quite short. To fix this race the CPU that wants to attach the address space of a user space thread needs to wait for the end of the PTE modification. The number of concurrent TLB flushers for an mm is tracked in the upper 16 bits of the attach_count and finish_arch_post_lock_switch is used to wait for the end of the flush operation if required. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/mmu_context.h | 39 ++++++++++++++++++++---- arch/s390/include/asm/pgtable.h | 60 +++++++++++++++++++++++-------------- arch/s390/include/asm/thread_info.h | 2 ++ arch/s390/kernel/entry.S | 9 ++++-- arch/s390/kernel/entry64.S | 9 ++++-- 5 files changed, 85 insertions(+), 34 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 5d1f950704dc..38149b63dc44 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -48,13 +48,42 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk) static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); - update_mm(next, tsk); + int cpu = smp_processor_id(); + + if (prev == next) + return; + if (atomic_inc_return(&next->context.attach_count) >> 16) { + /* Delay update_mm until all TLB flushes are done. 
*/ + set_tsk_thread_flag(tsk, TIF_TLB_WAIT); + } else { + cpumask_set_cpu(cpu, mm_cpumask(next)); + update_mm(next, tsk); + if (next->context.flush_mm) + /* Flush pending TLBs */ + __tlb_flush_mm(next); + } atomic_dec(&prev->context.attach_count); WARN_ON(atomic_read(&prev->context.attach_count) < 0); - atomic_inc(&next->context.attach_count); - /* Check for TLBs not flushed yet */ - __tlb_flush_mm_lazy(next); +} + +#define finish_arch_post_lock_switch finish_arch_post_lock_switch +static inline void finish_arch_post_lock_switch(void) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT)) + return; + preempt_disable(); + clear_tsk_thread_flag(tsk, TIF_TLB_WAIT); + while (atomic_read(&mm->context.attach_count) >> 16) + cpu_relax(); + + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + update_mm(mm, tsk); + if (mm->context.flush_mm) + __tlb_flush_mm(mm); + preempt_enable(); } #define enter_lazy_tlb(mm,tsk) do { } while (0) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 2204400d0bd5..fc4bb82a0739 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1034,30 +1034,41 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm, static inline void __ptep_ipte(unsigned long address, pte_t *ptep) { - if (!(pte_val(*ptep) & _PAGE_INVALID)) { + unsigned long pto = (unsigned long) ptep; + #ifndef CONFIG_64BIT - /* pto must point to the start of the segment table */ - pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); -#else - /* ipte in zarch mode can do the math */ - pte_t *pto = ptep; + /* pto in ESA mode must point to the start of the segment table */ + pto &= 0x7ffffc00; #endif - asm volatile( - " ipte %2,%3" - : "=m" (*ptep) : "m" (*ptep), - "a" (pto), "a" (address)); - } + /* Invalidation + global TLB flush for the pte */ + asm volatile( + " ipte %2,%3" + : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); +} + +static inline void ptep_flush_direct(struct mm_struct *mm, + unsigned long address, pte_t *ptep) +{ + if (pte_val(*ptep) & _PAGE_INVALID) + return; + __ptep_ipte(address, ptep); } static inline void ptep_flush_lazy(struct mm_struct *mm, unsigned long address, pte_t *ptep) { - int active = (mm == current->active_mm) ? 1 : 0; + int active, count; - if (atomic_read(&mm->context.attach_count) > active) - __ptep_ipte(address, ptep); - else + if (pte_val(*ptep) & _PAGE_INVALID) + return; + active = (mm == current->active_mm) ? 
1 : 0; + count = atomic_add_return(0x10000, &mm->context.attach_count); + if ((count & 0xffff) <= active) { + pte_val(*ptep) |= _PAGE_INVALID; mm->context.flush_mm = 1; + } else + __ptep_ipte(address, ptep); + atomic_sub(0x10000, &mm->context.attach_count); } #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -1074,7 +1085,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, } pte = *ptep; - __ptep_ipte(addr, ptep); + ptep_flush_direct(vma->vm_mm, addr, ptep); young = pte_young(pte); pte = pte_mkold(pte); @@ -1145,7 +1156,6 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, pte = *ptep; ptep_flush_lazy(mm, address, ptep); - pte_val(*ptep) |= _PAGE_INVALID; if (mm_has_pgste(mm)) { pgste = pgste_update_all(&pte, pgste); @@ -1182,7 +1192,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, } pte = *ptep; - __ptep_ipte(address, ptep); + ptep_flush_direct(vma->vm_mm, address, ptep); pte_val(*ptep) = _PAGE_INVALID; if (mm_has_pgste(vma->vm_mm)) { @@ -1263,7 +1273,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); } - __ptep_ipte(address, ptep); + ptep_flush_direct(vma->vm_mm, address, ptep); if (mm_has_pgste(vma->vm_mm)) { pgste_set_pte(ptep, entry); @@ -1447,12 +1457,16 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) static inline void pmdp_flush_lazy(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - int active = (mm == current->active_mm) ? 1 : 0; + int active, count; - if ((atomic_read(&mm->context.attach_count) & 0xffff) > active) - __pmd_idte(address, pmdp); - else + active = (mm == current->active_mm) ? 1 : 0; + count = atomic_add_return(0x10000, &mm->context.attach_count); + if ((count & 0xffff) <= active) { + pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; mm->context.flush_mm = 1; + } else + __pmd_idte(address, pmdp); + atomic_sub(0x10000, &mm->context.attach_count); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 10e0fcd3633d..d2e53d667a92 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -81,6 +81,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_TLB_WAIT 4 /* wait for TLB flush completion */ #define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */ #define TIF_MCCK_PENDING 7 /* machine check handling is pending */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ @@ -96,6 +97,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_NOTIFY_RESUME (1< Date: Wed, 17 Apr 2013 17:36:29 +0200 Subject: s390/kvm: support collaborative memory management This patch enables Collaborative Memory Management (CMM) for kvm on s390. CMM allows the guest to inform the host about page usage (see arch/s390/mm/cmm.c). The host uses this information to avoid swapping in unused pages in the page fault handler. Further, a CPU provided list of unused invalid pages is processed to reclaim swap space of not yet accessed unused pages. 
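A minimal sketch of how a caller could consume this hint, assuming a made-up helper name; only ptep_clear_flush() and pte_unused() are interfaces that appear in the patch below, everything else is illustrative:

static int can_discard_after_unmap(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	/* ptep_clear_flush() transfers the guest usage state into the returned pte value. */
	pte_t pte = ptep_clear_flush(vma, addr, ptep);

	/* Pages the guest marked as unused need not be written out to swap. */
	return pte_unused(pte);
}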
[ Martin Schwidefsky: patch reordering and cleanup ] Signed-off-by: Konstantin Weitz Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/kvm_host.h | 5 ++- arch/s390/include/asm/pgtable.h | 26 ++++++++++++++ arch/s390/kvm/kvm-s390.c | 25 +++++++++++++ arch/s390/kvm/kvm-s390.h | 2 ++ arch/s390/kvm/priv.c | 41 +++++++++++++++++++++ arch/s390/mm/pgtable.c | 77 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 175 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index eef3dd3fd9a9..9bf95bb30f1a 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -106,7 +106,9 @@ struct kvm_s390_sie_block { __u64 gbea; /* 0x0180 */ __u8 reserved188[24]; /* 0x0188 */ __u32 fac; /* 0x01a0 */ - __u8 reserved1a4[68]; /* 0x01a4 */ + __u8 reserved1a4[20]; /* 0x01a4 */ + __u64 cbrlo; /* 0x01b8 */ + __u8 reserved1c0[40]; /* 0x01c0 */ __u64 itdba; /* 0x01e8 */ __u8 reserved1f0[16]; /* 0x01f0 */ } __attribute__((packed)); @@ -155,6 +157,7 @@ struct kvm_vcpu_stat { u32 instruction_stsi; u32 instruction_stfl; u32 instruction_tprot; + u32 instruction_essa; u32 instruction_sigp_sense; u32 instruction_sigp_sense_running; u32 instruction_sigp_external_call; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index fc4bb82a0739..a7dd672c97f8 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -229,6 +229,7 @@ extern unsigned long MODULES_END; #define _PAGE_READ 0x010 /* SW pte read bit */ #define _PAGE_WRITE 0x020 /* SW pte write bit */ #define _PAGE_SPECIAL 0x040 /* SW associated with special page */ +#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ #define __HAVE_ARCH_PTE_SPECIAL /* Set of bits not changed in pte_modify */ @@ -394,6 +395,12 @@ extern unsigned long MODULES_END; #endif /* CONFIG_64BIT */ +/* Guest Page State used for virtualization */ +#define _PGSTE_GPS_ZERO 0x0000000080000000UL +#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL +#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL +#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL + /* * A user page table pointer has the space-switch-event bit, the * private-space-control bit and the storage-alteration-event-control @@ -617,6 +624,14 @@ static inline int pte_none(pte_t pte) return pte_val(pte) == _PAGE_INVALID; } +static inline int pte_swap(pte_t pte) +{ + /* Bit pattern: (pte & 0x603) == 0x402 */ + return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | + _PAGE_TYPE | _PAGE_PRESENT)) + == (_PAGE_INVALID | _PAGE_TYPE); +} + static inline int pte_file(pte_t pte) { /* Bit pattern: (pte & 0x601) == 0x600 */ @@ -821,6 +836,7 @@ unsigned long gmap_translate(unsigned long address, struct gmap *); unsigned long __gmap_fault(unsigned long address, struct gmap *); unsigned long gmap_fault(unsigned long address, struct gmap *); void gmap_discard(unsigned long from, unsigned long to, struct gmap *); +void __gmap_zap(unsigned long address, struct gmap *); void gmap_register_ipte_notifier(struct gmap_notifier *); void gmap_unregister_ipte_notifier(struct gmap_notifier *); @@ -852,6 +868,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); + pgste_val(pgste) &= ~_PGSTE_GPS_ZERO; pgste_set_key(ptep, pgste, entry); pgste_set_pte(ptep, entry); pgste_set_unlock(ptep, pgste); @@ -881,6 +898,12 @@ static inline int pte_young(pte_t pte) return (pte_val(pte) & _PAGE_YOUNG) != 0; } +#define 
__HAVE_ARCH_PTE_UNUSED +static inline int pte_unused(pte_t pte) +{ + return pte_val(pte) & _PAGE_UNUSED; +} + /* * pgd/pmd/pte modification functions */ @@ -1196,6 +1219,9 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, pte_val(*ptep) = _PAGE_INVALID; if (mm_has_pgste(vma->vm_mm)) { + if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == + _PGSTE_GPS_USAGE_UNUSED) + pte_val(pte) |= _PAGE_UNUSED; pgste = pgste_update_all(&pte, pgste); pgste_set_unlock(ptep, pgste); } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index e0676f390d57..10b5db3c9bc4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -68,6 +68,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, { "instruction_stsch", VCPU_STAT(instruction_stsch) }, { "instruction_chsc", VCPU_STAT(instruction_chsc) }, + { "instruction_essa", VCPU_STAT(instruction_essa) }, { "instruction_stsi", VCPU_STAT(instruction_stsi) }, { "instruction_stfl", VCPU_STAT(instruction_stfl) }, { "instruction_tprot", VCPU_STAT(instruction_tprot) }, @@ -283,7 +284,11 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) if (kvm_is_ucontrol(vcpu->kvm)) gmap_free(vcpu->arch.gmap); + if (vcpu->arch.sie_block->cbrlo) + __free_page(__pfn_to_page( + vcpu->arch.sie_block->cbrlo >> PAGE_SHIFT)); free_page((unsigned long)(vcpu->arch.sie_block)); + kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vcpu); } @@ -390,6 +395,8 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { + struct page *cbrl; + atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM | CPUSTAT_STOPPED | @@ -401,6 +408,14 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->ecb2 = 8; vcpu->arch.sie_block->eca = 0xC1002001U; vcpu->arch.sie_block->fac = (int) (long) vfacilities; + if (kvm_enabled_cmma()) { + cbrl = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (cbrl) { + vcpu->arch.sie_block->ecb2 |= 0x80; + vcpu->arch.sie_block->ecb2 &= ~0x08; + vcpu->arch.sie_block->cbrlo = page_to_phys(cbrl); + } + } hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, (unsigned long) vcpu); @@ -761,6 +776,16 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) return rc; } +bool kvm_enabled_cmma(void) +{ + if (!MACHINE_IS_LPAR) + return false; + /* only enable for z10 and later */ + if (!MACHINE_HAS_EDAT1) + return false; + return true; +} + static int __vcpu_run(struct kvm_vcpu *vcpu) { int rc, exit_reason; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index f9559b0bd620..564514f410f4 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -156,6 +156,8 @@ void s390_vcpu_block(struct kvm_vcpu *vcpu); void s390_vcpu_unblock(struct kvm_vcpu *vcpu); void exit_sie(struct kvm_vcpu *vcpu); void exit_sie_sync(struct kvm_vcpu *vcpu); +/* are we going to support cmma? 
*/ +bool kvm_enabled_cmma(void); /* implemented in diag.c */ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 75beea632a10..aacb6b129914 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -636,8 +636,49 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) return 0; } +static int handle_essa(struct kvm_vcpu *vcpu) +{ + /* entries expected to be 1FF */ + int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; + unsigned long *cbrlo, cbrle; + struct gmap *gmap; + int i; + + VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); + gmap = vcpu->arch.gmap; + vcpu->stat.instruction_essa++; + if (!kvm_enabled_cmma() || !vcpu->arch.sie_block->cbrlo) + return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); + + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + + if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + + /* Rewind PSW to repeat the ESSA instruction */ + vcpu->arch.sie_block->gpsw.addr = + __rewind_psw(vcpu->arch.sie_block->gpsw, 4); + vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ + cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); + down_read(&gmap->mm->mmap_sem); + for (i = 0; i < entries; ++i) { + cbrle = cbrlo[i]; + if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE)) + /* invalid entry */ + break; + /* try to free backing */ + __gmap_zap(cbrle, gmap); + } + up_read(&gmap->mm->mmap_sem); + if (i < entries) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + return 0; +} + static const intercept_handler_t b9_handlers[256] = { [0x8d] = handle_epsw, + [0xab] = handle_essa, [0xaf] = handle_pfmf, }; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 3584ed9b20a1..9e2b4705dea2 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -594,6 +595,82 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap) } EXPORT_SYMBOL_GPL(gmap_fault); +static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm) +{ + if (!non_swap_entry(entry)) + dec_mm_counter(mm, MM_SWAPENTS); + else if (is_migration_entry(entry)) { + struct page *page = migration_entry_to_page(entry); + + if (PageAnon(page)) + dec_mm_counter(mm, MM_ANONPAGES); + else + dec_mm_counter(mm, MM_FILEPAGES); + } + free_swap_and_cache(entry); +} + +/** + * The mm->mmap_sem lock must be held + */ +static void gmap_zap_unused(struct mm_struct *mm, unsigned long address) +{ + unsigned long ptev, pgstev; + spinlock_t *ptl; + pgste_t pgste; + pte_t *ptep, pte; + + ptep = get_locked_pte(mm, address, &ptl); + if (unlikely(!ptep)) + return; + pte = *ptep; + if (!pte_swap(pte)) + goto out_pte; + /* Zap unused and logically-zero pages */ + pgste = pgste_get_lock(ptep); + pgstev = pgste_val(pgste); + ptev = pte_val(pte); + if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) || + ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) { + gmap_zap_swap_entry(pte_to_swp_entry(pte), mm); + pte_clear(mm, address, ptep); + } + pgste_set_unlock(ptep, pgste); +out_pte: + pte_unmap_unlock(*ptep, ptl); +} + +/* + * this function is assumed to be called with mmap_sem held + */ +void __gmap_zap(unsigned long address, struct gmap *gmap) +{ + unsigned long *table, *segment_ptr; + unsigned long segment, pgstev, ptev; + struct gmap_pgtable *mp; + struct page *page; + + segment_ptr = 
gmap_table_walk(address, gmap); + if (IS_ERR(segment_ptr)) + return; + segment = *segment_ptr; + if (segment & _SEGMENT_ENTRY_INVALID) + return; + page = pfn_to_page(segment >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + address = mp->vmaddr | (address & ~PMD_MASK); + /* Page table is present */ + table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN); + table = table + ((address >> 12) & 0xff); + pgstev = table[PTRS_PER_PTE]; + ptev = table[0]; + /* quick check, checked again with locks held */ + if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) || + ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) + gmap_zap_unused(gmap->mm, address); +} +EXPORT_SYMBOL_GPL(__gmap_zap); + void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) { -- cgit v1.2.3 From deedabb2b4a68a63351a949b1abcf73fc97eb406 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 21 May 2013 17:29:52 +0200 Subject: s390/kvm: set guest page states to stable on re-ipl The guest page state needs to be reset to stable for all pages on initial program load via diagnose 0x308. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgalloc.h | 1 + arch/s390/kvm/diag.c | 3 ++ arch/s390/mm/pgtable.c | 72 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index e1408ddb94f8..14d43c77d6cf 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -22,6 +22,7 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long); void page_table_free(struct mm_struct *, unsigned long *); void page_table_free_rcu(struct mmu_gather *, unsigned long *); +void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long); int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned long key, bool nq); diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 8216c0e0b2e2..6f9cfa500372 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -13,6 +13,7 @@ #include #include +#include #include #include "kvm-s390.h" #include "trace.h" @@ -86,9 +87,11 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) switch (subcode) { case 3: vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; + page_table_reset_pgste(current->mm, 0, TASK_SIZE); break; case 4: vcpu->run->s390_reset_flags = 0; + page_table_reset_pgste(current->mm, 0, TASK_SIZE); break; default: return -EOPNOTSUPP; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 9e2b4705dea2..9c26b7aa96d9 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -879,6 +879,78 @@ static inline void page_table_free_pgste(unsigned long *table) __free_page(page); } +static inline unsigned long page_table_reset_pte(struct mm_struct *mm, + pmd_t *pmd, unsigned long addr, unsigned long end) +{ + pte_t *start_pte, *pte; + spinlock_t *ptl; + pgste_t pgste; + + start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + pte = start_pte; + do { + pgste = pgste_get_lock(pte); + pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; + pgste_set_unlock(pte, pgste); + } while (pte++, addr += PAGE_SIZE, addr != end); + pte_unmap_unlock(start_pte, ptl); + + return addr; +} + +static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, + pud_t *pud, unsigned long addr, unsigned long end) +{ + unsigned long next; + pmd_t *pmd; + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (pmd_none_or_clear_bad(pmd)) + 
continue; + next = page_table_reset_pte(mm, pmd, addr, next); + } while (pmd++, addr = next, addr != end); + + return addr; +} + +static inline unsigned long page_table_reset_pud(struct mm_struct *mm, + pgd_t *pgd, unsigned long addr, unsigned long end) +{ + unsigned long next; + pud_t *pud; + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + next = page_table_reset_pmd(mm, pud, addr, next); + } while (pud++, addr = next, addr != end); + + return addr; +} + +void page_table_reset_pgste(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + unsigned long addr, next; + pgd_t *pgd; + + addr = start; + down_read(&mm->mmap_sem); + pgd = pgd_offset(mm, addr); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + next = page_table_reset_pud(mm, pgd, addr, next); + } while (pgd++, addr = next, addr != end); + up_read(&mm->mmap_sem); +} +EXPORT_SYMBOL(page_table_reset_pgste); + int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned long key, bool nq) { -- cgit v1.2.3 From db85eaeb52637eb91a7bbc70f6684f5563b983e9 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 12 Feb 2014 12:43:40 +0100 Subject: s390/bitops: fix comment Fix some numbers in the comments describing the layout of the bit maps. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/bitops.h | 8 ++++---- arch/s390/lib/find.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 6e6ad0680829..ec5ef891db6b 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -13,9 +13,9 @@ * * The bitop functions are defined to work on unsigned longs, so for an * s390x system the bits end up numbered: - * |63..............0|127............64|191...........128|255...........196| + * |63..............0|127............64|191...........128|255...........192| * and on s390: - * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224| + * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| * * There are a few little-endian macros used mostly for filesystem * bitmaps, these work on similar bit arrays layouts, but @@ -30,7 +30,7 @@ * on an s390x system the bits are numbered: * |0..............63|64............127|128...........191|192...........255| * and on s390: - * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255| + * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| * * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit * number field needs to be reversed compared to the LSB0 encoded bit @@ -304,7 +304,7 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr) * On an s390x system the bits are numbered: * |0..............63|64............127|128...........191|192...........255| * and on s390: - * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255| + * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| */ unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size); unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size, diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c index 620d34d6487e..922003c1b90d 100644 --- a/arch/s390/lib/find.c +++ b/arch/s390/lib/find.c @@ -4,7 +4,7 @@ * On s390x the bits are numbered: * 
|0..............63|64............127|128...........191|192...........255| * and on s390: - * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255| + * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| * * The reason for this bit numbering is the fact that the hardware sets bits * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap -- cgit v1.2.3 From ec66ad66a0de87866be347b5ecc83bd46427f53b Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 12 Feb 2014 14:16:18 +0100 Subject: s390/mm: enable split page table lock for PMD level Add the pgtable_pmd_page_ctor/pgtable_pmd_page_dtor calls to the pmd allocation and free functions and enable ARCH_ENABLE_SPLIT_PMD_PTLOCK for 64 bit. Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 4 ++++ arch/s390/include/asm/pgalloc.h | 17 ++++++++++++++--- arch/s390/mm/pgtable.c | 4 ++-- 3 files changed, 20 insertions(+), 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 65a07750f4f9..86db5a8e6586 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -415,6 +415,10 @@ config ARCH_ENABLE_MEMORY_HOTPLUG config ARCH_ENABLE_MEMORY_HOTREMOVE def_bool y +config ARCH_ENABLE_SPLIT_PMD_PTLOCK + def_bool y + depends on 64BIT + config FORCE_MAX_ZONEORDER int default "9" diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 14d43c77d6cf..884017cbfa9f 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -92,11 +92,22 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) { unsigned long *table = crst_table_alloc(mm); - if (table) - crst_table_init(table, _SEGMENT_ENTRY_EMPTY); + + if (!table) + return NULL; + crst_table_init(table, _SEGMENT_ENTRY_EMPTY); + if (!pgtable_pmd_page_ctor(virt_to_page(table))) { + crst_table_free(mm, table); + return NULL; + } return (pmd_t *) table; } -#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd) + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + pgtable_pmd_page_dtor(virt_to_page(pmd)); + crst_table_free(mm, (unsigned long *) pmd); +} static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) { diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 9c26b7aa96d9..f8b58a7a3048 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -1397,7 +1397,7 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, { struct list_head *lh = (struct list_head *) pgtable; - assert_spin_locked(&mm->page_table_lock); + assert_spin_locked(pmd_lockptr(mm, pmdp)); /* FIFO */ if (!pmd_huge_pte(mm, pmdp)) @@ -1413,7 +1413,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) pgtable_t pgtable; pte_t *ptep; - assert_spin_locked(&mm->page_table_lock); + assert_spin_locked(pmd_lockptr(mm, pmdp)); /* FIFO */ pgtable = pmd_huge_pte(mm, pmdp); -- cgit v1.2.3 From fe7c30a420761654777d3cc15412fc7626407e93 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 13 Feb 2014 13:02:32 +0100 Subject: s390/airq: add support for irq ranges Add airq_iv_alloc and airq_iv_free to allocate and free consecutive ranges of irqs from the interrupt vector. 
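A brief usage sketch of the range interface, assuming the vector was created with the AIRQ_IV_ALLOC flag and that the surrounding driver context exists (the function name is made up):

static int example_grab_irq_block(struct airq_iv *iv)
{
	unsigned long bit;

	/* Allocate four consecutive interrupt bits; -1UL means no suitable block was free. */
	bit = airq_iv_alloc(iv, 4);
	if (bit == -1UL)
		return -ENOSPC;
	/* ... use bit .. bit + 3 as interrupt indexes ... */
	airq_iv_free(iv, bit, 4);
	return 0;
}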
Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/airq.h | 14 ++++++++-- drivers/s390/cio/airq.c | 66 +++++++++++++++++++++++++++----------------- 2 files changed, 53 insertions(+), 27 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h index 4bbb5957ed1b..bd93ff6661b8 100644 --- a/arch/s390/include/asm/airq.h +++ b/arch/s390/include/asm/airq.h @@ -44,11 +44,21 @@ struct airq_iv { struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags); void airq_iv_release(struct airq_iv *iv); -unsigned long airq_iv_alloc_bit(struct airq_iv *iv); -void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit); +unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num); +void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num); unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, unsigned long end); +static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv) +{ + return airq_iv_alloc(iv, 1); +} + +static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit) +{ + airq_iv_free(iv, bit, 1); +} + static inline unsigned long airq_iv_end(struct airq_iv *iv) { return iv->end; diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index f055df0b167f..445564c790f6 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -186,55 +186,71 @@ void airq_iv_release(struct airq_iv *iv) EXPORT_SYMBOL(airq_iv_release); /** - * airq_iv_alloc_bit - allocate an irq bit from an interrupt vector + * airq_iv_alloc - allocate irq bits from an interrupt vector * @iv: pointer to an interrupt vector structure + * @num: number of consecutive irq bits to allocate * - * Returns the bit number of the allocated irq, or -1UL if no bit - * is available or the AIRQ_IV_ALLOC flag has not been specified + * Returns the bit number of the first irq in the allocated block of irqs, + * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been + * specified */ -unsigned long airq_iv_alloc_bit(struct airq_iv *iv) +unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num) { - unsigned long bit; + unsigned long bit, i; - if (!iv->avail) + if (!iv->avail || num == 0) return -1UL; spin_lock(&iv->lock); bit = find_first_bit_inv(iv->avail, iv->bits); - if (bit < iv->bits) { - clear_bit_inv(bit, iv->avail); - if (bit >= iv->end) - iv->end = bit + 1; - } else + while (bit + num <= iv->bits) { + for (i = 1; i < num; i++) + if (!test_bit_inv(bit + i, iv->avail)) + break; + if (i >= num) { + /* Found a suitable block of irqs */ + for (i = 0; i < num; i++) + clear_bit_inv(bit + i, iv->avail); + if (bit + num >= iv->end) + iv->end = bit + num + 1; + break; + } + bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1); + } + if (bit + num > iv->bits) bit = -1UL; spin_unlock(&iv->lock); return bit; } -EXPORT_SYMBOL(airq_iv_alloc_bit); +EXPORT_SYMBOL(airq_iv_alloc); /** - * airq_iv_free_bit - free an irq bit of an interrupt vector + * airq_iv_free - free irq bits of an interrupt vector * @iv: pointer to interrupt vector structure - * @bit: number of the irq bit to free + * @bit: number of the first irq bit to free + * @num: number of consecutive irq bits to free */ -void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit) +void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num) { - if (!iv->avail) + unsigned long i; + + if (!iv->avail || num == 0) return; spin_lock(&iv->lock); - /* Clear (possibly left over) interrupt bit */ - clear_bit_inv(bit, 
iv->vector); - /* Make the bit position available again */ - set_bit_inv(bit, iv->avail); - if (bit == iv->end - 1) { + for (i = 0; i < num; i++) { + /* Clear (possibly left over) interrupt bit */ + clear_bit_inv(bit + i, iv->vector); + /* Make the bit positions available again */ + set_bit_inv(bit + i, iv->avail); + } + if (bit + num >= iv->end) { /* Find new end of bit-field */ - while (--iv->end > 0) - if (!test_bit_inv(iv->end - 1, iv->avail)) - break; + while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail)) + iv->end--; } spin_unlock(&iv->lock); } -EXPORT_SYMBOL(airq_iv_free_bit); +EXPORT_SYMBOL(airq_iv_free); /** * airq_iv_scan - scan interrupt vector for non-zero bits -- cgit v1.2.3 From f7e1e65d29636d050cdde0770b9544572959a67d Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Mon, 17 Feb 2014 11:16:10 +0100 Subject: s390: improve debug feature usage The maximum usable buffer size of the s390 debug feature (when using the sprintf_view) is 11 * sizeof(long) (1 pointer for the format string + 10 arguments). When a larger buffer size is specified the additional memory is unused and wasted per debug entry. So reducing the buffer size to its maximum (or to the actual buffer size used) will make more precious debug feature space usable. For pci_msg, chsc_msg, and cio_crw we use the additional usable dbf space to reduce the number of allocated pages. Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- arch/s390/pci/pci_debug.c | 2 +- drivers/s390/cio/chsc_sch.c | 3 +-- drivers/s390/cio/cio.c | 4 ++-- drivers/s390/net/qeth_core_main.c | 4 ++-- 4 files changed, 6 insertions(+), 7 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c index 75c69b402e05..c5c66840ac00 100644 --- a/arch/s390/pci/pci_debug.c +++ b/arch/s390/pci/pci_debug.c @@ -139,7 +139,7 @@ void zpci_debug_exit_device(struct zpci_dev *zdev) int __init zpci_debug_init(void) { /* event trace buffer */ - pci_debug_msg_id = debug_register("pci_msg", 16, 1, 16 * sizeof(long)); + pci_debug_msg_id = debug_register("pci_msg", 8, 1, 8 * sizeof(long)); if (!pci_debug_msg_id) return -EINVAL; debug_register_view(pci_debug_msg_id, &debug_sprintf_view); diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 7b29d0be0ca3..1d3661af7bd8 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -173,8 +173,7 @@ static struct css_driver chsc_subchannel_driver = { static int __init chsc_init_dbfs(void) { - chsc_debug_msg_id = debug_register("chsc_msg", 16, 1, - 16 * sizeof(long)); + chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long)); if (!chsc_debug_msg_id) goto out; debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 8ee88c4ebd83..97c48b38d67d 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -54,7 +54,7 @@ debug_info_t *cio_debug_crw_id; */ static int __init cio_debug_init(void) { - cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); + cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long)); if (!cio_debug_msg_id) goto out_unregister; debug_register_view(cio_debug_msg_id, &debug_sprintf_view); @@ -64,7 +64,7 @@ static int __init cio_debug_init(void) goto out_unregister; debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); debug_set_level(cio_debug_trace_id, 2); - cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); + cio_debug_crw_id = 
debug_register("cio_crw", 8, 1, 8 * sizeof(long)); if (!cio_debug_crw_id) goto out_unregister; debug_register_view(cio_debug_crw_id, &debug_sprintf_view); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c3a83df07894..bd5d4ab4bcf7 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -33,8 +33,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { /* N P A M L V H */ [QETH_DBF_SETUP] = {"qeth_setup", 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, - [QETH_DBF_MSG] = {"qeth_msg", - 8, 1, 128, 3, &debug_sprintf_view, NULL}, + [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3, + &debug_sprintf_view, NULL}, [QETH_DBF_CTRL] = {"qeth_control", 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, }; -- cgit v1.2.3 From 823002023da6d9124ed63bc622267c15ab2a7347 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 24 Feb 2014 16:18:55 +0100 Subject: s390/uaccess: remove copy_from_user_real() There is no user left, so remove it. It was also potentially broken, since the function didn't clear destination memory if copy_from_user() failed. Which would allow for information leaks. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/uaccess.h | 1 - arch/s390/mm/maccess.c | 26 -------------------------- 2 files changed, 27 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 2710b41eed22..4133b3f72fb0 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -344,6 +344,5 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo } int copy_to_user_real(void __user *dest, void *src, unsigned long count); -int copy_from_user_real(void *dest, void __user *src, unsigned long count); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index efe8ad045c7b..2a2e35416d2f 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -151,32 +151,6 @@ out: return rc; } -/* - * Copy memory from user (virtual) to kernel (real) - */ -int copy_from_user_real(void *dest, void __user *src, unsigned long count) -{ - int offs = 0, size, rc; - char *buf; - - buf = (char *) __get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - rc = -EFAULT; - while (offs < count) { - size = min(PAGE_SIZE, count - offs); - if (copy_from_user(buf, src + offs, size)) - goto out; - if (memcpy_real(dest + offs, buf, size)) - goto out; - offs += size; - } - rc = 0; -out: - free_page((unsigned long) buf); - return rc; -} - /* * Check if physical address is within prefix or zero page */ -- cgit v1.2.3 From d72d2bb5a6bc85af77a9f969687c74ea00cdcc99 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 24 Feb 2014 16:33:08 +0100 Subject: s390/checksum: remove memset() within csum_partial_copy_from_user() The memset() within csum_partial_copy_from_user() is rather pointless since copy_from_user() already cleared the rest of the destination buffer if an exception happened. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/checksum.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h index 4f57a4f3909a..740364856355 100644 --- a/arch/s390/include/asm/checksum.h +++ b/arch/s390/include/asm/checksum.h @@ -44,22 +44,15 @@ csum_partial(const void *buff, int len, __wsum sum) * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary * - * Copy from userspace and compute checksum. If we catch an exception - * then zero the rest of the buffer. + * Copy from userspace and compute checksum. */ static inline __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { - int missing; - - missing = copy_from_user(dst, src, len); - if (missing) { - memset(dst + len - missing, 0, missing); + if (unlikely(copy_from_user(dst, src, len))) *err_ptr = -EFAULT; - } - return csum_partial(dst, len, sum); } -- cgit v1.2.3 From 6967037baf08860407d275fdd6475d2ee41bd9b7 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Fri, 28 Feb 2014 17:35:52 +0100 Subject: s390/appldata_os: fix cpu array size calculation The cpu array size calculation uses the NR_CPUS config option, which was recently increased from 64 to 256. With a value of 256, the cpu array will no longer fit into one APPLDATA record and loading the appldata_os module fails with the following error: could not insert 'appldata_os': Cannot allocate memory Use num_possible_cpus() instead of NR_CPUS. For z/VM, this will still result in a value of 64. This is not true for LPAR, but the appldata feature is not available for LPAR. Signed-off-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- arch/s390/appldata/appldata_os.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index de8e2b3b0180..69b23b25ac34 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -171,7 +171,7 @@ static int __init appldata_os_init(void) int rc, max_size; max_size = sizeof(struct appldata_os_data) + - (NR_CPUS * sizeof(struct appldata_os_per_cpu)); + (num_possible_cpus() * sizeof(struct appldata_os_per_cpu)); if (max_size > APPLDATA_MAX_REC_SIZE) { pr_err("Maximum OS record size %i exceeds the maximum " "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE); -- cgit v1.2.3 From 0563416b8ac2711f315b50aa6efc84b112e5420a Mon Sep 17 00:00:00 2001 From: Josh Triplett Date: Wed, 26 Feb 2014 18:13:06 -0800 Subject: s390: select CONFIG_TTY for use of tty in unconditional keyboard driver The unconditionally built keyboard driver, drivers/s390/char/keyboard.c, requires CONFIG_TTY, so select it from CONFIG_S390 to prevent a build error. 
Signed-off-by: Josh Triplett Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 86db5a8e6586..cbe56154c6e3 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -140,6 +140,7 @@ config S390 select OLD_SIGACTION select OLD_SIGSUSPEND3 select SYSCTL_EXCEPTION_TRACE + select TTY select VIRT_CPU_ACCOUNTING select VIRT_TO_BUS -- cgit v1.2.3 From fa2a0627ac70d007c313da150d9bf3705729e9c8 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 6 Mar 2014 11:24:41 +0100 Subject: s390/compat: remove compat exec domain The whole compat exec domain code doesn't make any difference. From the registered s390_exec_domain: - exec domain name is only displayed in /proc/execdomains - handler is unused - pers_low and pers_high are only used internally to find this specific exec domain otherwise the default exec domain will be used - all other fields match the default exec domain So let's get rid of this. Reported-by: Thomas Gleixner Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/Makefile | 5 ++--- arch/s390/kernel/compat_exec_domain.c | 29 ----------------------------- 2 files changed, 2 insertions(+), 32 deletions(-) delete mode 100644 arch/s390/kernel/compat_exec_domain.c (limited to 'arch/s390') diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 1b3ac09c11b6..a95c4ca99617 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -47,9 +47,8 @@ obj-$(CONFIG_SCHED_BOOK) += topology.o obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o obj-$(CONFIG_AUDIT) += audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o -obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ - compat_wrapper.o compat_exec_domain.o \ - $(compat-obj-y) +obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o +obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y) obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c deleted file mode 100644 index 765fabdada9f..000000000000 --- a/arch/s390/kernel/compat_exec_domain.c +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Support for 32-bit Linux for S390 personality. - * - * Copyright IBM Corp. 2000 - * Author(s): Gerhard Tonn (ton@de.ibm.com) - * - * - */ - -#include -#include -#include -#include - -static struct exec_domain s390_exec_domain; - -static int __init s390_init (void) -{ - s390_exec_domain.name = "Linux/s390"; - s390_exec_domain.handler = NULL; - s390_exec_domain.pers_low = PER_LINUX32; - s390_exec_domain.pers_high = PER_LINUX32; - s390_exec_domain.signal_map = default_exec_domain.signal_map; - s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap; - register_exec_domain(&s390_exec_domain); - return 0; -} - -__initcall(s390_init); -- cgit v1.2.3 From 43eab381181c76a19734298b5c925c45fb1afe45 Mon Sep 17 00:00:00 2001 From: Philipp Hachtmann Date: Wed, 26 Feb 2014 16:19:22 +0100 Subject: s390/topology: Remove call to update_cpu_masks() The call to update_cpu_masks() from within topology_init() is completely redundant. This patch removes it. 
Signed-off-by: Philipp Hachtmann Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/topology.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 4b2e3e317004..6298fed11ced 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -451,7 +451,6 @@ static int __init topology_init(void) } set_topology_timer(); out: - update_cpu_masks(); return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); } device_initcall(topology_init); -- cgit v1.2.3 From 443fc8a3e01cef95f2c7c26605457d49e63da6b1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 11 Mar 2014 10:49:20 +0100 Subject: s390/perf: make print_debug_cf() static Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 5d2dfa31c4ef..61595c1f0a0f 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -121,7 +121,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs) : PERF_RECORD_MISC_KERNEL; } -void print_debug_cf(void) +static void print_debug_cf(void) { struct cpumf_ctr_info cf_info; int cpu = smp_processor_id(); -- cgit v1.2.3 From 818a330c4e1be9c39fa7ca9221e044907d92b4bb Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Mar 2014 12:01:08 +0100 Subject: s390/ptrace: add support for PTRACE_SINGLEBLOCK The PTRACE_SINGLEBLOCK option is used to get control whenever the inferior has executed a successful branch. The PER option to implement block stepping is successful-branching event, bit 32 in the PER-event mask. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ptrace.h | 1 + arch/s390/include/asm/thread_info.h | 1 + arch/s390/include/uapi/asm/ptrace.h | 6 ++++++ arch/s390/kernel/ptrace.c | 13 ++++++++++++- 4 files changed, 20 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 9c82cebddabd..f4783c0b7b43 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -83,6 +83,7 @@ struct per_struct_kernel { * These are defined as per linux/ptrace.h, which see. */ #define arch_has_single_step() (1) +#define arch_has_block_step() (1) #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index d2e53d667a92..3ccd71b90345 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -92,6 +92,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ #define TIF_SINGLE_STEP 20 /* This task is single stepped */ +#define TIF_BLOCK_STEP 21 /* This task is block stepped */ #define _TIF_SYSCALL (1<. 
+ */ +#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */ + /* * PT_PROT definition is loosely based on hppa bsd definition in * gdb/hppab-nat.c diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index f6be6087a0e9..4ac8fafec95f 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -85,7 +85,10 @@ void update_cr_regs(struct task_struct *task) /* merge TIF_SINGLE_STEP into user specified PER registers. */ if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { - new.control |= PER_EVENT_IFETCH; + if (test_tsk_thread_flag(task, TIF_BLOCK_STEP)) + new.control |= PER_EVENT_BRANCH; + else + new.control |= PER_EVENT_IFETCH; #ifdef CONFIG_64BIT new.control |= PER_CONTROL_SUSPENSION; new.control |= PER_EVENT_TRANSACTION_END; @@ -107,14 +110,22 @@ void update_cr_regs(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { + clear_tsk_thread_flag(task, TIF_BLOCK_STEP); set_tsk_thread_flag(task, TIF_SINGLE_STEP); } void user_disable_single_step(struct task_struct *task) { + clear_tsk_thread_flag(task, TIF_BLOCK_STEP); clear_tsk_thread_flag(task, TIF_SINGLE_STEP); } +void user_enable_block_step(struct task_struct *task) +{ + set_tsk_thread_flag(task, TIF_SINGLE_STEP); + set_tsk_thread_flag(task, TIF_BLOCK_STEP); +} + /* * Called by kernel/ptrace.c when detaching.. * -- cgit v1.2.3 From 184d0f0c06afb8f3b583db7b8d712caae06a24dd Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 14 Mar 2014 18:02:01 +0100 Subject: s390: update defconfigs Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/configs/default_defconfig | 46 ++++++++++++++++++++++++++++----- arch/s390/configs/gcov_defconfig | 33 ++++++++++++++++++----- arch/s390/configs/performance_defconfig | 33 ++++++++++++++++++----- arch/s390/configs/zfcpdump_defconfig | 1 - arch/s390/defconfig | 10 +++++++ 5 files changed, 103 insertions(+), 20 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index e0af2ee58751..ddaae2f5c913 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -46,6 +46,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_MARCH_Z9_109=y +CONFIG_NR_CPUS=256 CONFIG_PREEMPT=y CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y @@ -58,7 +59,6 @@ CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y CONFIG_CHSC_SCH=y CONFIG_CRASH_DUMP=y -CONFIG_ZFCPDUMP=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_HIBERNATION=y @@ -101,7 +101,6 @@ CONFIG_TCP_CONG_VENO=m CONFIG_TCP_CONG_YEAH=m CONFIG_TCP_CONG_ILLINOIS=m CONFIG_IPV6=y -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m @@ -111,6 +110,7 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=m CONFIG_INET6_XFRM_MODE_TUNNEL=m CONFIG_INET6_XFRM_MODE_BEET=m CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_SIT=m CONFIG_IPV6_GRE=m CONFIG_IPV6_MULTIPLE_TABLES=y @@ -135,7 +135,17 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NETFILTER_TPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -204,7 +214,9 @@ CONFIG_IP_SET_HASH_IP=m 
CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m @@ -227,6 +239,11 @@ CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NF_CONNTRACK_IPV4=m # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -249,6 +266,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -268,6 +288,7 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m CONFIG_RDS_RDMA=m @@ -314,6 +335,7 @@ CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m @@ -381,8 +403,8 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m @@ -434,7 +456,6 @@ CONFIG_TN3270_FS=y CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m -CONFIG_ZVM_WATCHDOG=m # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m @@ -534,13 +555,23 @@ CONFIG_UNUSED_SYMBOLS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_PAGEALLOC=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_SELFTEST=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y CONFIG_SLUB_DEBUG_ON=y CONFIG_SLUB_STATS=y +CONFIG_DEBUG_KMEMLEAK=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM_RB=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_DETECT_HUNG_TASK=y CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_RT_MUTEX_TESTER=y @@ -573,9 +604,11 @@ CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_KPROBE_EVENT is not set CONFIG_LKDTM=m +CONFIG_TEST_LIST_SORT=y CONFIG_KPROBES_SANITY_TEST=y -CONFIG_RBTREE_TEST=m +CONFIG_RBTREE_TEST=y CONFIG_INTERVAL_TREE_TEST=m +CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_DMA_API_DEBUG=y # CONFIG_STRICT_DEVMEM is not set @@ -638,7 +671,6 @@ CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_ASYMMETRIC_KEY_TYPE=m CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m -CONFIG_PUBLIC_KEY_ALGO_RSA=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index b9f6b4cab927..c81a74e3e25a 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -46,6 +46,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_MARCH_Z9_109=y +CONFIG_NR_CPUS=256 CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y @@ -56,7 +57,6 @@ CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y CONFIG_CHSC_SCH=y CONFIG_CRASH_DUMP=y -CONFIG_ZFCPDUMP=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 
CONFIG_BINFMT_MISC=m CONFIG_HIBERNATION=y @@ -99,7 +99,6 @@ CONFIG_TCP_CONG_VENO=m CONFIG_TCP_CONG_YEAH=m CONFIG_TCP_CONG_ILLINOIS=m CONFIG_IPV6=y -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m @@ -109,6 +108,7 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=m CONFIG_INET6_XFRM_MODE_TUNNEL=m CONFIG_INET6_XFRM_MODE_BEET=m CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_SIT=m CONFIG_IPV6_GRE=m CONFIG_IPV6_MULTIPLE_TABLES=y @@ -133,7 +133,17 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NETFILTER_TPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -202,7 +212,9 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m @@ -225,6 +237,11 @@ CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NF_CONNTRACK_IPV4=m # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -247,6 +264,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -266,6 +286,7 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m CONFIG_RDS_RDMA=m @@ -311,6 +332,7 @@ CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m @@ -378,8 +400,8 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m @@ -431,7 +453,6 @@ CONFIG_TN3270_FS=y CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m -CONFIG_ZVM_WATCHDOG=m # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m @@ -540,6 +561,7 @@ CONFIG_BLK_DEV_IO_TRACE=y CONFIG_LKDTM=m CONFIG_RBTREE_TEST=m CONFIG_INTERVAL_TREE_TEST=m +CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y # CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y @@ -601,7 +623,6 @@ CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_ASYMMETRIC_KEY_TYPE=m CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m -CONFIG_PUBLIC_KEY_ALGO_RSA=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 91087b43e8fa..b5ba8fe1cc64 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -44,6 +44,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_MARCH_Z9_109=y +CONFIG_NR_CPUS=256 CONFIG_HZ_100=y 
CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y @@ -54,7 +55,6 @@ CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y CONFIG_CHSC_SCH=y CONFIG_CRASH_DUMP=y -CONFIG_ZFCPDUMP=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_HIBERNATION=y @@ -97,7 +97,6 @@ CONFIG_TCP_CONG_VENO=m CONFIG_TCP_CONG_YEAH=m CONFIG_TCP_CONG_ILLINOIS=m CONFIG_IPV6=y -CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m @@ -107,6 +106,7 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=m CONFIG_INET6_XFRM_MODE_TUNNEL=m CONFIG_INET6_XFRM_MODE_BEET=m CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m CONFIG_IPV6_SIT=m CONFIG_IPV6_GRE=m CONFIG_IPV6_MULTIPLE_TABLES=y @@ -131,7 +131,17 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NETFILTER_TPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -200,7 +210,9 @@ CONFIG_IP_SET_HASH_IP=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m CONFIG_IP_SET_HASH_NETPORT=m CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m @@ -223,6 +235,11 @@ CONFIG_IP_VS_FTP=m CONFIG_IP_VS_PE_SIP=m CONFIG_NF_CONNTRACK_IPV4=m # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NF_TABLES_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -245,6 +262,9 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -264,6 +284,7 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m CONFIG_RDS_RDMA=m @@ -309,6 +330,7 @@ CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m @@ -376,8 +398,8 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m -CONFIG_DM_RAID=m CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m @@ -429,7 +451,6 @@ CONFIG_TN3270_FS=y CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m -CONFIG_ZVM_WATCHDOG=m # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m @@ -532,6 +553,7 @@ CONFIG_LATENCYTOP=y CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_KPROBE_EVENT is not set CONFIG_LKDTM=m +CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y # CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y @@ -593,7 +615,6 @@ CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_ASYMMETRIC_KEY_TYPE=m CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m -CONFIG_PUBLIC_KEY_ALGO_RSA=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index d725c4d956e4..cef073ca1f07 100644 --- 
a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -19,7 +19,6 @@ CONFIG_HZ_100=y # CONFIG_CHSC_SCH is not set # CONFIG_SCM_BUS is not set CONFIG_CRASH_DUMP=y -CONFIG_ZFCPDUMP=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_SECCOMP is not set # CONFIG_IUCV is not set diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 33f57514f424..4557cb7ffddf 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -40,6 +40,7 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_DEFAULT_DEADLINE=y CONFIG_MARCH_Z196=y +CONFIG_NR_CPUS=256 CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y @@ -122,22 +123,31 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y # CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_UNUSED_SYMBOLS=y +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_PAGEALLOC=y +CONFIG_DETECT_HUNG_TASK=y CONFIG_TIMER_STATS=y +CONFIG_DEBUG_RT_MUTEXES=y CONFIG_PROVE_LOCKING=y CONFIG_LOCK_STAT=y CONFIG_DEBUG_LOCKDEP=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_WRITECOUNT=y CONFIG_DEBUG_LIST=y +CONFIG_DEBUG_SG=y CONFIG_DEBUG_NOTIFIERS=y CONFIG_PROVE_RCU=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_RCU_TRACE=y CONFIG_LATENCYTOP=y +CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_KPROBES_SANITY_TEST=y # CONFIG_STRICT_DEVMEM is not set +CONFIG_S390_PTDUMP=y CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m -- cgit v1.2.3 From 36a554021b44d146178ae98e598c9502a1269f7d Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Mon, 17 Mar 2014 11:31:28 +0100 Subject: hypfs: Add clarification for "weight_min" attribute The "weight_min" attribute got the wrong name. The value represents the number of non-stopped (operating) CPUS. Therefore add a note and rename the struct member to "ocpus". Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/hypfs/hypfs_vm.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index 24908ce149f1..32040ace00ea 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -32,7 +32,7 @@ struct diag2fc_data { __u32 pcpus; __u32 lcpus; __u32 vcpus; - __u32 cpu_min; + __u32 ocpus; __u32 cpu_max; __u32 cpu_shares; __u32 cpu_use_samp; @@ -142,7 +142,12 @@ static int hpyfs_vm_create_guest(struct dentry *systems_dir, ATTRIBUTE(cpus_dir, "capped", capped_value); ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag); ATTRIBUTE(cpus_dir, "count", data->vcpus); - ATTRIBUTE(cpus_dir, "weight_min", data->cpu_min); + /* + * Note: The "weight_min" attribute got the wrong name. + * The value represents the number of non-stopped (operating) + * CPUS. + */ + ATTRIBUTE(cpus_dir, "weight_min", data->ocpus); ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max); ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares); -- cgit v1.2.3 From cf813db0b448b45b454f0983329c3c7b007f9ab7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 10 Mar 2014 14:50:16 +0100 Subject: s390/smp: limit number of cpus in possible cpu mask Limit the number of bits to the maximum number of cpus a machine can have. possible_cpu_mask typically will have more bits set than a machine may physically have. This results in wasted memory during per-cpu memory allocations, if the possible mask contains more cpus than physically possible for a given configuration. 
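
As an illustration of the logic described above (a standalone sketch, not part of this patch and not kernel code): the SCLP read-info block reports either a highest CPU address (hcpua) or a CPU-list length (ncpurl), and smp_fill_possible_mask() caps the possible mask at whichever count that yields. The scaffolding, the NR_CPU_IDS constant and the example numbers below are assumptions made for the sketch only.

#include <stdio.h>

#define NR_CPU_IDS 256	/* assumed here; matches the CONFIG_NR_CPUS=256 set in the defconfigs above */

/* mirrors the hcpua/ncpurl decision added to sclp_facilities_detect() */
static unsigned int max_cpu_from_sclp(unsigned int hcpua, unsigned int ncpurl,
				      int machine_is_vm)
{
	if (!hcpua)			/* no highest-CPU-address reported */
		return machine_is_vm ? 64 : ncpurl;
	return hcpua + 1;		/* CPU addresses are 0-based */
}

/* mirrors the new smp_fill_possible_mask() calculation */
static unsigned int possible_cpu_count(unsigned int setup_possible_cpus,
				       unsigned int sclp_max_cpu)
{
	unsigned int sclp = sclp_max_cpu ? sclp_max_cpu : NR_CPU_IDS;
	unsigned int possible = setup_possible_cpus ? setup_possible_cpus : NR_CPU_IDS;

	return possible < sclp ? possible : sclp;
}

int main(void)
{
	/* example: a machine reporting hcpua = 15 ends up with 16 possible CPUs */
	unsigned int sclp_max = max_cpu_from_sclp(15, 0, 0);

	printf("possible cpus: %u\n", possible_cpu_count(0, sclp_max));
	return 0;
}
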
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/sclp.h | 1 + arch/s390/kernel/smp.c | 8 ++++---- drivers/s390/char/sclp_early.c | 23 +++++++++++++++++++++-- 3 files changed, 26 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index abaca2275c7a..2f5e9932b4de 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -46,6 +46,7 @@ int sclp_cpu_configure(u8 cpu); int sclp_cpu_deconfigure(u8 cpu); unsigned long long sclp_get_rnmax(void); unsigned long long sclp_get_rzm(void); +unsigned int sclp_get_max_cpu(void); int sclp_sdias_blk_count(void); int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); int sclp_chp_configure(struct chp_id chpid); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a7125b62a9a6..8827883310dd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -773,11 +773,11 @@ void __noreturn cpu_die(void) void __init smp_fill_possible_mask(void) { - unsigned int possible, cpu; + unsigned int possible, sclp, cpu; - possible = setup_possible_cpus; - if (!possible) - possible = MACHINE_IS_VM ? 64 : nr_cpu_ids; + sclp = sclp_get_max_cpu() ?: nr_cpu_ids; + possible = setup_possible_cpus ?: nr_cpu_ids; + possible = min(possible, sclp); for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) set_cpu_possible(cpu, true); } diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 2c6aac66e2b0..14196ea0fdf3 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -20,7 +20,9 @@ struct read_info_sccb { struct sccb_header header; /* 0-7 */ u16 rnmax; /* 8-9 */ u8 rnsize; /* 10 */ - u8 _reserved0[24 - 11]; /* 11-15 */ + u8 _reserved0[16 - 11]; /* 11-15 */ + u16 ncpurl; /* 16-17 */ + u8 _reserved7[24 - 18]; /* 18-23 */ u8 loadparm[8]; /* 24-31 */ u8 _reserved1[48 - 32]; /* 32-47 */ u64 facilities; /* 48-55 */ @@ -32,13 +34,16 @@ struct read_info_sccb { u8 _reserved4[100 - 92]; /* 92-99 */ u32 rnsize2; /* 100-103 */ u64 rnmax2; /* 104-111 */ - u8 _reserved5[4096 - 112]; /* 112-4095 */ + u8 _reserved5[120 - 112]; /* 112-119 */ + u16 hcpua; /* 120-121 */ + u8 _reserved6[4096 - 122]; /* 122-4095 */ } __packed __aligned(PAGE_SIZE); static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata; static unsigned int sclp_con_has_vt220 __initdata; static unsigned int sclp_con_has_linemode __initdata; static unsigned long sclp_hsa_size; +static unsigned int sclp_max_cpu; static struct sclp_ipl_info sclp_ipl_info; u64 sclp_facilities; @@ -102,6 +107,15 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; sclp_rzm <<= 20; + if (!sccb->hcpua) { + if (MACHINE_IS_VM) + sclp_max_cpu = 64; + else + sclp_max_cpu = sccb->ncpurl; + } else { + sclp_max_cpu = sccb->hcpua + 1; + } + /* Save IPL information */ sclp_ipl_info.is_valid = 1; if (sccb->flags & 0x2) @@ -129,6 +143,11 @@ unsigned long long sclp_get_rzm(void) return sclp_rzm; } +unsigned int sclp_get_max_cpu(void) +{ + return sclp_max_cpu; +} + /* * This function will be called after sclp_facilities_detect(), which gets * called from early.c code. 
The sclp_facilities_detect() function retrieves -- cgit v1.2.3 From c7c5be73ccc05da9899b313b9fa0042aae56502f Mon Sep 17 00:00:00 2001 From: Dominik Dingel Date: Wed, 19 Mar 2014 10:13:22 +0100 Subject: s390/mm: fixing comment so that parameter name match Signed-off-by: Dominik Dingel Signed-off-by: Martin Schwidefsky --- arch/s390/mm/pgtable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index f8b58a7a3048..27a1b931f17d 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -748,7 +748,7 @@ EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier); /** * gmap_ipte_notify - mark a range of ptes for invalidation notification * @gmap: pointer to guest mapping meta data structure - * @address: virtual address in the guest address space + * @start: virtual address in the guest address space * @len: size of area * * Returns 0 if for each page in the given range a gmap mapping exists and -- cgit v1.2.3 From aaeff84a2dfa224611fc9fee89cb20277469c454 Mon Sep 17 00:00:00 2001 From: Dominik Dingel Date: Wed, 19 Mar 2014 10:18:49 +0100 Subject: s390/mm: remove unnecessary parameter from gmap_do_ipte_notify Signed-off-by: Dominik Dingel Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgtable.h | 4 ++-- arch/s390/mm/pgtable.c | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index a7dd672c97f8..9602154401d2 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -841,7 +841,7 @@ void __gmap_zap(unsigned long address, struct gmap *); void gmap_register_ipte_notifier(struct gmap_notifier *); void gmap_unregister_ipte_notifier(struct gmap_notifier *); int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len); -void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *); +void gmap_do_ipte_notify(struct mm_struct *, pte_t *); static inline pgste_t pgste_ipte_notify(struct mm_struct *mm, unsigned long addr, @@ -850,7 +850,7 @@ static inline pgste_t pgste_ipte_notify(struct mm_struct *mm, #ifdef CONFIG_PGSTE if (pgste_val(pgste) & PGSTE_IN_BIT) { pgste_val(pgste) &= ~PGSTE_IN_BIT; - gmap_do_ipte_notify(mm, addr, ptep); + gmap_do_ipte_notify(mm, ptep); } #endif return pgste; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 27a1b931f17d..796c9320c709 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -802,13 +802,12 @@ EXPORT_SYMBOL_GPL(gmap_ipte_notify); /** * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte. * @mm: pointer to the process mm_struct - * @addr: virtual address in the process address space * @pte: pointer to the page table entry * * This function is assumed to be called with the page table lock held * for the pte to notify. 
*/ -void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte) +void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte) { unsigned long segment_offset; struct gmap_notifier *nb; -- cgit v1.2.3 From 6e5a40a49fe16f8032c4dcd4fa5ff866da09d445 Mon Sep 17 00:00:00 2001 From: Dominik Dingel Date: Wed, 19 Mar 2014 10:19:35 +0100 Subject: s390/mm: remove unecessary parameter from pgste_ipte_notify Signed-off-by: Dominik Dingel Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgtable.h | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 9602154401d2..1ab75eaacbd4 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -844,7 +844,6 @@ int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len); void gmap_do_ipte_notify(struct mm_struct *, pte_t *); static inline pgste_t pgste_ipte_notify(struct mm_struct *mm, - unsigned long addr, pte_t *ptep, pgste_t pgste) { #ifdef CONFIG_PGSTE @@ -1104,7 +1103,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); + pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); } pte = *ptep; @@ -1150,7 +1149,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(mm, address, ptep, pgste); + pgste = pgste_ipte_notify(mm, ptep, pgste); } pte = *ptep; @@ -1174,7 +1173,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste_ipte_notify(mm, address, ptep, pgste); + pgste_ipte_notify(mm, ptep, pgste); } pte = *ptep; @@ -1211,7 +1210,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); + pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); } pte = *ptep; @@ -1245,7 +1244,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, if (!full && mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(mm, address, ptep, pgste); + pgste = pgste_ipte_notify(mm, ptep, pgste); } pte = *ptep; @@ -1270,7 +1269,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, if (pte_write(pte)) { if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(mm, address, ptep, pgste); + pgste = pgste_ipte_notify(mm, ptep, pgste); } ptep_flush_lazy(mm, address, ptep); @@ -1296,7 +1295,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, return 0; if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); + pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); } ptep_flush_direct(vma->vm_mm, address, ptep); -- cgit v1.2.3
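
To summarize the two cleanups above, here is a minimal standalone C model (illustrative only, not kernel code) of the pgste IPTE-notify pattern that remains once the unused address argument is gone: the per-pte status word carries a pending-notification bit, and when a pte is about to be invalidated the bit is tested and cleared and the notifier is called with just the mm and the pte pointer. All types, names and the notifier body below are simplified stand-ins.

#include <stdio.h>

#define PGSTE_IN_BIT 0x1UL		/* stand-in for the real bit definition */

struct mm_struct { int id; };		/* simplified placeholder type */
typedef unsigned long pte_t;
typedef unsigned long pgste_t;

/* models gmap_do_ipte_notify(struct mm_struct *, pte_t *) after the cleanup */
static void do_ipte_notify(struct mm_struct *mm, pte_t *ptep)
{
	printf("notify: mm %d, pte %#lx\n", mm->id, (unsigned long)*ptep);
}

/* models pgste_ipte_notify() with the addr parameter removed */
static pgste_t pgste_ipte_notify_model(struct mm_struct *mm, pte_t *ptep,
				       pgste_t pgste)
{
	if (pgste & PGSTE_IN_BIT) {
		pgste &= ~PGSTE_IN_BIT;	/* one-shot: clear the bit before notifying */
		do_ipte_notify(mm, ptep);
	}
	return pgste;
}

int main(void)
{
	struct mm_struct mm = { .id = 1 };
	pte_t pte = 0x1000;
	pgste_t pgste = PGSTE_IN_BIT;

	/* as in ptep_get_and_clear(): notify first, then invalidate the pte */
	pgste = pgste_ipte_notify_model(&mm, &pte, pgste);
	(void)pgste;
	pte = 0;
	return 0;
}
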