author		Stephen Rothwell <sfr@canb.auug.org.au>	2009-07-20 10:57:08 +1000
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2009-07-20 10:57:08 +1000
commit		7f96f24521c047e5d501d5f0722e092a22f9764c (patch)
tree		a9f14bac76c02b26fbed7ff0a70101612afb2232 /arch
parent		5f16801777530edf3886c0e1ebd1a8e111eb10f7 (diff)
parent		9e6effaf5d253d66bc81e034c045bd716d97e621 (diff)
Merge commit 's390/features'
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                                |   1
-rw-r--r--  arch/s390/Makefile                               |   3
-rw-r--r--  arch/s390/include/asm/atomic.h                   | 205
-rw-r--r--  arch/s390/include/asm/checksum.h                 |  25
-rw-r--r--  arch/s390/include/asm/chsc.h                     |  28
-rw-r--r--  arch/s390/include/asm/cio.h                      | 223
-rw-r--r--  arch/s390/include/asm/scsw.h                     | 956
-rw-r--r--  arch/s390/include/asm/timex.h                    |  16
-rw-r--r--  arch/s390/kernel/Makefile                        |   4
-rw-r--r--  arch/s390/kernel/early.c                         |   6
-rw-r--r--  arch/s390/kernel/head.S                          |   2
-rw-r--r--  arch/s390/kernel/mcount.S                        | 147
-rw-r--r--  arch/s390/kernel/mcount64.S                      |  78
-rw-r--r--  arch/s390/kernel/smp.c                           |   7
-rw-r--r--  arch/s390/kernel/suspend.c                       |  73
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S (renamed from arch/s390/power/swsusp_asm64.S) |  37
-rw-r--r--  arch/s390/kernel/time.c                          |   9
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S          |  11
-rw-r--r--  arch/s390/power/Makefile                         |   8
-rw-r--r--  arch/s390/power/suspend.c                        |  40
-rw-r--r--  arch/s390/power/swsusp.c                         |  30
-rw-r--r--  arch/s390/power/swsusp_64.c                      |  17
22 files changed, 1346 insertions(+), 580 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2ae5d72f47ed..47836b945d03 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,7 +95,6 @@ config S390
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
select HAVE_PERF_COUNTERS
- select GENERIC_ATOMIC64 if !64BIT
config SCHED_OMIT_FRAME_POINTER
bool
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0ff387cebf88..fc8fb20e7fc0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -88,8 +88,7 @@ LDFLAGS_vmlinux := -e start
head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
- arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \
- arch/s390/power/
+ arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c7d0abfb0f00..ae7c8f9f94a5 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,33 +1,23 @@
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__
-#include <linux/compiler.h>
-#include <linux/types.h>
-
/*
- * include/asm-s390/atomic.h
+ * Copyright 1999,2009 IBM Corp.
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Denis Joseph Barrow,
+ * Arnd Bergmann <arndb@de.ibm.com>,
*
- * S390 version
- * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Denis Joseph Barrow,
- * Arnd Bergmann (arndb@de.ibm.com)
- *
- * Derived from "include/asm-i386/bitops.h"
- * Copyright (C) 1992, Linus Torvalds
+ * Atomic operations that C can't guarantee us.
+ * Useful for resource counting etc.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
*
*/
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- * S390 uses 'Compare And Swap' for atomicity in SMP enviroment
- */
+#include <linux/compiler.h>
+#include <linux/types.h>
#define ATOMIC_INIT(i) { (i) }
-#ifdef __KERNEL__
-
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CS_LOOP(ptr, op_val, op_string) ({ \
@@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i)
barrier();
}
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
return __CS_LOOP(v, i, "ar");
}
@@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
#define atomic_inc_return(_v) atomic_add_return(1, _v)
#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
return __CS_LOOP(v, i, "sr");
}
@@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
- __CS_LOOP(v, ~mask, "nr");
+ __CS_LOOP(v, ~mask, "nr");
}
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
- __CS_LOOP(v, mask, "or");
+ __CS_LOOP(v, mask, "or");
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile(
@@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
return old;
}
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#undef __CS_LOOP
-#ifdef __s390x__
#define ATOMIC64_INIT(i) { (i) }
+#ifdef CONFIG_64BIT
+
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
@@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
: "=&d" (old_val), "=&d" (new_val), \
"=Q" (((atomic_t *)(ptr))->counter) \
: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
- : "cc", "memory" ); \
+ : "cc", "memory"); \
new_val; \
})
@@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
"=m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val), \
"m" (((atomic_t *)(ptr))->counter) \
- : "cc", "memory" ); \
+ : "cc", "memory"); \
new_val; \
})
@@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i)
barrier();
}
-static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
return __CSG_LOOP(v, i, "agr");
}
-#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
-#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
return __CSG_LOOP(v, i, "sgr");
}
-#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
-#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
-static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, ~mask, "ngr");
+ __CSG_LOOP(v, ~mask, "ngr");
}
-static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, mask, "ogr");
+ __CSG_LOOP(v, mask, "ogr");
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+static inline long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
@@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
return old;
}
-static __inline__ int atomic64_add_unless(atomic64_t *v,
- long long a, long long u)
+#undef __CSG_LOOP
+
+#else /* CONFIG_64BIT */
+
+typedef struct {
+ long long counter;
+} atomic64_t;
+
+static inline long long atomic64_read(const atomic64_t *v)
+{
+ register_pair rp;
+
+ asm volatile(
+ " lm %0,%N0,0(%1)"
+ : "=&d" (rp)
+ : "a" (&v->counter), "m" (v->counter)
+ );
+ return rp.pair;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ register_pair rp = {.pair = i};
+
+ asm volatile(
+ " stm %1,%N1,0(%2)"
+ : "=m" (v->counter)
+ : "d" (rp), "a" (&v->counter)
+ );
+}
+
+static inline long long atomic64_xchg(atomic64_t *v, long long new)
+{
+ register_pair rp_new = {.pair = new};
+ register_pair rp_old;
+
+ asm volatile(
+ " lm %0,%N0,0(%2)\n"
+ "0: cds %0,%3,0(%2)\n"
+ " jl 0b\n"
+ : "=&d" (rp_old), "+m" (v->counter)
+ : "a" (&v->counter), "d" (rp_new)
+ : "cc");
+ return rp_old.pair;
+}
+
+static inline long long atomic64_cmpxchg(atomic64_t *v,
+ long long old, long long new)
+{
+ register_pair rp_old = {.pair = old};
+ register_pair rp_new = {.pair = new};
+
+ asm volatile(
+ " cds %0,%3,0(%2)"
+ : "+&d" (rp_old), "+m" (v->counter)
+ : "a" (&v->counter), "d" (rp_new)
+ : "cc");
+ return rp_old.pair;
+}
+
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old + i;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+ return new;
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old - i;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+ return new;
+}
+
+static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old | mask;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old & mask;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+#endif /* CONFIG_64BIT */
+
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
long long c, old;
c = atomic64_read(v);
@@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
return c != u;
}
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#undef __CSG_LOOP
-
-#else /* __s390x__ */
-
-#include <asm-generic/atomic64.h>
-
-#endif /* __s390x__ */
+#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
+#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
@@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic-long.h>
-#endif /* __KERNEL__ */
+
#endif /* __ARCH_S390_ATOMIC__ */
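On 31-bit kernels (CONFIG_64BIT unset) the patch above stops selecting GENERIC_ATOMIC64 and pulling in asm-generic/atomic64.h, and instead emulates every 64-bit atomic with a compare-double-and-swap (CDS) retry loop. A minimal stand-alone sketch of that retry pattern, using GCC's __sync_val_compare_and_swap() builtin as a stand-in for CDS (illustration only, not the kernel code):

#include <stdio.h>

typedef struct {
	long long counter;
} atomic64_sketch_t;

/* Same shape as the new 31-bit atomic64_add_return(): read, compute,
 * then retry until the compare-and-swap finds the value unchanged. */
static long long sketch_add_return(long long i, atomic64_sketch_t *v)
{
	long long old, new;

	do {
		old = v->counter;
		new = old + i;
	} while (__sync_val_compare_and_swap(&v->counter, old, new) != old);
	return new;
}

int main(void)
{
	atomic64_sketch_t v = { .counter = 40 };

	printf("%lld\n", sketch_add_return(2, &v));	/* prints 42 */
	return 0;
}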
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index d5a8e7c1477c..6c00f6800a34 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -78,28 +78,11 @@ csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
*/
static inline __sum16 csum_fold(__wsum sum)
{
-#ifndef __s390x__
- register_pair rp;
+ u32 csum = (__force u32) sum;
- asm volatile(
- " slr %N1,%N1\n" /* %0 = H L */
- " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */
- " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */
- " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */
- " alr %0,%1\n" /* %0 = H+L+C L+H */
- " srl %0,16\n" /* %0 = H+L+C */
- : "+&d" (sum), "=d" (rp) : : "cc");
-#else /* __s390x__ */
- asm volatile(
- " sr 3,3\n" /* %0 = H*65536 + L */
- " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */
- " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */
- " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */
- " alr %0,2\n" /* %0 = H+L+C L+H */
- " srl %0,16\n" /* %0 = H+L+C */
- : "+&d" (sum) : : "cc", "2", "3");
-#endif /* __s390x__ */
- return (__force __sum16) ~sum;
+ csum += (csum >> 16) + (csum << 16);
+ csum >>= 16;
+ return (__force __sum16) ~csum;
}
/*
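The rewritten csum_fold() above replaces both inline-assembly variants with three lines of C: adding (csum >> 16) and (csum << 16) makes the end-around carry from the low half land in the high half, which the final shift then extracts. A stand-alone sketch of the same arithmetic with a made-up input value:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the new csum_fold(), minus the kernel __wsum/__sum16 types. */
static uint16_t fold(uint32_t csum)
{
	csum += (csum >> 16) + (csum << 16);	/* high half now holds high + low + end-around carry */
	csum >>= 16;				/* keep the folded 16-bit sum */
	return (uint16_t)~csum;			/* one's complement, as csum_fold() returns */
}

int main(void)
{
	/* 0x00012345: fold 0x0001 + 0x2345 = 0x2346, complement = 0xdcb9 */
	printf("0x%04x\n", fold(0x00012345u));
	return 0;
}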
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index 807997f7414b..4943654ed7fd 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -125,4 +125,32 @@ struct chsc_cpd_info {
#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
+#ifdef __KERNEL__
+
+struct css_general_char {
+ u64 : 12;
+ u32 dynio : 1; /* bit 12 */
+ u32 : 28;
+ u32 aif : 1; /* bit 41 */
+ u32 : 3;
+ u32 mcss : 1; /* bit 45 */
+ u32 fcs : 1; /* bit 46 */
+ u32 : 1;
+ u32 ext_mb : 1; /* bit 48 */
+ u32 : 7;
+ u32 aif_tdd : 1; /* bit 56 */
+ u32 : 1;
+ u32 qebsm : 1; /* bit 58 */
+ u32 : 8;
+ u32 aif_osa : 1; /* bit 67 */
+ u32 : 14;
+ u32 cib : 1; /* bit 82 */
+ u32 : 5;
+ u32 fcx : 1; /* bit 88 */
+ u32 : 7;
+}__attribute__((packed));
+
+extern struct css_general_char css_general_characteristics;
+
+#endif /* __KERNEL__ */
#endif
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 619bf94b11f1..e85679af54dd 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -15,228 +15,7 @@
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
-/**
- * struct cmd_scsw - command-mode subchannel status word
- * @key: subchannel key
- * @sctl: suspend control
- * @eswf: esw format
- * @cc: deferred condition code
- * @fmt: format
- * @pfch: prefetch
- * @isic: initial-status interruption control
- * @alcc: address-limit checking control
- * @ssi: suppress-suspended interruption
- * @zcc: zero condition code
- * @ectl: extended control
- * @pno: path not operational
- * @res: reserved
- * @fctl: function control
- * @actl: activity control
- * @stctl: status control
- * @cpa: channel program address
- * @dstat: device status
- * @cstat: subchannel status
- * @count: residual count
- */
-struct cmd_scsw {
- __u32 key : 4;
- __u32 sctl : 1;
- __u32 eswf : 1;
- __u32 cc : 2;
- __u32 fmt : 1;
- __u32 pfch : 1;
- __u32 isic : 1;
- __u32 alcc : 1;
- __u32 ssi : 1;
- __u32 zcc : 1;
- __u32 ectl : 1;
- __u32 pno : 1;
- __u32 res : 1;
- __u32 fctl : 3;
- __u32 actl : 7;
- __u32 stctl : 5;
- __u32 cpa;
- __u32 dstat : 8;
- __u32 cstat : 8;
- __u32 count : 16;
-} __attribute__ ((packed));
-
-/**
- * struct tm_scsw - transport-mode subchannel status word
- * @key: subchannel key
- * @eswf: esw format
- * @cc: deferred condition code
- * @fmt: format
- * @x: IRB-format control
- * @q: interrogate-complete
- * @ectl: extended control
- * @pno: path not operational
- * @fctl: function control
- * @actl: activity control
- * @stctl: status control
- * @tcw: TCW address
- * @dstat: device status
- * @cstat: subchannel status
- * @fcxs: FCX status
- * @schxs: subchannel-extended status
- */
-struct tm_scsw {
- u32 key:4;
- u32 :1;
- u32 eswf:1;
- u32 cc:2;
- u32 fmt:3;
- u32 x:1;
- u32 q:1;
- u32 :1;
- u32 ectl:1;
- u32 pno:1;
- u32 :1;
- u32 fctl:3;
- u32 actl:7;
- u32 stctl:5;
- u32 tcw;
- u32 dstat:8;
- u32 cstat:8;
- u32 fcxs:8;
- u32 schxs:8;
-} __attribute__ ((packed));
-
-/**
- * union scsw - subchannel status word
- * @cmd: command-mode SCSW
- * @tm: transport-mode SCSW
- */
-union scsw {
- struct cmd_scsw cmd;
- struct tm_scsw tm;
-} __attribute__ ((packed));
-
-int scsw_is_tm(union scsw *scsw);
-u32 scsw_key(union scsw *scsw);
-u32 scsw_eswf(union scsw *scsw);
-u32 scsw_cc(union scsw *scsw);
-u32 scsw_ectl(union scsw *scsw);
-u32 scsw_pno(union scsw *scsw);
-u32 scsw_fctl(union scsw *scsw);
-u32 scsw_actl(union scsw *scsw);
-u32 scsw_stctl(union scsw *scsw);
-u32 scsw_dstat(union scsw *scsw);
-u32 scsw_cstat(union scsw *scsw);
-int scsw_is_solicited(union scsw *scsw);
-int scsw_is_valid_key(union scsw *scsw);
-int scsw_is_valid_eswf(union scsw *scsw);
-int scsw_is_valid_cc(union scsw *scsw);
-int scsw_is_valid_ectl(union scsw *scsw);
-int scsw_is_valid_pno(union scsw *scsw);
-int scsw_is_valid_fctl(union scsw *scsw);
-int scsw_is_valid_actl(union scsw *scsw);
-int scsw_is_valid_stctl(union scsw *scsw);
-int scsw_is_valid_dstat(union scsw *scsw);
-int scsw_is_valid_cstat(union scsw *scsw);
-int scsw_cmd_is_valid_key(union scsw *scsw);
-int scsw_cmd_is_valid_sctl(union scsw *scsw);
-int scsw_cmd_is_valid_eswf(union scsw *scsw);
-int scsw_cmd_is_valid_cc(union scsw *scsw);
-int scsw_cmd_is_valid_fmt(union scsw *scsw);
-int scsw_cmd_is_valid_pfch(union scsw *scsw);
-int scsw_cmd_is_valid_isic(union scsw *scsw);
-int scsw_cmd_is_valid_alcc(union scsw *scsw);
-int scsw_cmd_is_valid_ssi(union scsw *scsw);
-int scsw_cmd_is_valid_zcc(union scsw *scsw);
-int scsw_cmd_is_valid_ectl(union scsw *scsw);
-int scsw_cmd_is_valid_pno(union scsw *scsw);
-int scsw_cmd_is_valid_fctl(union scsw *scsw);
-int scsw_cmd_is_valid_actl(union scsw *scsw);
-int scsw_cmd_is_valid_stctl(union scsw *scsw);
-int scsw_cmd_is_valid_dstat(union scsw *scsw);
-int scsw_cmd_is_valid_cstat(union scsw *scsw);
-int scsw_cmd_is_solicited(union scsw *scsw);
-int scsw_tm_is_valid_key(union scsw *scsw);
-int scsw_tm_is_valid_eswf(union scsw *scsw);
-int scsw_tm_is_valid_cc(union scsw *scsw);
-int scsw_tm_is_valid_fmt(union scsw *scsw);
-int scsw_tm_is_valid_x(union scsw *scsw);
-int scsw_tm_is_valid_q(union scsw *scsw);
-int scsw_tm_is_valid_ectl(union scsw *scsw);
-int scsw_tm_is_valid_pno(union scsw *scsw);
-int scsw_tm_is_valid_fctl(union scsw *scsw);
-int scsw_tm_is_valid_actl(union scsw *scsw);
-int scsw_tm_is_valid_stctl(union scsw *scsw);
-int scsw_tm_is_valid_dstat(union scsw *scsw);
-int scsw_tm_is_valid_cstat(union scsw *scsw);
-int scsw_tm_is_valid_fcxs(union scsw *scsw);
-int scsw_tm_is_valid_schxs(union scsw *scsw);
-int scsw_tm_is_solicited(union scsw *scsw);
-
-#define SCSW_FCTL_CLEAR_FUNC 0x1
-#define SCSW_FCTL_HALT_FUNC 0x2
-#define SCSW_FCTL_START_FUNC 0x4
-
-#define SCSW_ACTL_SUSPENDED 0x1
-#define SCSW_ACTL_DEVACT 0x2
-#define SCSW_ACTL_SCHACT 0x4
-#define SCSW_ACTL_CLEAR_PEND 0x8
-#define SCSW_ACTL_HALT_PEND 0x10
-#define SCSW_ACTL_START_PEND 0x20
-#define SCSW_ACTL_RESUME_PEND 0x40
-
-#define SCSW_STCTL_STATUS_PEND 0x1
-#define SCSW_STCTL_SEC_STATUS 0x2
-#define SCSW_STCTL_PRIM_STATUS 0x4
-#define SCSW_STCTL_INTER_STATUS 0x8
-#define SCSW_STCTL_ALERT_STATUS 0x10
-
-#define DEV_STAT_ATTENTION 0x80
-#define DEV_STAT_STAT_MOD 0x40
-#define DEV_STAT_CU_END 0x20
-#define DEV_STAT_BUSY 0x10
-#define DEV_STAT_CHN_END 0x08
-#define DEV_STAT_DEV_END 0x04
-#define DEV_STAT_UNIT_CHECK 0x02
-#define DEV_STAT_UNIT_EXCEP 0x01
-
-#define SCHN_STAT_PCI 0x80
-#define SCHN_STAT_INCORR_LEN 0x40
-#define SCHN_STAT_PROG_CHECK 0x20
-#define SCHN_STAT_PROT_CHECK 0x10
-#define SCHN_STAT_CHN_DATA_CHK 0x08
-#define SCHN_STAT_CHN_CTRL_CHK 0x04
-#define SCHN_STAT_INTF_CTRL_CHK 0x02
-#define SCHN_STAT_CHAIN_CHECK 0x01
-
-/*
- * architectured values for first sense byte
- */
-#define SNS0_CMD_REJECT 0x80
-#define SNS_CMD_REJECT SNS0_CMD_REJEC
-#define SNS0_INTERVENTION_REQ 0x40
-#define SNS0_BUS_OUT_CHECK 0x20
-#define SNS0_EQUIPMENT_CHECK 0x10
-#define SNS0_DATA_CHECK 0x08
-#define SNS0_OVERRUN 0x04
-#define SNS0_INCOMPL_DOMAIN 0x01
-
-/*
- * architectured values for second sense byte
- */
-#define SNS1_PERM_ERR 0x80
-#define SNS1_INV_TRACK_FORMAT 0x40
-#define SNS1_EOC 0x20
-#define SNS1_MESSAGE_TO_OPER 0x10
-#define SNS1_NO_REC_FOUND 0x08
-#define SNS1_FILE_PROTECTED 0x04
-#define SNS1_WRITE_INHIBITED 0x02
-#define SNS1_INPRECISE_END 0x01
-
-/*
- * architectured values for third sense byte
- */
-#define SNS2_REQ_INH_WRITE 0x80
-#define SNS2_CORRECTABLE 0x40
-#define SNS2_FIRST_LOG_ERR 0x20
-#define SNS2_ENV_DATA_PRESENT 0x10
-#define SNS2_INPRECISE_END 0x04
+#include <asm/scsw.h>
/**
* struct ccw1 - channel command word
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
new file mode 100644
index 000000000000..de389cb54d28
--- /dev/null
+++ b/arch/s390/include/asm/scsw.h
@@ -0,0 +1,956 @@
+/*
+ * Helper functions for scsw access.
+ *
+ * Copyright IBM Corp. 2008,2009
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_SCSW_H_
+#define _ASM_S390_SCSW_H_
+
+#include <linux/types.h>
+#include <asm/chsc.h>
+#include <asm/cio.h>
+
+/**
+ * struct cmd_scsw - command-mode subchannel status word
+ * @key: subchannel key
+ * @sctl: suspend control
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @fmt: format
+ * @pfch: prefetch
+ * @isic: initial-status interruption control
+ * @alcc: address-limit checking control
+ * @ssi: suppress-suspended interruption
+ * @zcc: zero condition code
+ * @ectl: extended control
+ * @pno: path not operational
+ * @res: reserved
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @cpa: channel program address
+ * @dstat: device status
+ * @cstat: subchannel status
+ * @count: residual count
+ */
+struct cmd_scsw {
+ __u32 key : 4;
+ __u32 sctl : 1;
+ __u32 eswf : 1;
+ __u32 cc : 2;
+ __u32 fmt : 1;
+ __u32 pfch : 1;
+ __u32 isic : 1;
+ __u32 alcc : 1;
+ __u32 ssi : 1;
+ __u32 zcc : 1;
+ __u32 ectl : 1;
+ __u32 pno : 1;
+ __u32 res : 1;
+ __u32 fctl : 3;
+ __u32 actl : 7;
+ __u32 stctl : 5;
+ __u32 cpa;
+ __u32 dstat : 8;
+ __u32 cstat : 8;
+ __u32 count : 16;
+} __attribute__ ((packed));
+
+/**
+ * struct tm_scsw - transport-mode subchannel status word
+ * @key: subchannel key
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @fmt: format
+ * @x: IRB-format control
+ * @q: interrogate-complete
+ * @ectl: extended control
+ * @pno: path not operational
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @tcw: TCW address
+ * @dstat: device status
+ * @cstat: subchannel status
+ * @fcxs: FCX status
+ * @schxs: subchannel-extended status
+ */
+struct tm_scsw {
+ u32 key:4;
+ u32 :1;
+ u32 eswf:1;
+ u32 cc:2;
+ u32 fmt:3;
+ u32 x:1;
+ u32 q:1;
+ u32 :1;
+ u32 ectl:1;
+ u32 pno:1;
+ u32 :1;
+ u32 fctl:3;
+ u32 actl:7;
+ u32 stctl:5;
+ u32 tcw;
+ u32 dstat:8;
+ u32 cstat:8;
+ u32 fcxs:8;
+ u32 schxs:8;
+} __attribute__ ((packed));
+
+/**
+ * union scsw - subchannel status word
+ * @cmd: command-mode SCSW
+ * @tm: transport-mode SCSW
+ */
+union scsw {
+ struct cmd_scsw cmd;
+ struct tm_scsw tm;
+} __attribute__ ((packed));
+
+#define SCSW_FCTL_CLEAR_FUNC 0x1
+#define SCSW_FCTL_HALT_FUNC 0x2
+#define SCSW_FCTL_START_FUNC 0x4
+
+#define SCSW_ACTL_SUSPENDED 0x1
+#define SCSW_ACTL_DEVACT 0x2
+#define SCSW_ACTL_SCHACT 0x4
+#define SCSW_ACTL_CLEAR_PEND 0x8
+#define SCSW_ACTL_HALT_PEND 0x10
+#define SCSW_ACTL_START_PEND 0x20
+#define SCSW_ACTL_RESUME_PEND 0x40
+
+#define SCSW_STCTL_STATUS_PEND 0x1
+#define SCSW_STCTL_SEC_STATUS 0x2
+#define SCSW_STCTL_PRIM_STATUS 0x4
+#define SCSW_STCTL_INTER_STATUS 0x8
+#define SCSW_STCTL_ALERT_STATUS 0x10
+
+#define DEV_STAT_ATTENTION 0x80
+#define DEV_STAT_STAT_MOD 0x40
+#define DEV_STAT_CU_END 0x20
+#define DEV_STAT_BUSY 0x10
+#define DEV_STAT_CHN_END 0x08
+#define DEV_STAT_DEV_END 0x04
+#define DEV_STAT_UNIT_CHECK 0x02
+#define DEV_STAT_UNIT_EXCEP 0x01
+
+#define SCHN_STAT_PCI 0x80
+#define SCHN_STAT_INCORR_LEN 0x40
+#define SCHN_STAT_PROG_CHECK 0x20
+#define SCHN_STAT_PROT_CHECK 0x10
+#define SCHN_STAT_CHN_DATA_CHK 0x08
+#define SCHN_STAT_CHN_CTRL_CHK 0x04
+#define SCHN_STAT_INTF_CTRL_CHK 0x02
+#define SCHN_STAT_CHAIN_CHECK 0x01
+
+/*
+ * architectured values for first sense byte
+ */
+#define SNS0_CMD_REJECT 0x80
+#define SNS_CMD_REJECT SNS0_CMD_REJEC
+#define SNS0_INTERVENTION_REQ 0x40
+#define SNS0_BUS_OUT_CHECK 0x20
+#define SNS0_EQUIPMENT_CHECK 0x10
+#define SNS0_DATA_CHECK 0x08
+#define SNS0_OVERRUN 0x04
+#define SNS0_INCOMPL_DOMAIN 0x01
+
+/*
+ * architectured values for second sense byte
+ */
+#define SNS1_PERM_ERR 0x80
+#define SNS1_INV_TRACK_FORMAT 0x40
+#define SNS1_EOC 0x20
+#define SNS1_MESSAGE_TO_OPER 0x10
+#define SNS1_NO_REC_FOUND 0x08
+#define SNS1_FILE_PROTECTED 0x04
+#define SNS1_WRITE_INHIBITED 0x02
+#define SNS1_INPRECISE_END 0x01
+
+/*
+ * architectured values for third sense byte
+ */
+#define SNS2_REQ_INH_WRITE 0x80
+#define SNS2_CORRECTABLE 0x40
+#define SNS2_FIRST_LOG_ERR 0x20
+#define SNS2_ENV_DATA_PRESENT 0x10
+#define SNS2_INPRECISE_END 0x04
+
+/**
+ * scsw_is_tm - check for transport mode scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the specified scsw is a transport mode scsw, zero
+ * otherwise.
+ */
+static inline int scsw_is_tm(union scsw *scsw)
+{
+ return css_general_characteristics.fcx && (scsw->tm.x == 1);
+}
+
+/**
+ * scsw_key - return scsw key field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the key field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_key(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.key;
+ else
+ return scsw->cmd.key;
+}
+
+/**
+ * scsw_eswf - return scsw eswf field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the eswf field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_eswf(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.eswf;
+ else
+ return scsw->cmd.eswf;
+}
+
+/**
+ * scsw_cc - return scsw cc field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the cc field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_cc(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.cc;
+ else
+ return scsw->cmd.cc;
+}
+
+/**
+ * scsw_ectl - return scsw ectl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the ectl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_ectl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.ectl;
+ else
+ return scsw->cmd.ectl;
+}
+
+/**
+ * scsw_pno - return scsw pno field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the pno field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_pno(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.pno;
+ else
+ return scsw->cmd.pno;
+}
+
+/**
+ * scsw_fctl - return scsw fctl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the fctl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_fctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.fctl;
+ else
+ return scsw->cmd.fctl;
+}
+
+/**
+ * scsw_actl - return scsw actl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the actl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_actl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.actl;
+ else
+ return scsw->cmd.actl;
+}
+
+/**
+ * scsw_stctl - return scsw stctl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the stctl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_stctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.stctl;
+ else
+ return scsw->cmd.stctl;
+}
+
+/**
+ * scsw_dstat - return scsw dstat field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the dstat field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_dstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.dstat;
+ else
+ return scsw->cmd.dstat;
+}
+
+/**
+ * scsw_cstat - return scsw cstat field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the cstat field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_cstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.cstat;
+ else
+ return scsw->cmd.cstat;
+}
+
+/**
+ * scsw_cmd_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_key(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_sctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_sctl(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_eswf(union scsw *scsw)
+{
+ return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
+}
+
+/**
+ * scsw_cmd_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_cc(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
+ (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
+}
+
+/**
+ * scsw_cmd_is_valid_fmt - check fmt field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fmt field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_fmt(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_pfch - check pfch field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pfch field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_pfch(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_isic - check isic field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the isic field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_isic(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_alcc - check alcc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the alcc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_alcc(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_ssi - check ssi field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ssi field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_ssi(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_zcc - check zcc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the zcc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_zcc(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
+ (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
+}
+
+/**
+ * scsw_cmd_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
+{
+ return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+ !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
+ (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
+}
+
+/**
+ * scsw_cmd_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
+{
+ return (scsw->cmd.fctl != 0) &&
+ (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
+ ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
+ (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
+}
+
+/**
+ * scsw_cmd_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_fctl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1*/
+ return 1;
+}
+
+/**
+ * scsw_cmd_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_actl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1*/
+ return 1;
+}
+
+/**
+ * scsw_cmd_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_stctl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1*/
+ return 1;
+}
+
+/**
+ * scsw_cmd_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_dstat(union scsw *scsw)
+{
+ return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (scsw->cmd.cc != 3);
+}
+
+/**
+ * scsw_cmd_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_cstat(union scsw *scsw)
+{
+ return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (scsw->cmd.cc != 3);
+}
+
+/**
+ * scsw_tm_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_key(union scsw *scsw)
+{
+ return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_tm_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_eswf(union scsw *scsw)
+{
+ return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
+}
+
+/**
+ * scsw_tm_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_cc(union scsw *scsw)
+{
+ return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
+ (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
+}
+
+/**
+ * scsw_tm_is_valid_fmt - check fmt field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fmt field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_fmt(union scsw *scsw)
+{
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_x - check x field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the x field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_x(union scsw *scsw)
+{
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_q - check q field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the q field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_q(union scsw *scsw)
+{
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
+{
+ return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+ !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
+ (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
+}
+
+/**
+ * scsw_tm_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_pno(union scsw *scsw)
+{
+ return (scsw->tm.fctl != 0) &&
+ (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
+ ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
+ (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
+}
+
+/**
+ * scsw_tm_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_fctl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1*/
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_actl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1*/
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_stctl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1*/
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_dstat(union scsw *scsw)
+{
+ return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (scsw->tm.cc != 3);
+}
+
+/**
+ * scsw_tm_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_cstat(union scsw *scsw)
+{
+ return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (scsw->tm.cc != 3);
+}
+
+/**
+ * scsw_tm_is_valid_fcxs - check fcxs field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fcxs field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_fcxs(union scsw *scsw)
+{
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_schxs - check schxs field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the schxs field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_schxs(union scsw *scsw)
+{
+ return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
+ SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_PROT_CHECK |
+ SCHN_STAT_CHN_DATA_CHK));
+}
+
+/**
+ * scsw_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_actl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_actl(scsw);
+ else
+ return scsw_cmd_is_valid_actl(scsw);
+}
+
+/**
+ * scsw_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_cc(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_cc(scsw);
+ else
+ return scsw_cmd_is_valid_cc(scsw);
+}
+
+/**
+ * scsw_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_cstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_cstat(scsw);
+ else
+ return scsw_cmd_is_valid_cstat(scsw);
+}
+
+/**
+ * scsw_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_dstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_dstat(scsw);
+ else
+ return scsw_cmd_is_valid_dstat(scsw);
+}
+
+/**
+ * scsw_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_ectl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_ectl(scsw);
+ else
+ return scsw_cmd_is_valid_ectl(scsw);
+}
+
+/**
+ * scsw_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_eswf(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_eswf(scsw);
+ else
+ return scsw_cmd_is_valid_eswf(scsw);
+}
+
+/**
+ * scsw_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_fctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_fctl(scsw);
+ else
+ return scsw_cmd_is_valid_fctl(scsw);
+}
+
+/**
+ * scsw_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_key(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_key(scsw);
+ else
+ return scsw_cmd_is_valid_key(scsw);
+}
+
+/**
+ * scsw_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_pno(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_pno(scsw);
+ else
+ return scsw_cmd_is_valid_pno(scsw);
+}
+
+/**
+ * scsw_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_stctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_stctl(scsw);
+ else
+ return scsw_cmd_is_valid_stctl(scsw);
+}
+
+/**
+ * scsw_cmd_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the command mode scsw indicates that the associated
+ * status condition is solicited, zero if it is unsolicited.
+ */
+static inline int scsw_cmd_is_solicited(union scsw *scsw)
+{
+ return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
+ (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
+}
+
+/**
+ * scsw_tm_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the transport mode scsw indicates that the associated
+ * status condition is solicited, zero if it is unsolicited.
+ */
+static inline int scsw_tm_is_solicited(union scsw *scsw)
+{
+ return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
+ (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
+}
+
+/**
+ * scsw_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the transport or command mode scsw indicates that the
+ * associated status condition is solicited, zero if it is unsolicited.
+ */
+static inline int scsw_is_solicited(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_solicited(scsw);
+ else
+ return scsw_cmd_is_solicited(scsw);
+}
+
+#endif /* _ASM_S390_SCSW_H_ */
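The *_is_valid_* helpers above encode per-field validity rules; the dstat rule, for instance, requires the status-pending bit to be set and a deferred condition code other than 3. A stand-alone restatement of that rule from scsw_cmd_is_valid_dstat()/scsw_tm_is_valid_dstat(), on plain ints and with made-up test values (illustration only):

#include <stdio.h>

#define SCSW_STCTL_STATUS_PEND	0x1

/* Mirrors the dstat validity check used by both scsw variants above. */
static int dstat_is_valid(unsigned int stctl, unsigned int cc)
{
	return (stctl & SCSW_STCTL_STATUS_PEND) && (cc != 3);
}

int main(void)
{
	printf("%d\n", dstat_is_valid(SCSW_STCTL_STATUS_PEND, 0));	/* 1: status pending, cc usable */
	printf("%d\n", dstat_is_valid(SCSW_STCTL_STATUS_PEND, 3));	/* 0: deferred cc 3 */
	printf("%d\n", dstat_is_valid(0, 0));				/* 0: no status pending */
	return 0;
}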
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index cc21e3e20fd7..e4e7552d8151 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -88,6 +88,20 @@ int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
-extern u64 sched_clock_base_cc;
+extern u64 sched_clock_base;
+
+/**
+ * get_clock_monotonic - returns current time in clock rate units
+ *
+ * The caller must ensure that preemption is disabled.
+ * The clock and sched_clock_base get changed via stop_machine.
+ * Therefore preemption must be disabled when calling this
+ * function, otherwise the returned value is not guaranteed to
+ * be monotonic.
+ */
+static inline unsigned long long get_clock_monotonic(void)
+{
+ return get_clock_xt() - sched_clock_base;
+}
#endif
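get_clock_monotonic() above returns the TOD clock offset from sched_clock_base in raw clock units, and sched_clock() later in this merge converts those units to nanoseconds with (delta * 125) >> 9. That works because 4096 TOD clock units span one microsecond, so one unit is 1000/4096 = 125/512 ns. A stand-alone check of the arithmetic:

#include <stdio.h>

/* TOD clock units to nanoseconds, the way sched_clock() does it:
 * 4096 units per microsecond, so ns = units * 1000 / 4096 = (units * 125) >> 9. */
static unsigned long long tod_to_ns(unsigned long long delta)
{
	return (delta * 125) >> 9;
}

int main(void)
{
	printf("%llu\n", tod_to_ns(4096ULL));			/* one microsecond -> 1000 ns */
	printf("%llu\n", tod_to_ns(4096ULL * 1000000));		/* one second -> 1000000000 ns */
	return 0;
}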
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c75ed43b1a18..c7be8e10b87e 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o topology.o
-
+obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f9b144049dc9..7149c2f6d433 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -52,7 +52,7 @@ static void __init reset_tod_clock(void)
if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
disabled_wait(0);
- sched_clock_base_cc = TOD_UNIX_EPOCH;
+ sched_clock_base = TOD_UNIX_EPOCH;
}
#ifdef CONFIG_SHARED_KERNEL
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void)
machine_flags |= MACHINE_FLAG_VM;
}
-static void early_pgm_check_handler(void)
+static __init void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
@@ -222,7 +222,7 @@ static void early_pgm_check_handler(void)
S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
-void setup_lowcore_early(void)
+static noinline __init void setup_lowcore_early(void)
{
psw_t psw;
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ec6882348520..d5e6ee3b2c4f 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -535,7 +535,7 @@ startup:basr %r13,0 # get base
b 0(%r13)
.align 4
4: .long startup_continue
-5: .long sched_clock_base_cc
+5: .long sched_clock_base
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 2a0a5e97ba8c..dfe015d7398c 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -11,111 +11,27 @@
ftrace_stub:
br %r14
-#ifdef CONFIG_64BIT
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
.globl _mcount
_mcount:
- br %r14
-
- .globl ftrace_caller
-ftrace_caller:
- larl %r1,function_trace_stop
- icm %r1,0xf,0(%r1)
- bnzr %r14
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- larl %r14,ftrace_dyn_func
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl ftrace_graph_caller
-ftrace_graph_caller:
- # This unconditional branch gets runtime patched. Change only if
- # you know what you are doing. See ftrace_enable_graph_caller().
- j 0f
- lg %r2,272(%r15)
- lg %r3,168(%r15)
- brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
-0:
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
+#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
- .quad ftrace_stub
+ .long ftrace_stub
.previous
-#else /* CONFIG_DYNAMIC_FTRACE */
-
- .globl _mcount
-_mcount:
- larl %r1,function_trace_stop
- icm %r1,0xf,0(%r1)
- bnzr %r14
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- larl %r14,ftrace_trace_function
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- lg %r2,272(%r15)
- lg %r3,168(%r15)
- brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
- br %r14
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
- .globl return_to_handler
-return_to_handler:
- stmg %r2,%r5,32(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- brasl %r14,ftrace_return_to_handler
- aghi %r15,160
- lgr %r14,%r2
- lmg %r2,%r5,32(%r15)
- br %r14
-
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#else /* CONFIG_64BIT */
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
- .globl _mcount
-_mcount:
- br %r14
-
.globl ftrace_caller
ftrace_caller:
+#endif
stm %r2,%r5,16(%r15)
bras %r1,2f
+#ifdef CONFIG_DYNAMIC_FTRACE
+0: .long ftrace_dyn_func
+#else
0: .long ftrace_trace_function
+#endif
1: .long function_trace_stop
2: l %r2,1b-0b(%r1)
icm %r2,0xf,0(%r2)
@@ -131,53 +47,13 @@ ftrace_caller:
l %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_graph_caller().
j 1f
- bras %r1,0f
- .long prepare_ftrace_return
-0: l %r2,152(%r15)
- l %r4,0(%r1)
- l %r3,100(%r15)
- basr %r14,%r4
- st %r2,100(%r15)
-1:
#endif
- ahi %r15,96
- l %r14,56(%r15)
-3: lm %r2,%r5,16(%r15)
- br %r14
-
- .data
- .globl ftrace_dyn_func
-ftrace_dyn_func:
- .long ftrace_stub
- .previous
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
- .globl _mcount
-_mcount:
- stm %r2,%r5,16(%r15)
- bras %r1,2f
-0: .long ftrace_trace_function
-1: .long function_trace_stop
-2: l %r2,1b-0b(%r1)
- icm %r2,0xf,0(%r2)
- jnz 3f
- st %r14,56(%r15)
- lr %r0,%r15
- ahi %r15,-96
- l %r3,100(%r15)
- la %r2,0(%r14)
- st %r0,__SF_BACKCHAIN(%r15)
- la %r3,0(%r3)
- l %r14,0b-0b(%r1)
- l %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
bras %r1,0f
.long prepare_ftrace_return
0: l %r2,152(%r15)
@@ -185,14 +61,13 @@ _mcount:
l %r3,100(%r15)
basr %r14,%r4
st %r2,100(%r15)
+1:
#endif
ahi %r15,96
l %r14,56(%r15)
3: lm %r2,%r5,16(%r15)
br %r14
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
@@ -211,6 +86,4 @@ return_to_handler:
lm %r2,%r5,16(%r15)
br %r14
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#endif /* CONFIG_64BIT */
+#endif
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
new file mode 100644
index 000000000000..c37211c6092b
--- /dev/null
+++ b/arch/s390/kernel/mcount64.S
@@ -0,0 +1,78 @@
+/*
+ * Copyright IBM Corp. 2008,2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <asm/asm-offsets.h>
+
+ .globl ftrace_stub
+ftrace_stub:
+ br %r14
+
+ .globl _mcount
+_mcount:
+#ifdef CONFIG_DYNAMIC_FTRACE
+ br %r14
+
+ .data
+ .globl ftrace_dyn_func
+ftrace_dyn_func:
+ .quad ftrace_stub
+ .previous
+
+ .globl ftrace_caller
+ftrace_caller:
+#endif
+ larl %r1,function_trace_stop
+ icm %r1,0xf,0(%r1)
+ bnzr %r14
+ stmg %r2,%r5,32(%r15)
+ stg %r14,112(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,__SF_BACKCHAIN(%r15)
+ lgr %r2,%r14
+ lg %r3,168(%r15)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ larl %r14,ftrace_dyn_func
+#else
+ larl %r14,ftrace_trace_function
+#endif
+ lg %r14,0(%r14)
+ basr %r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+ .globl ftrace_graph_caller
+ftrace_graph_caller:
+ # This unconditional branch gets runtime patched. Change only if
+ # you know what you are doing. See ftrace_enable_graph_caller().
+ j 0f
+#endif
+ lg %r2,272(%r15)
+ lg %r3,168(%r15)
+ brasl %r14,prepare_ftrace_return
+ stg %r2,168(%r15)
+0:
+#endif
+ aghi %r15,160
+ lmg %r2,%r5,32(%r15)
+ lg %r14,112(%r15)
+ br %r14
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+ .globl return_to_handler
+return_to_handler:
+ stmg %r2,%r5,32(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,__SF_BACKCHAIN(%r15)
+ brasl %r14,ftrace_return_to_handler
+ aghi %r15,160
+ lgr %r14,%r2
+ lmg %r2,%r5,32(%r15)
+ br %r14
+
+#endif
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2270730f5354..be2cae083406 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -687,13 +687,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area;
-#else
- if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
- BUG();
#endif
set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable();
local_irq_enable();
+#ifdef CONFIG_64BIT
+ if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
+ BUG();
+#endif
for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
new file mode 100644
index 000000000000..086bee970cae
--- /dev/null
+++ b/arch/s390/kernel/suspend.c
@@ -0,0 +1,73 @@
+/*
+ * Suspend support specific for s390.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ */
+
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/ipl.h>
+
+/*
+ * References to section boundaries
+ */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * check if given pfn is in the 'nosave' or in the read only NSS section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
+ >> PAGE_SHIFT;
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ if (pfn >= stext_pfn && pfn <= eshared_pfn) {
+ if (ipl_info.type == IPL_TYPE_NSS)
+ return 1;
+ } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
+ return 1;
+ return 0;
+}
+
+void save_processor_state(void)
+{
+ /* swsusp_arch_suspend() actually saves all cpu register contents.
+ * Machine checks must be disabled since swsusp_arch_suspend() stores
+ * register contents to their lowcore save areas. That's the same
+ * place where register contents on machine checks would be saved.
+ * To avoid register corruption disable machine checks.
+ * We must also disable machine checks in the new psw mask for
+ * program checks, since swsusp_arch_suspend() may generate program
+ * checks. Disabling machine checks for all other new psw masks is
+ * just paranoia.
+ */
+ local_mcck_disable();
+ /* Disable lowcore protection */
+ __ctl_clear_bit(0,28);
+ S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
+}
+
+void restore_processor_state(void)
+{
+ S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
+ /* Enable lowcore protection */
+ __ctl_set_bit(0,28);
+ local_mcck_enable();
+}
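
Two details in the new suspend.c are worth spelling out. First, pfn_is_nosave() works on page frame numbers, so the section boundaries are converted with the usual shift-and-align arithmetic: the start address is truncated to its page, while the end address is rounded up with PAGE_ALIGN() so that a partially used last page is still excluded from the hibernation image. The small stand-alone program below walks through that arithmetic with made-up example addresses and 4 KiB pages; it is an illustration, not kernel code. Second, save_processor_state() clears the machine-check bit in the new PSWs and drops lowcore protection (control register 0, bit 28) because swsusp_arch_suspend() writes register contents into the lowcore save areas, as the comment in the code explains.

	/* Illustration of the pfn arithmetic, assuming 4 KiB pages and
	 * made-up example addresses. */
	#include <stdio.h>

	#define PAGE_SHIFT    12
	#define PAGE_SIZE     (1UL << PAGE_SHIFT)
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long nosave_begin = 0x2d4000;  /* example physical address */
		unsigned long nosave_end   = 0x2d5208;  /* example physical address */

		unsigned long first_pfn = nosave_begin >> PAGE_SHIFT;            /* 0x2d4 */
		unsigned long last_pfn  = PAGE_ALIGN(nosave_end) >> PAGE_SHIFT;  /* 0x2d6 */

		printf("nosave pfn range: [0x%lx, 0x%lx)\n", first_pfn, last_pfn);
		return 0;
	}
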
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 76d688da32fa..7cd6b096f0d1 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -21,7 +21,7 @@
* This function runs with disabled interrupts.
*/
.section .text
- .align 2
+ .align 4
.globl swsusp_arch_suspend
swsusp_arch_suspend:
stmg %r6,%r15,__SF_GPRS(%r15)
@@ -32,19 +32,14 @@ swsusp_arch_suspend:
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
- /* Switch off lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- ni __SF_EMPTY+4(%r15),0xef
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
- /* Setup base register for lowcore (absolute 0) */
- llgf %r1,__SF_EMPTY(%r15)
+ /* Save prefix register contents for lowcore */
+ llgf %r4,__SF_EMPTY(%r15)
/* Get pointer to save area */
- aghi %r1,0x1000
+ lghi %r1,0x1000
/* Store registers */
mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
@@ -79,17 +74,15 @@ swsusp_arch_suspend:
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
- /* Setup lowcore */
- brasl %r14,setup_lowcore_early
+ lghi %r2,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
/* Save image */
brasl %r14,swsusp_save
- /* Switch on lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- oi __SF_EMPTY+4(%r15),0x10
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Restore prefix register and return */
lghi %r1,0x1000
spx 0x318(%r1)
@@ -117,11 +110,6 @@ swsusp_arch_resume:
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
- /* Switch off lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- ni __SF_EMPTY+4(%r15),0xef
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Set prefix page to zero */
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
@@ -175,7 +163,7 @@ swsusp_arch_resume:
/* Load old stack */
lg %r15,0x2f8(%r13)
- /* Pointer to save arae */
+ /* Pointer to save area */
lghi %r13,0x1000
#ifdef CONFIG_SMP
@@ -187,11 +175,6 @@ swsusp_arch_resume:
/* Restore prefix register */
spx 0x318(%r13)
- /* Switch on lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- oi __SF_EMPTY+4(%r15),0x10
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
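
Two changes stand out in the swsusp_asm64.S hunks above. The toggling of lowcore protection via control register 0 is gone from the assembly because it is now handled in C by save_processor_state()/restore_processor_state() in the new suspend.c. And the call to setup_lowcore_early() is replaced by an mvcle loop: after the prefix register has been set to zero, the two lowcore pages are copied from the old prefix address down to absolute address 0. In C terms that loop is roughly the memcpy sketched below; this is my reading of the register pairs (%r2/%r3 destination, %r4/%r5 source), offered as an illustration only.

	/* Rough C equivalent of the mvcle copy loop above; illustration only. */
	#include <string.h>

	#define LOWCORE_PAGES 2
	#define LC_PAGE_SIZE  4096UL  /* PAGE_SIZE on s390 */

	static void copy_lowcore_sketch(unsigned long old_prefix)
	{
		void *dst = (void *)0UL;                     /* absolute lowcore at address 0 */
		const void *src = (const void *)old_prefix;  /* previous lowcore location */

		/* mvcle keeps copying until the destination length is exhausted */
		memcpy(dst, src, LOWCORE_PAGES * LC_PAGE_SIZE);
	}
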
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c81..6f0d86fb2aaa 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -59,7 +59,8 @@
#define TICK_SIZE tick
-u64 sched_clock_base_cc = -1; /* Force to data section. */
+u64 sched_clock_base = -1; /* Force to data section. */
+EXPORT_SYMBOL(sched_clock_base);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace sched_clock(void)
{
- return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
+ return (get_clock_monotonic() * 125) >> 9;
}
/*
@@ -277,7 +278,7 @@ void __init time_init(void)
tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
clocksource_tod.cycle_last = now;
clocksource_tod.raw_time = xtime;
- tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
+ tod_to_timeval(sched_clock_base - TOD_UNIX_EPOCH, &ts);
set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -315,7 +316,7 @@ static unsigned long long adjust_time(unsigned long long old,
delta = -delta;
adjust.offset = -ticks * (1000000 / HZ);
}
- sched_clock_base_cc += delta;
+ sched_clock_base += delta;
if (adjust.offset != 0) {
pr_notice("The ETR interface has adjusted the clock "
"by %li microseconds\n", adjust.offset);
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 79dbfee831ec..49106c6e6f88 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -88,10 +88,17 @@ __kernel_clock_gettime:
llilh %r4,0x0100
sar %a4,%r4
lghi %r4,0
+ epsw %r5,0
sacf 512 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
- sacf 0
- sar %a4,%r2
+ tml %r5,0x4000
+ jo 11f
+ tml %r5,0x8000
+ jno 10f
+ sacf 256
+ j 11f
+10: sacf 0
+11: sar %a4,%r2
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */
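
The vdso hunk above makes __kernel_clock_gettime restore the caller's address-space-control mode after the ectg instruction instead of unconditionally issuing sacf 0. The previous mode is captured with epsw before sacf 512, and the two mode bits in the extracted word are then tested to decide between doing nothing, sacf 256, or sacf 0. The sketch below shows that decision as C; the bit positions (0x8000/0x4000) and their mapping to primary/access-register/secondary/home are my reading of the PSW layout, so treat it as an annotated guess rather than a reference.

	/* Hedged sketch of the mode-restore decision; constants are assumptions. */
	enum as_mode { AS_PRIMARY, AS_SECONDARY, AS_AR_OR_HOME };

	static enum as_mode restore_after_ectg_sketch(unsigned int epsw_word)
	{
		if (epsw_word & 0x4000)
			return AS_AR_OR_HOME;   /* jo 11f: leave mode alone */
		if (epsw_word & 0x8000)
			return AS_SECONDARY;    /* sacf 256 */
		return AS_PRIMARY;              /* sacf 0 */
	}
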
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile
deleted file mode 100644
index 973bb45a8fec..000000000000
--- a/arch/s390/power/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for s390 PM support
-#
-
-obj-$(CONFIG_HIBERNATION) += suspend.o
-obj-$(CONFIG_HIBERNATION) += swsusp.o
-obj-$(CONFIG_HIBERNATION) += swsusp_64.o
-obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c
deleted file mode 100644
index b3351eceebbe..000000000000
--- a/arch/s390/power/suspend.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Suspend support specific for s390.
- *
- * Copyright IBM Corp. 2009
- *
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- */
-
-#include <linux/mm.h>
-#include <linux/suspend.h>
-#include <linux/reboot.h>
-#include <linux/pfn.h>
-#include <asm/sections.h>
-#include <asm/ipl.h>
-
-/*
- * References to section boundaries
- */
-extern const void __nosave_begin, __nosave_end;
-
-/*
- * check if given pfn is in the 'nosave' or in the read only NSS section
- */
-int pfn_is_nosave(unsigned long pfn)
-{
- unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
- unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
- >> PAGE_SHIFT;
- unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
- unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
-
- if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
- return 1;
- if (pfn >= stext_pfn && pfn <= eshared_pfn) {
- if (ipl_info.type == IPL_TYPE_NSS)
- return 1;
- } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
- return 1;
- return 0;
-}
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c
deleted file mode 100644
index e6a4fe9f5f24..000000000000
--- a/arch/s390/power/swsusp.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Support for suspend and resume on s390
- *
- * Copyright IBM Corp. 2009
- *
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
- */
-
-
-/*
- * save CPU registers before creating a hibernation image and before
- * restoring the memory state from it
- */
-void save_processor_state(void)
-{
- /* implentation contained in the
- * swsusp_arch_suspend function
- */
-}
-
-/*
- * restore the contents of CPU registers
- */
-void restore_processor_state(void)
-{
- /* implentation contained in the
- * swsusp_arch_resume function
- */
-}
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c
deleted file mode 100644
index 9516a517d72f..000000000000
--- a/arch/s390/power/swsusp_64.c
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Support for suspend and resume on s390
- *
- * Copyright IBM Corp. 2009
- *
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
- */
-
-#include <asm/system.h>
-#include <linux/interrupt.h>
-
-void do_after_copyback(void)
-{
- mb();
-}
-