 arch/Kconfig                   | 19 +++++++++++++++++++
 include/linux/compiler-gcc4.h  | 10 ++++++++++
 include/linux/compiler-intel.h |  7 +++++++
 include/uapi/linux/swab.h      | 12 +++++++++---
 4 files changed, 45 insertions(+), 3 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 366ec06a5185..c31416b10586 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -112,6 +112,25 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	  See Documentation/unaligned-memory-access.txt for more
 	  information on the topic of unaligned memory accesses.
 
+config ARCH_USE_BUILTIN_BSWAP
+	bool
+	help
+	  Modern versions of GCC (since 4.4) have builtin functions
+	  for handling byte-swapping. Using these, instead of the old
+	  inline assembler that the architecture code provides in the
+	  __arch_bswapXX() macros, allows the compiler to see what's
+	  happening and offers more opportunity for optimisation. In
+	  particular, the compiler will be able to combine the byteswap
+	  with a nearby load or store and use load-and-swap or
+	  store-and-swap instructions if the architecture has them. It
+	  should almost *never* result in code which is worse than the
+	  hand-coded assembler in <asm/swab.h>. But just in case it
+	  does, the use of the builtins is optional.
+
+	  Any architecture with load-and-swap or store-and-swap
+	  instructions should set this. And it shouldn't hurt to set it
+	  on architectures that don't have such instructions.
+
 config HAVE_SYSCALL_WRAPPERS
 	bool
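
A minimal stand-alone sketch (not part of the patch) of the optimisation the
help text describes: because the compiler can see through
__builtin_bswap32(), it is free to fuse the swap with the adjacent load,
e.g. into lwbrx on PowerPC or movbe on x86 where available, whereas the
__arch_swab32() inline assembler is opaque to it. load_be32() is an
illustrative name, and GCC >= 4.4 is assumed per the help text above.

    #include <stdint.h>

    /* Read a big-endian 32-bit value on a little-endian host.  With the
     * builtin visible to the optimiser, GCC can emit a single
     * load-and-swap instruction instead of a load followed by a swap. */
    static inline uint32_t load_be32(const uint32_t *p)
    {
            return __builtin_bswap32(*p);
    }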
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 412bc6c2b023..dc16a858e77c 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -63,3 +63,13 @@
 #define __compiletime_warning(message) __attribute__((warning(message)))
 #define __compiletime_error(message) __attribute__((error(message)))
 #endif
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if __GNUC_MINOR__ >= 4
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif
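
These guards test only __GNUC_MINOR__ because compiler-gcc4.h is included
solely when __GNUC__ == 4 (compiler-gcc.h selects the header by major
version). A stand-alone sketch of the equivalent check for code that cannot
assume the major version; HAVE_BSWAP32_BUILTIN is an illustrative name, not
a kernel macro:

    /* True on GCC 4.4+ (and any later major), matching the gate above. */
    #if defined(__GNUC__) && \
        (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
    #define HAVE_BSWAP32_BUILTIN 1
    #endif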
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index d8e636e5607d..973ce10c40b6 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -29,3 +29,10 @@
 #endif
 #define uninitialized_var(x) x
+
+#ifndef __HAVE_BUILTIN_BSWAP16__
+/* icc has this, but it's called _bswap16 */
+#define __HAVE_BUILTIN_BSWAP16__
+#define __builtin_bswap16 _bswap16
+#endif
+
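
With the alias in place, common code can keep using the GCC spelling under
icc as well: the preprocessor rewrites __builtin_bswap16() to icc's
_bswap16(). A hedged sketch of such a consumer; swap16() is an illustrative
name, not a kernel function:

    #include <stdint.h>

    /* Compiler-agnostic 16-bit swap: a builtin (or the icc intrinsic via
     * the alias) when __HAVE_BUILTIN_BSWAP16__ is set, plain C otherwise. */
    static inline uint16_t swap16(uint16_t v)
    {
    #ifdef __HAVE_BUILTIN_BSWAP16__
            return __builtin_bswap16(v);
    #else
            return (uint16_t)((v << 8) | (v >> 8));
    #endif
    }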
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index e811474724c2..0e011eb91b5d 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,7 +45,9 @@
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __arch_swab16
+#ifdef __HAVE_BUILTIN_BSWAP16__
+	return __builtin_bswap16(val);
+#elif defined (__arch_swab16)
 	return __arch_swab16(val);
 #else
 	return ___constant_swab16(val);
@@ -54,7 +56,9 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __arch_swab32
+#ifdef __HAVE_BUILTIN_BSWAP32__
+	return __builtin_bswap32(val);
+#elif defined(__arch_swab32)
 	return __arch_swab32(val);
 #else
 	return ___constant_swab32(val);
@@ -63,7 +67,9 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __arch_swab64
+#ifdef __HAVE_BUILTIN_BSWAP64__
+	return __builtin_bswap64(val);
+#elif defined (__arch_swab64)
 	return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
 	__u32 h = val >> 32;
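
The final fallback branch in each function is plain shifts and masks that
the compiler can constant-fold. A stand-alone version of the 32-bit case,
modelled on ___constant_swab32() from this same header; constant_swab32()
is an illustrative name:

    #include <stdint.h>

    /* Mask-and-shift byteswap: usable on constants, and a pattern that
     * recent compilers can often recognise and turn into a bswap
     * instruction even without the builtin. */
    static inline uint32_t constant_swab32(uint32_t x)
    {
            return ((x & 0x000000ffU) << 24) |
                   ((x & 0x0000ff00U) <<  8) |
                   ((x & 0x00ff0000U) >>  8) |
                   ((x & 0xff000000U) >> 24);
    }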