Diffstat (limited to 'include/linux')
 include/linux/crypto.h   |  6 ------
 include/linux/slab_def.h | 24 ++++++++++++++++++++++++
 include/linux/slob_def.h |  8 ++++++++
 include/linux/slub_def.h |  8 ++++++++
 4 files changed, 40 insertions(+), 6 deletions(-)
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 24d2e30f1b46..a6a7a1c83f54 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -99,13 +99,7 @@
  * as arm where pointers are 32-bit aligned but there are data types such as
  * u64 which require 64-bit alignment.
  */
-#if defined(ARCH_KMALLOC_MINALIGN)
 #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
-#elif defined(ARCH_SLAB_MINALIGN)
-#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN
-#else
-#define CRYPTO_MINALIGN __alignof__(unsigned long long)
-#endif
 
 #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
 
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index ca6b2b317991..1812dac8c496 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,30 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+#ifndef ARCH_KMALLOC_MINALIGN
+/*
+ * Enforce a minimum alignment for the kmalloc caches.
+ * Usually, the kmalloc caches are cache_line_size() aligned, except when
+ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
+ */
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+/*
+ * Enforce a minimum alignment for all caches.
+ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
+ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
+ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
+ * some debug features.
+ */
+#define ARCH_SLAB_MINALIGN 0
+#endif
+
 /*
  * struct kmem_cache
  *
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b39d006..62667f72c2ef 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,6 +1,14 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
+#endif
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0249d4175bac..55695c8d2f8a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,6 +116,14 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
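
For context beyond the patch itself: once every allocator header guarantees an
ARCH_KMALLOC_MINALIGN definition, CRYPTO_MINALIGN always resolves to it, which
is why the three-way fallback in crypto.h can be deleted. A minimal sketch of
how the macro pair is typically consumed follows; the struct and field names
are hypothetical and not part of this commit:

/*
 * Hypothetical example, not from this commit: a driver context whose
 * trailing buffer must be aligned at least as strictly as a kmalloc()
 * return, e.g. so that DMA into it is safe.  CRYPTO_MINALIGN_ATTR
 * expands to __attribute__ ((__aligned__(CRYPTO_MINALIGN))), and with
 * this patch CRYPTO_MINALIGN is always ARCH_KMALLOC_MINALIGN.
 */
#include <linux/types.h>
#include <linux/crypto.h>

struct example_cipher_ctx {
	u32 rounds;
	/* key material the hardware may read directly */
	u8  key_schedule[64] CRYPTO_MINALIGN_ATTR;
};

Note that the guaranteed minimum differs per allocator: SLOB defaults
ARCH_KMALLOC_MINALIGN to __alignof__(unsigned long), while SLAB and SLUB
default to __alignof__(unsigned long long).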