author    | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 17:55:21 -0700
committer | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 17:55:21 -0700
commit    | bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree      | 216fdc1cbef450ca688135c5b8969169482d9a48 /arch/sh/include/asm/unaligned-sh4a.h
parent    | 3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent    | 657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
crypto/async_tx/async_xor.c
drivers/dma/ioat/dma_v2.h
drivers/dma/ioat/pci.c
drivers/md/raid5.c
Diffstat (limited to 'arch/sh/include/asm/unaligned-sh4a.h')
-rw-r--r-- | arch/sh/include/asm/unaligned-sh4a.h | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index d8f89770275b..9f4dd252c981 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -3,9 +3,9 @@

 /*
  * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 16 and 64-bit accesses are done through shifting and
- * masking relative to the endianness. Unaligned stores are not supported
- * by the instruction encoding, so these continue to use the packed
+ * Support for 64-bit accesses are done through shifting and masking
+ * relative to the endianness. Unaligned stores are not supported by the
+ * instruction encoding, so these continue to use the packed
  * struct.
  *
  * The same note as with the movli.l/movco.l pair applies here, as long
@@ -41,9 +41,9 @@ struct __una_u64 { u64 x __attribute__((packed)); };

 static inline u16 __get_unaligned_cpu16(const u8 *p)
 {
 #ifdef __LITTLE_ENDIAN
-	return __get_unaligned_cpu32(p) & 0xffff;
+	return p[0] | p[1] << 8;
 #else
-	return __get_unaligned_cpu32(p) >> 16;
+	return p[0] << 8 | p[1];
 #endif
 }
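
For context, the patched __get_unaligned_cpu16() assembles the halfword from two single-byte loads instead of masking or shifting a 32-bit unaligned load, so only p[0] and p[1] are ever read. Below is a minimal user-space sketch of that byte-assembly logic; the function names, the <stdint.h> types, and the sample buffer are illustrative assumptions, not the kernel's own helpers.

#include <stdint.h>
#include <stdio.h>

/* Byte-wise unaligned 16-bit load, mirroring the patched logic:
 * only p[0] and p[1] are touched, never bytes beyond the halfword. */
static uint16_t get_unaligned_16_le(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);	/* little-endian assembly */
}

static uint16_t get_unaligned_16_be(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);	/* big-endian assembly */
}

int main(void)
{
	/* A 16-bit value starting at an odd offset, occupying the last two
	 * bytes of the buffer: a 32-bit read-and-mask approach would have
	 * read past the end of the buffer here. */
	uint8_t buf[3] = { 0x00, 0x34, 0x12 };

	printf("LE view: 0x%04x\n", get_unaligned_16_le(buf + 1));	/* 0x1234 */
	printf("BE view: 0x%04x\n", get_unaligned_16_be(buf + 1));	/* 0x3412 */
	return 0;
}

The byte-wise form also matches the comment change above: the old code relied on __get_unaligned_cpu32() for 16-bit accesses, whereas after the patch only the 64-bit case is built from shifting and masking.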