author | Paolo Bonzini <pbonzini@redhat.com> | 2011-07-28 12:10:30 +0200 |
---|---|---|
committer | Anthony Liguori <aliguori@us.ibm.com> | 2011-07-29 08:25:45 -0500 |
commit | cbbab9226da9572346837466a8770c117e7e65a2 (patch) | |
tree | 94940390582f196e164f6111c12a68df1787e5cf /bswap.h | |
parent | 789ec7ce20f34b175b3983707e077e8d67385126 (diff) |
move unaligned memory access functions to bswap.h
This is just code movement, and moving the fpu/ include path from
target-dependent to target-independent Make variables.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Diffstat (limited to 'bswap.h')
-rw-r--r-- | bswap.h | 474 |
1 files changed, 474 insertions, 0 deletions
@@ -11,6 +11,8 @@
 #include <machine/bswap.h>
 #else
 
+#include "softfloat.h"
+
 #ifdef CONFIG_BYTESWAP_H
 #include <byteswap.h>
 #else
@@ -237,4 +239,476 @@ static inline uint32_t qemu_bswap_len(uint32_t value, int len)
     return bswap32(value) >> (32 - 8 * len);
 }
 
+typedef union {
+    float32 f;
+    uint32_t l;
+} CPU_FloatU;
+
+typedef union {
+    float64 d;
+#if defined(HOST_WORDS_BIGENDIAN)
+    struct {
+        uint32_t upper;
+        uint32_t lower;
+    } l;
+#else
+    struct {
+        uint32_t lower;
+        uint32_t upper;
+    } l;
+#endif
+    uint64_t ll;
+} CPU_DoubleU;
+
+typedef union {
+    floatx80 d;
+    struct {
+        uint64_t lower;
+        uint16_t upper;
+    } l;
+} CPU_LDoubleU;
+
+typedef union {
+    float128 q;
+#if defined(HOST_WORDS_BIGENDIAN)
+    struct {
+        uint32_t upmost;
+        uint32_t upper;
+        uint32_t lower;
+        uint32_t lowest;
+    } l;
+    struct {
+        uint64_t upper;
+        uint64_t lower;
+    } ll;
+#else
+    struct {
+        uint32_t lowest;
+        uint32_t lower;
+        uint32_t upper;
+        uint32_t upmost;
+    } l;
+    struct {
+        uint64_t lower;
+        uint64_t upper;
+    } ll;
+#endif
+} CPU_QuadU;
+
+/* unaligned/endian-independent pointer access */
+
+/*
+ * the generic syntax is:
+ *
+ * load: ld{type}{sign}{size}{endian}_p(ptr)
+ *
+ * store: st{type}{size}{endian}_p(ptr, val)
+ *
+ * Note there are small differences with the softmmu access API!
+ *
+ * type is:
+ * (empty): integer access
+ *   f    : float access
+ *
+ * sign is:
+ * (empty): for floats or 32 bit size
+ *   u    : unsigned
+ *   s    : signed
+ *
+ * size is:
+ *   b: 8 bits
+ *   w: 16 bits
+ *   l: 32 bits
+ *   q: 64 bits
+ *
+ * endian is:
+ * (empty): 8 bit access
+ *   be   : big endian
+ *   le   : little endian
+ */
+static inline int ldub_p(const void *ptr)
+{
+    return *(uint8_t *)ptr;
+}
+
+static inline int ldsb_p(const void *ptr)
+{
+    return *(int8_t *)ptr;
+}
+
+static inline void stb_p(void *ptr, int v)
+{
+    *(uint8_t *)ptr = v;
+}
+
+/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
+   kernel handles unaligned load/stores may give better results, but
+   it is a system wide setting : bad */
+#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
+
+/* conservative code for little endian unaligned accesses */
+static inline int lduw_le_p(const void *ptr)
+{
+#ifdef _ARCH_PPC
+    int val;
+    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
+    return val;
+#else
+    const uint8_t *p = ptr;
+    return p[0] | (p[1] << 8);
+#endif
+}
+
+static inline int ldsw_le_p(const void *ptr)
+{
+#ifdef _ARCH_PPC
+    int val;
+    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
+    return (int16_t)val;
+#else
+    const uint8_t *p = ptr;
+    return (int16_t)(p[0] | (p[1] << 8));
+#endif
+}
+
+static inline int ldl_le_p(const void *ptr)
+{
+#ifdef _ARCH_PPC
+    int val;
+    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
+    return val;
+#else
+    const uint8_t *p = ptr;
+    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
+#endif
+}
+
+static inline uint64_t ldq_le_p(const void *ptr)
+{
+    const uint8_t *p = ptr;
+    uint32_t v1, v2;
+    v1 = ldl_le_p(p);
+    v2 = ldl_le_p(p + 4);
+    return v1 | ((uint64_t)v2 << 32);
+}
+
+static inline void stw_le_p(void *ptr, int v)
+{
+#ifdef _ARCH_PPC
+    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
+#else
+    uint8_t *p = ptr;
+    p[0] = v;
+    p[1] = v >> 8;
+#endif
+}
+
+static inline void stl_le_p(void *ptr, int v)
+{
+#ifdef _ARCH_PPC
+    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
+#else
+    uint8_t *p = ptr;
+    p[0] = v;
+    p[1] = v >> 8;
+    p[2] = v >> 16;
+    p[3] = v >> 24;
+#endif
+}
+
+static inline void stq_le_p(void *ptr, uint64_t v)
+{
+    uint8_t *p = ptr;
+    stl_le_p(p, (uint32_t)v);
+    stl_le_p(p + 4, v >> 32);
+}
+
+/* float access */
+
+static inline float32 ldfl_le_p(const void *ptr)
+{
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+    u.i = ldl_le_p(ptr);
+    return u.f;
+}
+
+static inline void stfl_le_p(void *ptr, float32 v)
+{
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+    u.f = v;
+    stl_le_p(ptr, u.i);
+}
+
+static inline float64 ldfq_le_p(const void *ptr)
+{
+    CPU_DoubleU u;
+    u.l.lower = ldl_le_p(ptr);
+    u.l.upper = ldl_le_p(ptr + 4);
+    return u.d;
+}
+
+static inline void stfq_le_p(void *ptr, float64 v)
+{
+    CPU_DoubleU u;
+    u.d = v;
+    stl_le_p(ptr, u.l.lower);
+    stl_le_p(ptr + 4, u.l.upper);
+}
+
+#else
+
+static inline int lduw_le_p(const void *ptr)
+{
+    return *(uint16_t *)ptr;
+}
+
+static inline int ldsw_le_p(const void *ptr)
+{
+    return *(int16_t *)ptr;
+}
+
+static inline int ldl_le_p(const void *ptr)
+{
+    return *(uint32_t *)ptr;
+}
+
+static inline uint64_t ldq_le_p(const void *ptr)
+{
+    return *(uint64_t *)ptr;
+}
+
+static inline void stw_le_p(void *ptr, int v)
+{
+    *(uint16_t *)ptr = v;
+}
+
+static inline void stl_le_p(void *ptr, int v)
+{
+    *(uint32_t *)ptr = v;
+}
+
+static inline void stq_le_p(void *ptr, uint64_t v)
+{
+    *(uint64_t *)ptr = v;
+}
+
+/* float access */
+
+static inline float32 ldfl_le_p(const void *ptr)
+{
+    return *(float32 *)ptr;
+}
+
+static inline float64 ldfq_le_p(const void *ptr)
+{
+    return *(float64 *)ptr;
+}
+
+static inline void stfl_le_p(void *ptr, float32 v)
+{
+    *(float32 *)ptr = v;
+}
+
+static inline void stfq_le_p(void *ptr, float64 v)
+{
+    *(float64 *)ptr = v;
+}
+#endif
+
+#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
+
+static inline int lduw_be_p(const void *ptr)
+{
+#if defined(__i386__)
+    int val;
+    asm volatile ("movzwl %1, %0\n"
+                  "xchgb %b0, %h0\n"
+                  : "=q" (val)
+                  : "m" (*(uint16_t *)ptr));
+    return val;
+#else
+    const uint8_t *b = ptr;
+    return ((b[0] << 8) | b[1]);
+#endif
+}
+
+static inline int ldsw_be_p(const void *ptr)
+{
+#if defined(__i386__)
+    int val;
+    asm volatile ("movzwl %1, %0\n"
+                  "xchgb %b0, %h0\n"
+                  : "=q" (val)
+                  : "m" (*(uint16_t *)ptr));
+    return (int16_t)val;
+#else
+    const uint8_t *b = ptr;
+    return (int16_t)((b[0] << 8) | b[1]);
+#endif
+}
+
+static inline int ldl_be_p(const void *ptr)
+{
+#if defined(__i386__) || defined(__x86_64__)
+    int val;
+    asm volatile ("movl %1, %0\n"
+                  "bswap %0\n"
+                  : "=r" (val)
+                  : "m" (*(uint32_t *)ptr));
+    return val;
+#else
+    const uint8_t *b = ptr;
+    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
+#endif
+}
+
+static inline uint64_t ldq_be_p(const void *ptr)
+{
+    uint32_t a,b;
+    a = ldl_be_p(ptr);
+    b = ldl_be_p((uint8_t *)ptr + 4);
+    return (((uint64_t)a<<32)|b);
+}
+
+static inline void stw_be_p(void *ptr, int v)
+{
+#if defined(__i386__)
+    asm volatile ("xchgb %b0, %h0\n"
+                  "movw %w0, %1\n"
+                  : "=q" (v)
+                  : "m" (*(uint16_t *)ptr), "0" (v));
+#else
+    uint8_t *d = (uint8_t *) ptr;
+    d[0] = v >> 8;
+    d[1] = v;
+#endif
+}
+
+static inline void stl_be_p(void *ptr, int v)
+{
+#if defined(__i386__) || defined(__x86_64__)
+    asm volatile ("bswap %0\n"
+                  "movl %0, %1\n"
+                  : "=r" (v)
+                  : "m" (*(uint32_t *)ptr), "0" (v));
+#else
+    uint8_t *d = (uint8_t *) ptr;
+    d[0] = v >> 24;
+    d[1] = v >> 16;
+    d[2] = v >> 8;
+    d[3] = v;
+#endif
+}
+
+static inline void stq_be_p(void *ptr, uint64_t v)
+{
+    stl_be_p(ptr, v >> 32);
+    stl_be_p((uint8_t *)ptr + 4, v);
+}
+
+/* float access */
+
+static inline float32 ldfl_be_p(const void *ptr)
+{
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+    u.i = ldl_be_p(ptr);
+    return u.f;
+}
+
+static inline void stfl_be_p(void *ptr, float32 v)
+{
+    union {
+        float32 f;
+        uint32_t i;
+    } u;
+    u.f = v;
+    stl_be_p(ptr, u.i);
+}
+
+static inline float64 ldfq_be_p(const void *ptr)
+{
+    CPU_DoubleU u;
+    u.l.upper = ldl_be_p(ptr);
+    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
+    return u.d;
+}
+
+static inline void stfq_be_p(void *ptr, float64 v)
+{
+    CPU_DoubleU u;
+    u.d = v;
+    stl_be_p(ptr, u.l.upper);
+    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
+}
+
+#else
+
+static inline int lduw_be_p(const void *ptr)
+{
+    return *(uint16_t *)ptr;
+}
+
+static inline int ldsw_be_p(const void *ptr)
+{
+    return *(int16_t *)ptr;
+}
+
+static inline int ldl_be_p(const void *ptr)
+{
+    return *(uint32_t *)ptr;
+}
+
+static inline uint64_t ldq_be_p(const void *ptr)
+{
+    return *(uint64_t *)ptr;
+}
+
+static inline void stw_be_p(void *ptr, int v)
+{
+    *(uint16_t *)ptr = v;
+}
+
+static inline void stl_be_p(void *ptr, int v)
+{
+    *(uint32_t *)ptr = v;
+}
+
+static inline void stq_be_p(void *ptr, uint64_t v)
+{
+    *(uint64_t *)ptr = v;
+}
+
+/* float access */
+
+static inline float32 ldfl_be_p(const void *ptr)
+{
+    return *(float32 *)ptr;
+}
+
+static inline float64 ldfq_be_p(const void *ptr)
+{
+    return *(float64 *)ptr;
+}
+
+static inline void stfl_be_p(void *ptr, float32 v)
+{
+    *(float32 *)ptr = v;
+}
+
+static inline void stfq_be_p(void *ptr, float64 v)
+{
+    *(float64 *)ptr = v;
+}
+
+#endif
+
 #endif /* BSWAP_H */
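For readers skimming the interface above, here is a small usage sketch of the accessors this patch moves into bswap.h. It is not part of the commit: the buffer, the constant, and main() are illustrative assumptions, and it presumes compilation inside the QEMU tree so that bswap.h and its dependencies (config-host.h, softfloat.h) are on the include path.

/* Hypothetical example (not part of the patch): store a 32-bit value into a
 * byte buffer at an unaligned offset, then read it back in both byte orders.
 * The _le/_be suffix selects the in-memory byte order; the helpers work on
 * any host regardless of its endianness or alignment requirements. */
#include <stdio.h>
#include <stdint.h>
#include "bswap.h"

int main(void)
{
    uint8_t buf[8] = { 0 };

    stl_le_p(buf + 1, 0x11223344);      /* bytes 44 33 22 11 at buf[1..4] */

    uint32_t le = ldl_le_p(buf + 1);    /* 0x11223344 */
    uint32_t be = ldl_be_p(buf + 1);    /* 0x44332211: same bytes, other order */

    printf("le=%08x be=%08x\n", (unsigned)le, (unsigned)be);
    return 0;
}

The example only exercises the integer accessors; the ldf*/stf* variants follow the same pattern but return or take float32/float64 values via the CPU_FloatU/CPU_DoubleU unions defined in the patch.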