diff --git a/qemu/include/qemu/bswap.h b/qemu/include/qemu/bswap.h
index 78903d44..ce6934ef 100644
--- a/qemu/include/qemu/bswap.h
+++ b/qemu/include/qemu/bswap.h
@@ -300,51 +300,57 @@ static inline void stb_p(void *ptr, uint8_t v)
     *(uint8_t *)ptr = v;
 }
 
-/* Any compiler worth its salt will turn these memcpy into native unaligned
-   operations. Thus we don't need to play games with packed attributes, or
-   inline byte-by-byte stores. */
+/*
+ * Any compiler worth its salt will turn these memcpy into native unaligned
+ * operations. Thus we don't need to play games with packed attributes, or
+ * inline byte-by-byte stores.
+ * Some compilation environments (eg some fortify-source implementations)
+ * may intercept memcpy() in a way that defeats the compiler optimization,
+ * though, so we use __builtin_memcpy() to give ourselves the best chance
+ * of good performance.
+ */
 
 static inline int lduw_he_p(const void *ptr)
 {
     uint16_t r;
-    memcpy(&r, ptr, sizeof(r));
+    __builtin_memcpy(&r, ptr, sizeof(r));
     return r;
 }
 
 static inline int ldsw_he_p(const void *ptr)
 {
     int16_t r;
-    memcpy(&r, ptr, sizeof(r));
+    __builtin_memcpy(&r, ptr, sizeof(r));
     return r;
 }
 
 static inline void stw_he_p(void *ptr, uint16_t v)
 {
-    memcpy(ptr, &v, sizeof(v));
+    __builtin_memcpy(ptr, &v, sizeof(v));
 }
 
 static inline int ldl_he_p(const void *ptr)
 {
     int32_t r;
-    memcpy(&r, ptr, sizeof(r));
+    __builtin_memcpy(&r, ptr, sizeof(r));
     return r;
 }
 
 static inline void stl_he_p(void *ptr, uint32_t v)
 {
-    memcpy(ptr, &v, sizeof(v));
+    __builtin_memcpy(ptr, &v, sizeof(v));
 }
 
 static inline uint64_t ldq_he_p(const void *ptr)
 {
     uint64_t r;
-    memcpy(&r, ptr, sizeof(r));
+    __builtin_memcpy(&r, ptr, sizeof(r));
     return r;
 }
 
 static inline void stq_he_p(void *ptr, uint64_t v)
 {
-    memcpy(ptr, &v, sizeof(v));
+    __builtin_memcpy(ptr, &v, sizeof(v));
 }
 
 static inline int lduw_le_p(const void *ptr)
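
For context (not part of the patch): a minimal standalone sketch of the pattern
these accessors rely on, compilable with GCC or Clang, where __builtin_memcpy()
is available as a compiler builtin. The helper name load_u64_unaligned is
hypothetical, chosen for illustration only.

#include <stdint.h>
#include <stdio.h>

/*
 * Read a 64-bit value from a possibly unaligned pointer. With a
 * fixed, small size argument, an optimizing compiler turns this
 * __builtin_memcpy() into a single (unaligned) load instruction
 * rather than a call to libc memcpy(). Using the builtin also
 * bypasses any fortify-source macro wrapping of memcpy(), which
 * might otherwise defeat that optimization.
 */
static inline uint64_t load_u64_unaligned(const void *ptr)
{
    uint64_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

int main(void)
{
    unsigned char buf[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9};

    /* Deliberately misaligned read starting at offset 1. */
    printf("0x%016llx\n",
           (unsigned long long)load_u64_unaligned(buf + 1));
    return 0;
}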