diff --git a/src/snappy.c b/src/snappy.c
index c3b6ea8a..e74e6903 100644
--- a/src/snappy.c
+++ b/src/snappy.c
@@ -63,9 +63,6 @@
 
 #include "rd.h"
 
-#ifdef _MSC_VER
-#define inline __inline
-#endif
 
 #define CRASH_UNLESS(x) BUG_ON(!(x))
 #define CHECK(cond) CRASH_UNLESS(cond)
@@ -76,12 +73,12 @@
 #define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
 #define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
 
-#define UNALIGNED_LOAD16(_p) get_unaligned((u16 *)(_p))
-#define UNALIGNED_LOAD32(_p) get_unaligned((u32 *)(_p))
+#define UNALIGNED_LOAD16(_p) get_unaligned16((u16 *)(_p))
+#define UNALIGNED_LOAD32(_p) get_unaligned32((u32 *)(_p))
 #define UNALIGNED_LOAD64(_p) get_unaligned64((u64 *)(_p))
 
-#define UNALIGNED_STORE16(_p, _val) put_unaligned(_val, (u16 *)(_p))
-#define UNALIGNED_STORE32(_p, _val) put_unaligned(_val, (u32 *)(_p))
+#define UNALIGNED_STORE16(_p, _val) put_unaligned16(_val, (u16 *)(_p))
+#define UNALIGNED_STORE32(_p, _val) put_unaligned32(_val, (u32 *)(_p))
 #define UNALIGNED_STORE64(_p, _val) put_unaligned64(_val, (u64 *)(_p))
 
 /*
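The snappy.c hunks are mechanical: the MSVC `#define inline __inline` workaround moves into snappy_compat.h (where the new static inline helpers need it), and the width-agnostic get_unaligned()/put_unaligned() macros are split into explicit 16-, 32-, and 64-bit variants, because the typeof()-based statement-expression macros (a GCC extension MSVC cannot compile) are replaced below by fixed-type static inline functions. A minimal sketch of the two access styles the compat header chooses between (helper names here are illustrative, not taken from the patch):

    #include <stdint.h>
    #include <string.h>

    /* Direct style: a plain dereference, only safe on targets that
     * tolerate unaligned accesses (x86, PowerPC, ARM64 in this patch). */
    static inline uint32_t load32_direct(const void *p)
    {
        return *(const uint32_t *)p;
    }

    /* memcpy style: well-defined for any address; compilers typically
     * collapse it to a single load where the target allows it. */
    static inline uint32_t load32_memcpy(const void *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof v);
        return v;
    }
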
diff --git a/src/snappy_compat.h b/src/snappy_compat.h
index 77606552..21e0ea6b 100644
--- a/src/snappy_compat.h
+++ b/src/snappy_compat.h
@@ -44,23 +44,65 @@ struct iovec {
 };
 #endif
 
-#define get_unaligned_memcpy(x) ({ \
-	typeof(*(x)) _ret; \
-	memcpy(&_ret, (x), sizeof(*(x))); \
-	_ret; })
-#define put_unaligned_memcpy(v,x) ({ \
-	typeof((v)) _v = (v); \
-	memcpy((x), &_v, sizeof(*(x))); })
+#ifdef _MSC_VER
+#define inline __inline
+#endif
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned u32;
+typedef unsigned long long u64;
+
+
+static inline u16 get_unaligned16_memcpy(const void *p)
+{
+	u16 t;
+	memcpy(&t, p, sizeof t);
+	return t;
+}
+
+static inline u32 get_unaligned32_memcpy(const void *p)
+{
+	u32 t;
+	memcpy(&t, p, sizeof t);
+	return t;
+}
+
+static inline u64 get_unaligned64_memcpy(const void *p)
+{
+	u64 t;
+	memcpy(&t, p, sizeof t);
+	return t;
+}
+
+static inline void put_unaligned16_memcpy(u16 v, void *p)
+{
+	memcpy(p, &v, sizeof v);
+}
+
+static inline void put_unaligned32_memcpy(u32 v, void *p)
+{
+	memcpy(p, &v, sizeof v);
+}
+
+static inline void put_unaligned64_memcpy(u64 v, void *p)
+{
+	memcpy(p, &v, sizeof v);
+}
 
 #define get_unaligned_direct(x) (*(x))
 #define put_unaligned_direct(v,x) (*(x) = (v))
 
 // Potentially unaligned loads and stores.
-// x86 and PowerPC can simply do these loads and stores native.
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64)
-
-#define get_unaligned get_unaligned_direct
-#define put_unaligned put_unaligned_direct
+// x86, PowerPC, and ARM64 can simply do these loads and stores native.
+#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
+	defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64) || \
+	defined(__aarch64__)
+
+#define get_unaligned16 get_unaligned_direct
+#define put_unaligned16 put_unaligned_direct
+#define get_unaligned32 get_unaligned_direct
+#define put_unaligned32 put_unaligned_direct
 #define get_unaligned64 get_unaligned_direct
 #define put_unaligned64 put_unaligned_direct
 
@@ -86,29 +128,29 @@ struct iovec {
 	!defined(__ARM_ARCH_6ZK__) && \
 	!defined(__ARM_ARCH_6T2__)
 
-#define get_unaligned get_unaligned_direct
-#define put_unaligned put_unaligned_direct
-#define get_unaligned64 get_unaligned_memcpy
-#define put_unaligned64 put_unaligned_memcpy
+#define get_unaligned16 get_unaligned_direct
+#define put_unaligned16 put_unaligned_direct
+#define get_unaligned32 get_unaligned_direct
+#define put_unaligned32 put_unaligned_direct
+#define get_unaligned64 get_unaligned64_memcpy
+#define put_unaligned64 put_unaligned64_memcpy
 
 // These macroses are provided for architectures that don't support
 // unaligned loads and stores.
 #else
 
-#define get_unaligned get_unaligned_memcpy
-#define put_unaligned put_unaligned_memcpy
-#define get_unaligned64 get_unaligned_memcpy
-#define put_unaligned64 put_unaligned_memcpy
+#define get_unaligned16 get_unaligned16_memcpy
+#define put_unaligned16 put_unaligned16_memcpy
+#define get_unaligned32 get_unaligned32_memcpy
+#define put_unaligned32 put_unaligned32_memcpy
+#define get_unaligned64 get_unaligned64_memcpy
+#define put_unaligned64 put_unaligned64_memcpy
 
 #endif
 
-#define get_unaligned_le32(x) (le32toh(get_unaligned((u32 *)(x))))
-#define put_unaligned_le16(v,x) (put_unaligned(htole16(v), (u16 *)(x)))
+#define get_unaligned_le32(x) (le32toh(get_unaligned32((u32 *)(x))))
+#define put_unaligned_le16(v,x) (put_unaligned16(htole16(v), (u16 *)(x)))
 
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned u32;
-typedef unsigned long long u64;
 
 #ifdef _MSC_VER
 #define BUG_ON(x) do { if (unlikely((x))) abort(); } while (0)
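In snappy_compat.h the patch moves the u8/u16/u32/u64 typedefs above the helpers that use them, replaces the statement-expression get_unaligned_memcpy()/put_unaligned_memcpy() macros with per-width static inline functions, and adds __aarch64__ to the architectures that take the direct (native unaligned) path. A small, self-contained usage sketch of the memcpy-based accessors; the names below are stand-ins mirroring get_unaligned32_memcpy()/put_unaligned16_memcpy(), and the file does not include snappy_compat.h:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for get_unaligned32_memcpy() from the patched header. */
    static inline uint32_t load_u32(const void *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof v);
        return v;
    }

    /* Stand-in for put_unaligned16_memcpy() from the patched header. */
    static inline void store_u16(uint16_t v, void *p)
    {
        memcpy(p, &v, sizeof v);
    }

    int main(void)
    {
        unsigned char buf[8] = {0};

        store_u16(0xBEEF, buf + 1);  /* odd address: well-defined via memcpy */
        printf("0x%08" PRIX32 "\n", load_u32(buf + 1));
        return 0;
    }

On a little-endian host this prints 0x0000BEEF; the patched get_unaligned_le32()/put_unaligned_le16() wrappers add the le32toh()/htole16() byte-order conversion on top of these accessors.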