Commit 7164f224 authored by niukuo, committed by yiteng.nyt

fix compile error on arm64

parent fc831f28
@@ -23,9 +23,15 @@
 #ifndef BTHREAD_PROCESSOR_H
 #define BTHREAD_PROCESSOR_H
 
+#include "butil/build_config.h"
+
 // Pause instruction to prevent excess processor bus usage, only works in GCC
 # ifndef cpu_relax
+#if defined(ARCH_CPU_ARM_FAMILY)
+# define cpu_relax() asm volatile("yield\n": : :"memory")
+#else
 # define cpu_relax() asm volatile("pause\n": : :"memory")
+#endif
 # endif
 
 // Compile read-write barrier
...
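The x86 "pause" mnemonic does not exist on AArch64, so the old single-definition macro fails to assemble there; "yield" is the ARM spin-loop hint with the same intent. For context, a minimal sketch of where such a macro sits in a spin-wait loop, using a hypothetical SpinLock that is not part of this commit and the compiler's __aarch64__ macro instead of the butil/build_config.h detection (GCC/Clang extended asm assumed):

    // Sketch only: a hypothetical spin lock showing how cpu_relax() is used.
    #include <atomic>

    #if defined(__aarch64__)
    # define cpu_relax() asm volatile("yield\n": : :"memory")
    #else
    # define cpu_relax() asm volatile("pause\n": : :"memory")
    #endif

    class SpinLock {
    public:
        void lock() {
            // Busy-wait for the flag; cpu_relax() tells the core we are
            // spinning, cutting bus traffic on x86 ("pause") and freeing
            // SMT resources on arm64 ("yield").
            while (_locked.exchange(true, std::memory_order_acquire)) {
                cpu_relax();
            }
        }
        void unlock() { _locked.store(false, std::memory_order_release); }
    private:
        std::atomic<bool> _locked{false};
    };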
@@ -132,7 +132,7 @@ static inline int FindMatchLength(const char* s1,
     matched += 4;
   }
   if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
-    uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+    uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
     int matching_bits = Bits::FindLSBSetNonZero(x);
     matched += matching_bits >> 3;
   } else {
...
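The patched line sits inside a bit trick worth spelling out: XOR-ing two little-endian 32-bit words leaves the first differing byte as the lowest non-zero byte, so the index of the least-significant set bit, shifted right by 3, is the count of whole bytes that match. A standalone check of the arithmetic, using GCC/Clang's __builtin_ctz where snappy calls Bits::FindLSBSetNonZero (assumed equivalent here):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        // "abcd" vs "abXd": bytes 0 and 1 match, byte 2 differs.
        const char s1[] = "abcd";
        const char s2[] = "abXd";
        uint32_t a, b;
        memcpy(&a, s1, 4);                    // unaligned-safe loads, as in snappy
        memcpy(&b, s2, 4);
        uint32_t x = a ^ b;                   // set bits mark differing positions
        int matching_bits = __builtin_ctz(x); // first differing bit (x is non-zero)
        printf("matched bytes: %d\n", matching_bits >> 3); // prints 2 on little-endian
        return 0;
    }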
@@ -114,13 +114,13 @@ namespace snappy {
 // See if that would be more efficient on platforms supporting it,
 // at least for copies.
 
-inline uint64_tUNALIGNED_LOAD64(const void *p) {
-  uint64_tt;
+inline uint64_t UNALIGNED_LOAD64(const void *p) {
+  uint64_t t;
   memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline void UNALIGNED_STORE64(void *p, uint64_tv) {
+inline void UNALIGNED_STORE64(void *p, uint64_t v) {
   memcpy(p, &v, sizeof v);
 }
...
@@ -141,8 +141,8 @@ inline uint32_t UNALIGNED_LOAD32(const void *p) {
   return t;
 }
 
-inline uint64_tUNALIGNED_LOAD64(const void *p) {
-  uint64_tt;
+inline uint64_t UNALIGNED_LOAD64(const void *p) {
+  uint64_t t;
   memcpy(&t, p, sizeof t);
   return t;
 }
...
@@ -155,7 +155,7 @@ inline void UNALIGNED_STORE32(void *p, uint32_t v) {
   memcpy(p, &v, sizeof v);
 }
 
-inline void UNALIGNED_STORE64(void *p, uint64_tv) {
+inline void UNALIGNED_STORE64(void *p, uint64_t v) {
   memcpy(p, &v, sizeof v);
 }
...
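These helpers exist because casting to uint64_t* and dereferencing a misaligned pointer is undefined behaviour and can trap on strict-alignment targets; copying through memcpy is the portable idiom, and optimizing compilers reduce it to a single load or store. That is presumably also why the mangled tokens above (uint64_tt, uint64_tv) surfaced only on arm64: on x86 snappy takes a native unaligned-access path instead of these memcpy stubs. A minimal restatement of the idiom, with illustrative names that are not from the commit:

    #include <cstdint>
    #include <cstring>

    // Portable unaligned 64-bit load: memcpy into a local avoids the undefined
    // behaviour of dereferencing a misaligned uint64_t*, and compilers lower it
    // to one (unaligned) load where the target supports that.
    inline uint64_t load_u64(const void* p) {
        uint64_t t;
        memcpy(&t, p, sizeof t);
        return t;
    }

    int main() {
        unsigned char buf[16] = {0};
        buf[3] = 0x2A;
        // &buf[1] is misaligned for uint64_t; the idiom still reads it safely.
        // The check is endianness-neutral: the loaded value is non-zero either way.
        return load_u64(&buf[1]) != 0 ? 0 : 1;
    }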