Unverified commit 227363bc authored by Adam Cozzette, committed by GitHub

Merge pull request #4412 from acozzette/remove-old-files

Removed some unused C++ source files
parents ed4321d1 0c5fcdee
@@ -89,13 +89,10 @@ cc_library(
"src/google/protobuf/io/zero_copy_stream_impl_lite.cc",
"src/google/protobuf/message_lite.cc",
"src/google/protobuf/repeated_field.cc",
"src/google/protobuf/stubs/atomicops_internals_x86_gcc.cc",
"src/google/protobuf/stubs/atomicops_internals_x86_msvc.cc",
"src/google/protobuf/stubs/bytestream.cc",
"src/google/protobuf/stubs/common.cc",
"src/google/protobuf/stubs/int128.cc",
"src/google/protobuf/stubs/io_win32.cc",
"src/google/protobuf/stubs/once.cc",
"src/google/protobuf/stubs/status.cc",
"src/google/protobuf/stubs/statusor.cc",
"src/google/protobuf/stubs/stringpiece.cc",
@@ -556,7 +553,6 @@ cc_test(
"src/google/protobuf/stubs/common_unittest.cc",
"src/google/protobuf/stubs/int128_unittest.cc",
"src/google/protobuf/stubs/io_win32_unittest.cc",
"src/google/protobuf/stubs/once_unittest.cc",
"src/google/protobuf/stubs/status_test.cc",
"src/google/protobuf/stubs/statusor_test.cc",
"src/google/protobuf/stubs/stringpiece_unittest.cc",
@@ -565,7 +561,6 @@ cc_test(
"src/google/protobuf/stubs/strutil_unittest.cc",
"src/google/protobuf/stubs/template_util_unittest.cc",
"src/google/protobuf/stubs/time_test.cc",
"src/google/protobuf/stubs/type_traits_unittest.cc",
"src/google/protobuf/text_format_unittest.cc",
"src/google/protobuf/unknown_field_set_unittest.cc",
"src/google/protobuf/util/delimited_message_util_test.cc",
@@ -54,6 +54,7 @@ copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\generated_message_tab
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\generated_message_util.h" include\google\protobuf\generated_message_util.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\has_bits.h" include\google\protobuf\has_bits.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\implicit_weak_message.h" include\google\protobuf\implicit_weak_message.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\inlined_string_field.h" include\google\protobuf\inlined_string_field.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\io\coded_stream.h" include\google\protobuf\io\coded_stream.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\io\gzip_stream.h" include\google\protobuf\io\gzip_stream.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\io\printer.h" include\google\protobuf\io\printer.h
@@ -96,7 +97,6 @@ copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\stubs\status.h" inclu
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\stubs\stl_util.h" include\google\protobuf\stubs\stl_util.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\stubs\stringpiece.h" include\google\protobuf\stubs\stringpiece.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\stubs\template_util.h" include\google\protobuf\stubs\template_util.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\stubs\type_traits.h" include\google\protobuf\stubs\type_traits.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\text_format.h" include\google\protobuf\text_format.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\timestamp.pb.h" include\google\protobuf\timestamp.pb.h
copy "${PROTOBUF_SOURCE_WIN32_PATH}\..\src\google\protobuf\type.pb.h" include\google\protobuf\type.pb.h
@@ -155,8 +155,7 @@ set(tests_files
${protobuf_source_dir}/src/google/protobuf/preserve_unknown_enum_test.cc
${protobuf_source_dir}/src/google/protobuf/proto3_arena_lite_unittest.cc
${protobuf_source_dir}/src/google/protobuf/proto3_arena_unittest.cc
# TODO(b/74491957) Make this unittest work
# ${protobuf_source_dir}/src/google/protobuf/proto3_lite_unittest.cc
${protobuf_source_dir}/src/google/protobuf/proto3_lite_unittest.cc
${protobuf_source_dir}/src/google/protobuf/reflection_ops_unittest.cc
${protobuf_source_dir}/src/google/protobuf/repeated_field_reflection_unittest.cc
${protobuf_source_dir}/src/google/protobuf/repeated_field_unittest.cc
@@ -81,7 +81,6 @@ nobase_include_HEADERS = \
google/protobuf/stubs/stl_util.h \
google/protobuf/stubs/stringpiece.h \
google/protobuf/stubs/template_util.h \
google/protobuf/stubs/type_traits.h \
google/protobuf/any.pb.h \
google/protobuf/api.pb.h \
google/protobuf/any.h \
@@ -762,7 +761,6 @@ protobuf_test_SOURCES = \
google/protobuf/stubs/strutil_unittest.cc \
google/protobuf/stubs/template_util_unittest.cc \
google/protobuf/stubs/time_test.cc \
google/protobuf/stubs/type_traits_unittest.cc \
google/protobuf/any_test.cc \
google/protobuf/arenastring_unittest.cc \
google/protobuf/arena_unittest.cc \
// Protocol Buffers - Google's data interchange format
// Copyright 2014 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_
#define GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_
#include <google/protobuf/stubs/atomicops.h>
namespace google {
namespace protobuf {
namespace internal {
class SequenceNumber {
public:
SequenceNumber() : word_(0) {}
AtomicWord GetNext() {
return NoBarrier_AtomicIncrement(&word_, 1) - 1;
}
private:
AtomicWord word_;
};
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_
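
For context, here is a minimal sketch of how a caller might use SequenceNumber
to hand out process-wide unique IDs; the IdRegistry class is hypothetical and
not part of the protobuf tree:

// Illustrative sketch only (hypothetical caller, not protobuf code).
// GetNext() returns 0, 1, 2, ... and is safe to call concurrently because
// the increment is a single atomic read-modify-write.
class IdRegistry {
 public:
  google::protobuf::internal::AtomicWord NewId() { return seq_.GetNext(); }

 private:
  google::protobuf::internal::SequenceNumber seq_;
};
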
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// (BSD 3-clause license text identical to the block above.)
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
namespace google {
namespace protobuf {
namespace internal {
inline void MemoryBarrierInternal() {
__asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
}
// The NoBarrier versions of these operations include "memory" in the clobber
// list. This is not required for direct use of the NoBarrier versions, but it
// is required for correctness when they are used as part of the Acquire or
// Release versions, to ensure that nothing from outside the call is reordered
// between the operation and the memory barrier. It does not change the code
// generated, so it has little or no impact on the NoBarrier operations.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"IJr" (old_value),
[new_value]"r" (new_value)
: "cc", "memory"
); // NOLINT
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"add %w[result], %w[result], %w[increment]\n\t"
"stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
"cbnz %w[temp], 0b \n\t" // Retry on failure.
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [increment]"IJr" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
MemoryBarrierInternal();
Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrierInternal();
return result;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrierInternal();
return prev;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrierInternal();
Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return prev;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__asm__ __volatile__ ( // NOLINT
"stlr %w[value], %[ptr] \n\t"
: [ptr]"=Q" (*ptr)
: [value]"r" (value)
: "memory"
); // NOLINT
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value;
__asm__ __volatile__ ( // NOLINT
"ldar %w[value], %[ptr] \n\t"
: [value]"=r" (value)
: [ptr]"Q" (*ptr)
: "memory"
); // NOLINT
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrierInternal();
return *ptr;
}
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"IJr" (old_value),
[new_value]"r" (new_value)
: "cc", "memory"
); // NOLINT
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], %[ptr] \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], %[ptr] \n\t"
"add %[result], %[result], %[increment] \n\t"
"stxr %w[temp], %[result], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [increment]"IJr" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
MemoryBarrierInternal();
Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrierInternal();
return result;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrierInternal();
return prev;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrierInternal();
Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return prev;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__ ( // NOLINT
"stlr %x[value], %[ptr] \n\t"
: [ptr]"=Q" (*ptr)
: [value]"r" (value)
: "memory"
); // NOLINT
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__ ( // NOLINT
"ldar %x[value], %[ptr] \n\t"
: [value]"=r" (value)
: [ptr]"Q" (*ptr)
: "memory"
); // NOLINT
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrierInternal();
return *ptr;
}
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
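
Since C++11, the hand-rolled barriers in this header map onto std::atomic
memory orders. A rough equivalence sketch (illustrative only; this is not how
protobuf itself replaced the header):

#include <atomic>
#include <cstdint>

using Atomic32 = int32_t;

// Matches Acquire_CompareAndSwap: the RMW carries acquire ordering, and the
// function returns the value that was actually observed at *a.
inline Atomic32 CxxAcquireCompareAndSwap(std::atomic<Atomic32>* a,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  a->compare_exchange_strong(old_value, new_value,
                             std::memory_order_acquire,
                             std::memory_order_acquire);
  return old_value;  // On failure, compare_exchange_strong wrote *a here.
}

// Matches Release_Store, which the assembly above implements with "stlr".
inline void CxxReleaseStore(std::atomic<Atomic32>* a, Atomic32 value) {
  a->store(value, std::memory_order_release);
}

// Matches Acquire_Load, which the assembly above implements with "ldar".
inline Atomic32 CxxAcquireLoad(const std::atomic<Atomic32>* a) {
  return a->load(std::memory_order_acquire);
}
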
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// (BSD 3-clause license text identical to the block above.)
// This file is an internal atomic implementation, use atomicops.h instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
namespace google {
namespace protobuf {
namespace internal {
// 0xffff0fc0 is the hard coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
Atomic32 new_value,
volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
(LinuxKernelCmpxchgFunc) 0xffff0fc0;
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
(LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = *ptr;
do {
// The kernel helper returns zero when the exchange succeeded.
if (!pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
// The helper failed: either *ptr no longer equals old_value (give up and
// report the value we saw), or the store was interrupted (retry).
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
for (;;) {
// Atomic exchange the old value with an incremented one.
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
if (pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)) == 0) {
// The exchange took place as expected.
return new_value;
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrierInternal() {
pLinuxKernelMemoryBarrier();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrierInternal();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrierInternal();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrierInternal();
return *ptr;
}
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
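
The 0xffff0fc0 helper used above follows the Linux kuser_cmpxchg convention:
it returns zero if the exchange happened and nonzero otherwise. A hedged
sketch of a boolean wrapper written directly against that convention (the
TryCompareExchange name is illustrative, not from the protobuf sources):

// Sets *ptr to new_value only if it currently holds expected.
// Returns true on success; pLinuxKernelCmpxchg returns 0 in that case.
inline bool TryCompareExchange(volatile Atomic32* ptr,
                               Atomic32 expected,
                               Atomic32 new_value) {
  return pLinuxKernelCmpxchg(expected, new_value,
                             const_cast<Atomic32*>(ptr)) == 0;
}
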
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// (BSD 3-clause license text identical to the block above.)
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_
// For _smp_cmpxchg()
#include <pthread.h>
namespace google {
namespace protobuf {
namespace internal {
inline Atomic32 QNXCmpxchg(Atomic32 old_value,
Atomic32 new_value,
volatile Atomic32* ptr) {
return static_cast<Atomic32>(
_smp_cmpxchg((volatile unsigned *)ptr,
(unsigned)old_value,
(unsigned)new_value));
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = *ptr;
do {
if (!QNXCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (QNXCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
for (;;) {
// Atomic exchange the old value with an incremented one.
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
if (QNXCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)) == 0) {
// The exchange took place as expected.
return new_value;
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrierInternal() {
__sync_synchronize();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrierInternal();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrierInternal();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrierInternal();
return *ptr;
}
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// (BSD 3-clause license text identical to the block above.)
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_C11_ATOMIC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_C11_ATOMIC_H_
#include <atomic>
namespace google {
namespace protobuf {
namespace internal {
// This implementation is transitional and maintains the original API for
// atomicops.h. It requires casting memory locations to the atomic types and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertions should detect this issue; if one fires, this header must not be
// used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
// modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
"incompatible 32-bit atomic layout");
inline void MemoryBarrierInternal() {
#if defined(__GLIBCXX__)
// Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
// not defined, leading to the linker complaining about undefined references.
__atomic_thread_fence(std::memory_order_seq_cst);
#else
std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
((AtomicLocation32)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_relaxed,
std::memory_order_relaxed);
return old_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return ((AtomicLocation32)ptr)
->exchange(new_value, std::memory_order_relaxed);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment +
((AtomicLocation32)ptr)
->fetch_add(increment, std::memory_order_relaxed);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
((AtomicLocation32)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_acquire,
std::memory_order_acquire);
return old_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
((AtomicLocation32)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_release,
std::memory_order_relaxed);
return old_value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrierInternal();
return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}
#if defined(GOOGLE_PROTOBUF_ARCH_64_BIT)
typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
"incompatible 64-bit atomic layout");
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
((AtomicLocation64)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_relaxed,
std::memory_order_relaxed);
return old_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return ((AtomicLocation64)ptr)
->exchange(new_value, std::memory_order_relaxed);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment +
((AtomicLocation64)ptr)
->fetch_add(increment, std::memory_order_relaxed);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
((AtomicLocation64)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_acquire,
std::memory_order_acquire);
return old_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
((AtomicLocation64)ptr)
->compare_exchange_strong(old_value,
new_value,
std::memory_order_release,
std::memory_order_relaxed);
return old_value;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrierInternal();
return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}
#endif // defined(GOOGLE_PROTOBUF_ARCH_64_BIT)
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_C11_ATOMIC_H_
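
One API detail worth noting: the CompareAndSwap wrappers above return the
value observed at *ptr rather than a success flag, because
compare_exchange_strong updates old_value in place on failure. A sketch of the
usual retry loop written against that return convention (the AddViaCas helper
is illustrative, not part of the library):

// Adds delta to *ptr using only CompareAndSwap, retrying on contention.
inline Atomic32 AddViaCas(volatile Atomic32* ptr, Atomic32 delta) {
  Atomic32 observed = NoBarrier_Load(ptr);
  for (;;) {
    Atomic32 prev = NoBarrier_CompareAndSwap(ptr, observed, observed + delta);
    if (prev == observed) return observed + delta;  // CAS succeeded.
    observed = prev;  // Lost a race; retry from the value we actually saw.
  }
}
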
// Copyright 2013 Red Hat Inc. All rights reserved.
//
// (BSD 3-clause license text as above, with Red Hat Inc. named in place of
// Google Inc.)
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
namespace google {
namespace protobuf {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return old_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
return old_value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
inline void MemoryBarrierInternal() {
__sync_synchronize();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}
#ifdef __LP64__
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_value;
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
return old_value;
}
#endif // defined(__LP64__)
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
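
As an illustration of why the Acquire and Release variants exist, here is a
minimal test-and-set spinlock sketch built on this API (hypothetical example,
not part of the library): the acquire on Lock and the release on Unlock are
what keep the critical section from leaking past either end.

class SpinLock {
 public:
  void Lock() {
    // Acquire: nothing in the critical section may be reordered before we
    // actually own the lock.
    while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the lock is observed free.
    }
  }
  void Unlock() {
    // Release: every write in the critical section becomes visible before
    // the lock is seen as free.
    Release_Store(&state_, 0);
  }

 private:
  volatile Atomic32 state_ = 0;  // 0 = free, 1 = held.
};
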
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// (BSD 3-clause license text identical to the block above.)
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace google {
namespace protobuf {
namespace internal {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev, tmp;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value
"sc %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "r" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %1, %4\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %4\n" // temp = *ptr
"addu %1, %0, %3\n" // temp2 = temp + increment
"sc %1, %2\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"addu %1, %0, %3\n" // temp2 = temp + increment
".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no
// memory access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
return res;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrierInternal() {
__asm__ __volatile__("sync" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrierInternal();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrierInternal();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrierInternal();
return *ptr;
}
#if defined(__LP64__)
// 64-bit versions of the atomic ops.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev, tmp;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value
"scd %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "r" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %1, %4\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"scd %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp, temp2;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %0, %4\n" // temp = *ptr
"daddu %1, %0, %3\n" // temp2 = temp + increment
"scd %1, %2\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"daddu %1, %0, %3\n" // temp2 = temp + increment
".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
MemoryBarrierInternal();
Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrierInternal();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no
// memory access.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrierInternal();
return res;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrierInternal();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrierInternal();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrierInternal();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrierInternal();
return *ptr;
}
#endif
} // namespace internal
} // namespace protobuf
} // namespace google
#undef ATOMICOPS_COMPILER_BARRIER
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
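
The Acquire/Release contract spelled out in the comments above is exactly what
makes the classic publish pattern safe. A hedged sketch with hypothetical
names (g_payload, g_ready), not taken from the protobuf sources:

Atomic32 g_payload = 0;          // Plain data guarded by g_ready.
volatile Atomic32 g_ready = 0;   // Becomes 1 once g_payload is published.

inline void Publish(Atomic32 value) {
  g_payload = value;             // Ordinary write...
  Release_Store(&g_ready, 1);    // ...made visible before the flag flips.
}

inline bool TryConsume(Atomic32* out) {
  if (Acquire_Load(&g_ready) != 1) return false;  // Not published yet.
  *out = g_payload;  // The acquire orders this read after the flag read.
  return true;
}
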
// Copyright 2014 Bloomberg Finance LP. All rights reserved.
//
// (BSD 3-clause license text as above, with Bloomberg Finance LP. named in
// place of Google Inc.)
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_
namespace google {
namespace protobuf {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 result;
asm volatile (
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpw %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stwcx. %[val], %[zero], %[obj] \n\t" // store new value
" bne- 1b \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 result;
asm volatile (
"1: lwarx %[res], %[zero], %[obj] \n\t"
" stwcx. %[val], %[zero], %[obj] \n\t"
" bne- 1b \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
asm volatile (
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" add %[res], %[val], %[res] \n\t" // add the operand
" stwcx. %[res], %[zero], %[obj] \n\t" // store old value
// if still reserved
" bne- 1b \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (increment),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline void MemoryBarrierInternal(void) {
asm volatile (
" lwsync \n\t"
" isync \n\t"
:
:
: "memory");
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
asm volatile (
" lwsync \n\t"
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" add %[res], %[val], %[res] \n\t" // add the operand
" stwcx. %[res], %[zero], %[obj] \n\t" // store old value
// if still reserved
" bne- 1b \n\t"
" isync \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (increment),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 result;
asm volatile (
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpw %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stwcx. %[val], %[zero], %[obj] \n\t" // store new value
" bne- 1b \n\t"
" isync \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 result;
asm volatile (
" lwsync \n\t"
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpw %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stwcx. %[val], %[zero], %[obj] \n\t" // store new value
" bne- 1b \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
asm volatile (
" stw %[val], %[obj] \n\t"
" isync \n\t"
: [obj] "=m" (*ptr)
: [val] "b" (value));
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
asm volatile (
" lwsync \n\t"
" stw %[val], %[obj] \n\t"
: [obj] "=m" (*ptr)
: [val] "b" (value));
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 result;
asm volatile (
"1: lwz %[res], %[obj] \n\t"
" cmpw %[res], %[res] \n\t" // create data
// dependency for
// load/load ordering
" bne- 1b \n\t" // never taken
" isync \n\t"
: [res] "=b" (result)
: [obj] "m" (*ptr),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
Atomic32 result;
asm volatile (
" lwsync \n\t"
"1: lwz %[res], %[obj] \n\t"
" cmpw %[res], %[res] \n\t" // create data
// dependency for
// load/load ordering
" bne- 1b \n\t" // never taken
: [res] "=b" (result)
: [obj] "m" (*ptr),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 result;
asm volatile (
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpd %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stdcx. %[val], %[zero], %[obj] \n\t" // store the new value
" bne- 1b \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 result;
asm volatile (
"1: ldarx %[res], %[zero], %[obj] \n\t"
" stdcx. %[val], %[zero], %[obj] \n\t"
" bne- 1b \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
asm volatile (
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" add %[res], %[res], %[val] \n\t" // add the operand
" stdcx. %[res], %[zero], %[obj] \n\t" // store old value if
// still reserved
" bne- 1b \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (increment),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
asm volatile (
" lwsync \n\t"
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" add %[res], %[res], %[val] \n\t" // add the operand
" stdcx. %[res], %[zero], %[obj] \n\t" // store old value if
// still reserved
" bne- 1b \n\t"
" isync \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (increment),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 result;
asm volatile (
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpd %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stdcx. %[val], %[zero], %[obj] \n\t" // store the new value
" bne- 1b \n\t"
" isync \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 result;
asm volatile (
" lwsync \n\t"
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpd %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stdcx. %[val], %[zero], %[obj] \n\t" // store the new value
" bne- 1b \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
asm volatile (
" std %[val], %[obj] \n\t"
" isync \n\t"
: [obj] "=m" (*ptr)
: [val] "b" (value));
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
asm volatile (
" lwsync \n\t"
" std %[val], %[obj] \n\t"
: [obj] "=m" (*ptr)
: [val] "b" (value));
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 result;
asm volatile (
"1: ld %[res], %[obj] \n\t"
" cmpd %[res], %[res] \n\t" // create data
// dependency for
// load/load ordering
" bne- 1b \n\t" // never taken
" isync \n\t"
: [res] "=b" (result)
: [obj] "m" (*ptr),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
Atomic64 result;
asm volatile (
" lwsync \n\t"
"1: ld %[res], %[obj] \n\t"
" cmpd %[res], %[res] \n\t" // create data
// dependency for
// load/load ordering
" bne- 1b \n\t" // never taken
: [res] "=b" (result)
: [obj] "m" (*ptr),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
#endif
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_
// Protocol Buffers - Google's data interchange format
// Copyright 2015 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: ogabbay@advaoptical.com (Oded Gabbay)
// Cleaned up by: bsilver16384@gmail.com (Brian Silverman)
//
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PPC_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PPC_GCC_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace google {
namespace protobuf {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__(
"0: \n\t"
"lwarx %[prev],0,%[ptr] \n\t"
"cmpw 0,%[prev],%[old_value] \n\t"
"bne- 1f \n\t"
"stwcx. %[new_value],0,%[ptr] \n\t"
"bne- 0b \n\t"
"1: \n\t"
: [prev] "=&r"(prev), "+m"(*ptr)
: [ptr] "r"(ptr), [old_value] "r"(old_value), [new_value] "r"(new_value)
: "cc", "memory");
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old;
__asm__ __volatile__(
"0: \n\t"
"lwarx %[old],0,%[ptr] \n\t"
"stwcx. %[new_value],0,%[ptr] \n\t"
"bne- 0b \n\t"
: [old] "=&r"(old), "+m"(*ptr)
: [ptr] "r"(ptr), [new_value] "r"(new_value)
: "cc", "memory");
return old;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
Atomic32 temp;
__asm__ __volatile__(
"0: \n\t"
"lwarx %[temp],0,%[ptr] \n\t"
"add %[temp],%[increment],%[temp] \n\t"
"stwcx. %[temp],0,%[ptr] \n\t"
"bne- 0b \n\t"
: [temp] "=&r"(temp)
: [increment] "r"(increment), [ptr] "r"(ptr)
: "cc", "memory");
return temp;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
MemoryBarrierInternal();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrierInternal();
return res;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value, Atomic32 new_value) {
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrierInternal();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value, Atomic32 new_value) {
MemoryBarrierInternal();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return res;
}
inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrierInternal() { __asm__ __volatile__("sync" : : : "memory"); }
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
MemoryBarrierInternal();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
Atomic32 value = *ptr;
MemoryBarrierInternal();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
MemoryBarrierInternal();
return *ptr;
}
} // namespace internal
} // namespace protobuf
} // namespace google
#undef ATOMICOPS_COMPILER_BARRIER
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PPC_GCC_H_
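// [Editor's note, not part of the original files] A hedged usage sketch for
// the primitives defined above: the message-passing pattern that
// Release_Store/Acquire_Load exist to support. All names below are
// illustrative, not part of the library.
#if 0  // editor's illustrative sketch, standalone fragment
#include <google/protobuf/stubs/atomicops.h>

google::protobuf::internal::Atomic32 g_ready = 0;
int g_payload = 0;

void Producer() {
  g_payload = 42;  // plain write, published by the release store below
  google::protobuf::internal::Release_Store(&g_ready, 1);
}

void Consumer() {
  while (google::protobuf::internal::Acquire_Load(&g_ready) == 0) {
    // spin; the acquire load orders the read of g_payload after the flag
  }
  int value = g_payload;  // guaranteed to observe 42
  (void)value;
}
#endif  // editor's illustrative sketch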
// Copyright 2014 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
#include <atomic.h>
namespace google {
namespace protobuf {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return (Atomic32)atomic_cas_32((volatile uint32_t*)ptr, (uint32_t)old_value, (uint32_t)new_value);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return (Atomic32)atomic_swap_32((volatile uint32_t*)ptr, (uint32_t)new_value);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return (Atomic32)atomic_add_32_nv((volatile uint32_t*)ptr, (uint32_t)increment);
}
inline void MemoryBarrierInternal(void) {
membar_producer();
membar_consumer();
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
MemoryBarrierInternal();
Atomic32 ret = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrierInternal();
return ret;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrierInternal();
return ret;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrierInternal();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
membar_producer();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
membar_consumer();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 val = *ptr;
membar_consumer();
return val;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
membar_producer();
return *ptr;
}
#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return atomic_cas_64((volatile uint64_t*)ptr, (uint64_t)old_value, (uint64_t)new_value);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) {
return atomic_swap_64((volatile uint64_t*)ptr, (uint64_t)new_value);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) {
return atomic_add_64_nv((volatile uint64_t*)ptr, increment);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) {
MemoryBarrierInternal();
Atomic64 ret = atomic_add_64_nv((volatile uint64_t*)ptr, increment);
MemoryBarrierInternal();
return ret;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrierInternal();
return ret;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrierInternal();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
membar_producer();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
membar_consumer();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 ret = *ptr;
membar_consumer();
return ret;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
membar_producer();
return *ptr;
}
#endif
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
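// [Editor's note, not part of the original files] The SPARC port above
// leans on the Solaris atomic(3C) family (atomic_cas_32, atomic_swap_32,
// atomic_add_32_nv, membar_*). A hedged sketch of the same operations with
// the GCC/Clang __atomic builtins, for readers without <atomic.h> (names
// are illustrative):
#if 0  // editor's illustrative sketch, standalone fragment
#include <stdint.h>

static inline int32_t CasRelaxed(volatile int32_t* ptr, int32_t old_value,
                                 int32_t new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, /*weak=*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;  // the value that was observed in *ptr
}

static inline int32_t AddReturnNew(volatile int32_t* ptr, int32_t delta) {
  return __atomic_add_fetch(ptr, delta, __ATOMIC_RELAXED);  // like *_nv
}
#endif  // editor's illustrative sketch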
// Protocol Buffers - Google's data interchange format
// Copyright 2013 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer (http://clang.llvm.org/docs/ThreadSanitizer.html).
// Use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
#include <sanitizer/tsan_interface_atomic.h>
namespace google {
namespace protobuf {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
return cmp;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
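// [Editor's note] Worked example of the increment + fetch_add pattern
// above: if *ptr == 5 and increment == 3, __tsan_atomic32_fetch_add stores
// 8 but returns the old value 5, so the function returns 3 + 5 == 8, the
// new value, matching the contract of the other ports.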
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire, __tsan_memory_order_acquire);
return cmp;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release, __tsan_memory_order_relaxed);
return cmp;
}
inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
return cmp;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire, __tsan_memory_order_acquire);
return cmp;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release, __tsan_memory_order_relaxed);
return cmp;
}
inline void MemoryBarrierInternal() {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
} // namespace internal
} // namespace protobuf
} // namespace google
#undef ATOMICOPS_COMPILER_BARRIER
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This module gets enough CPU information to optimize the
// atomicops module on x86.
#include <cstring>
#include <google/protobuf/stubs/atomicops.h>
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
#ifdef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
// must preserve that register's value across cpuid instructions.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
asm("mov %%ebx, %%edi\n" \
"cpuid\n" \
"xchg %%edi, %%ebx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#elif defined(__x86_64__)
#define cpuid(a, b, c, d, inp) \
asm("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif
#if defined(cpuid) // initialize the struct only on x86
namespace google {
namespace protobuf {
namespace internal {
// Set the flags so that code will run correctly and conservatively: before
// this initializer runs, the process is probably still single-threaded, so
// the default values below should be safe.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // bug can't exist before process spawns multiple threads
false, // no SSE2
};
namespace {
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
void AtomicOps_Internalx86CPUFeaturesInit() {
uint32_t eax;
uint32_t ebx;
uint32_t ecx;
uint32_t edx;
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
memcpy(vendor, &ebx, 4);
memcpy(vendor + 4, &edx, 4);
memcpy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
cpuid(eax, ebx, ecx, edx, 1);
int family = (eax >> 8) & 0xf; // family and model fields
int model = (eax >> 4) & 0xf;
if (family == 0xf) { // use extended family and model fields
family += (eax >> 20) & 0xff;
model += ((eax >> 16) & 0xf) << 4;
}
// Opteron Rev E has a bug in which on very rare occasions a locked
// instruction doesn't act as a read-acquire barrier if followed by a
// non-locked read-modify-write instruction. Rev F has this bug in
// pre-release versions, but not in versions released to customers,
// so we test only for Rev E, which is family 15, model 32..63 inclusive.
if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
family == 15 &&
32 <= model && model <= 63) {
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
} else {
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
}
// edx bit 26 is SSE2 which we use to tell us whether we can use mfence
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
}
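// [Editor's note] Worked example of the family/model decoding above, using
// a hypothetical Opteron Rev E signature eax == 0x00020F71:
//   base family = (eax >> 8) & 0xf == 0xf (15), so the extended fields
//   apply: family += (eax >> 20) & 0xff == 0, leaving family == 15;
//   base model = (eax >> 4) & 0xf == 0x7;
//   model += ((eax >> 16) & 0xf) << 4 == 32, giving model == 39.
// Model 39 lies in 32..63, so has_amd_lock_mb_bug would be set to true.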
class AtomicOpsx86Initializer {
public:
AtomicOpsx86Initializer() {
AtomicOps_Internalx86CPUFeaturesInit();
}
};
// A global to get us initialized on startup via static initialization :/
AtomicOpsx86Initializer g_initer;
} // namespace
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // defined(cpuid)
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
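// [Editor's note, not part of the original files] A hedged modern
// alternative to the hand-rolled cpuid macro above: GCC and Clang ship
// <cpuid.h>, whose __get_cpuid already preserves %ebx under PIC. A minimal
// sketch, assuming that header is available (names are illustrative):
#if 0  // editor's illustrative sketch, standalone fragment
#include <cpuid.h>

static bool HasSse2() {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;  // leaf 1
  return ((edx >> 26) & 1) != 0;  // edx bit 26 = SSE2, as in the code above
}
#endif  // editor's illustrative sketch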
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
namespace google {
namespace protobuf {
namespace internal {
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
// after acquire compare-and-swap.
bool has_sse2; // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
__asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
__asm__ __volatile__("lock; xaddl %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now holds the old value of *ptr
return temp + increment;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
__asm__ __volatile__("lock; xaddl %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now holds the old value of *ptr
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return temp + increment;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
#if defined(__x86_64__)
// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist on x86-64.
inline void MemoryBarrierInternal() {
__asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrierInternal();
}
#else
inline void MemoryBarrierInternal() {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
__asm__ __volatile__("mfence" : : : "memory");
} else { // mfence is faster but not present on PIII
Atomic32 x = 0;
NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
}
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
*ptr = value;
__asm__ __volatile__("mfence" : : : "memory");
} else {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier on PIII
}
}
#endif
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier.
// See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
// See comments in Atomic64 version of Release_Store(), below.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrierInternal();
return *ptr;
}
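// [Editor's note, not part of the original file] On x86's TSO memory model
// an ordinary load already has acquire semantics and an ordinary store
// release semantics, which is why Acquire_Load and Release_Store above need
// only a compiler barrier. A hedged C++11 sketch (an assumption; this
// header predates C++11) that compiles to the same plain mov instructions:
#if 0  // editor's illustrative sketch, standalone fragment
#include <atomic>
#include <cstdint>

inline void Release_Store_Cxx11(std::atomic<std::int32_t>* ptr,
                                std::int32_t value) {
  ptr->store(value, std::memory_order_release);  // plain store on x86
}

inline std::int32_t Acquire_Load_Cxx11(
    const std::atomic<std::int32_t>* ptr) {
  return ptr->load(std::memory_order_acquire);  // plain load on x86
}
#endif  // editor's illustrative sketch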
#if defined(__x86_64__)
// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("lock; cmpxchgq %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
__asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
__asm__ __volatile__("lock; xaddq %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now contains the previous value of *ptr
return temp + increment;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
__asm__ __volatile__("lock; xaddq %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now contains the previous value of *ptr
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return temp + increment;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrierInternal();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier
// for current AMD/Intel chips as of Jan 2008.
// See also Acquire_Load(), below.
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
//
// x86 stores/loads fail to act as barriers for a few instructions (clflush
// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
// not generated by the compiler, and are rare. Users of these instructions
// need to know about cache behaviour in any case since all of these involve
// either flushing cache lines or non-temporal cache hints.
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
// for current AMD/Intel chips as of Jan 2008.
// See also Release_Store(), above.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrierInternal();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(__x86_64__)
} // namespace internal
} // namespace protobuf
} // namespace google
#undef ATOMICOPS_COMPILER_BARRIER
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The compilation of extension_set.cc fails when windows.h is included.
// Therefore we move the code that depends on windows.h into this separate
// .cc file. This file is skipped entirely when thread safety is disabled.
#ifndef GOOGLE_PROTOBUF_NO_THREAD_SAFETY
#include <google/protobuf/stubs/atomicops.h>
#ifdef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
#include <windows.h>
namespace google {
namespace protobuf {
namespace internal {
inline void MemoryBarrierInternal() {
// On ARM this is a define while on x86/x64 this is
// a function declared in WinNT.h
MemoryBarrier();
}
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = InterlockedExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return InterlockedExchangeAdd(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) + increment;
}
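// [Editor's note] InterlockedExchangeAdd returns the value *before* the
// addition, hence the "+ increment" above to produce the new value and
// match the contract of the other ports.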
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
PVOID result = InterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result = InterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return InterlockedExchangeAdd64(
reinterpret_cast<volatile LONGLONG*>(ptr),
static_cast<LONGLONG>(increment)) + increment;
}
#endif // defined(_WIN64)
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
#endif // GOOGLE_PROTOBUF_NO_THREAD_SAFETY
// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
namespace google {
namespace protobuf {
namespace internal {
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrierInternal();
return *ptr;
}
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrierInternal();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(_WIN64)
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
//
// emulates google3/base/once.h
//
// This header is intended to be included only by internal .cc files and
// generated .pb.cc files. Users should not use this directly.
#include <google/protobuf/stubs/once.h>
#ifndef GOOGLE_PROTOBUF_NO_THREAD_SAFETY
#ifdef _WIN32
#include <windows.h>
#else
#include <sched.h>
#endif
#include <google/protobuf/stubs/atomicops.h>
namespace google {
namespace protobuf {
namespace {
void SchedYield() {
#ifdef _WIN32
Sleep(0);
#else // POSIX
sched_yield();
#endif
}
} // namespace
void GoogleOnceInitImpl(ProtobufOnceType* once, Closure* closure) {
internal::AtomicWord state = internal::Acquire_Load(once);
// Fast path. The provided closure was already executed.
if (state == ONCE_STATE_DONE) {
return;
}
// The closure execution did not complete yet. The once object can be in one
// of the two following states:
// - UNINITIALIZED: We are the first thread calling this function.
// - EXECUTING_CLOSURE: Another thread is already executing the closure.
//
// First, try to change the state from UNINITIALIZED to EXECUTING_CLOSURE
// atomically.
state = internal::Acquire_CompareAndSwap(
once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_CLOSURE);
if (state == ONCE_STATE_UNINITIALIZED) {
// We are the first thread to call this function, so we have to call the
// closure.
closure->Run();
internal::Release_Store(once, ONCE_STATE_DONE);
} else {
// Another thread has already started executing the closure. We need to
// wait until it completes the initialization.
while (state == ONCE_STATE_EXECUTING_CLOSURE) {
// Note that futex() could be used here on Linux as an improvement.
SchedYield();
state = internal::Acquire_Load(once);
}
}
}
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_NO_THREAD_SAFETY
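// [Editor's note, not part of the original files] A hedged C++11 sketch of
// the three-state once machine implemented above, using std::atomic instead
// of the atomicops primitives (all names are illustrative; std::call_once
// would serve the same purpose in new code):
#if 0  // editor's illustrative sketch, standalone fragment
#include <atomic>
#include <thread>

enum OnceState { kUninitialized, kExecuting, kDone };

inline void OnceInit(std::atomic<int>* once, void (*fn)()) {
  if (once->load(std::memory_order_acquire) == kDone) return;  // fast path
  int expected = kUninitialized;
  if (once->compare_exchange_strong(expected, kExecuting,
                                    std::memory_order_acquire)) {
    fn();  // we won the race, so run the closure exactly once
    once->store(kDone, std::memory_order_release);
  } else {
    // Another thread won (expected now holds kExecuting or kDone); wait
    // for it to finish, as the SchedYield() loop above does.
    while (once->load(std::memory_order_acquire) != kDone) {
      std::this_thread::yield();
    }
  }
}
#endif  // editor's illustrative sketch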
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#include <pthread.h>
#endif
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/testing/googletest.h>
#include <gtest/gtest.h>
namespace google {
namespace protobuf {
namespace {
class OnceInitTest : public testing::Test {
protected:
void SetUp() {
state_ = INIT_NOT_STARTED;
current_test_ = this;
}
// Since ProtobufOnceType is only allowed to be allocated in static storage,
// each test must use a different pair of ProtobufOnceType objects which it
// must declare itself.
void SetOnces(ProtobufOnceType* once, ProtobufOnceType* recursive_once) {
once_ = once;
recursive_once_ = recursive_once;
}
void InitOnce() {
GoogleOnceInit(once_, &InitStatic);
}
void InitRecursiveOnce() {
GoogleOnceInit(recursive_once_, &InitRecursiveStatic);
}
void BlockInit() { init_blocker_.Lock(); }
void UnblockInit() { init_blocker_.Unlock(); }
class TestThread {
public:
TestThread(Closure* callback)
: done_(false), joined_(false), callback_(callback) {
#ifdef _WIN32
thread_ = CreateThread(NULL, 0, &Start, this, 0, NULL);
#else
pthread_create(&thread_, NULL, &Start, this);
#endif
}
~TestThread() {
if (!joined_) Join();
}
bool IsDone() {
MutexLock lock(&done_mutex_);
return done_;
}
void Join() {
joined_ = true;
#ifdef _WIN32
WaitForSingleObject(thread_, INFINITE);
CloseHandle(thread_);
#else
pthread_join(thread_, NULL);
#endif
}
private:
#ifdef _WIN32
HANDLE thread_;
#else
pthread_t thread_;
#endif
Mutex done_mutex_;
bool done_;
bool joined_;
Closure* callback_;
#ifdef _WIN32
static DWORD WINAPI Start(LPVOID arg) {
#else
static void* Start(void* arg) {
#endif
reinterpret_cast<TestThread*>(arg)->Run();
return 0;
}
void Run() {
callback_->Run();
MutexLock lock(&done_mutex_);
done_ = true;
}
};
TestThread* RunInitOnceInNewThread() {
return new TestThread(NewCallback(this, &OnceInitTest::InitOnce));
}
TestThread* RunInitRecursiveOnceInNewThread() {
return new TestThread(
NewCallback(this, &OnceInitTest::InitRecursiveOnce));
}
enum State {
INIT_NOT_STARTED,
INIT_STARTED,
INIT_DONE
};
State CurrentState() {
MutexLock lock(&mutex_);
return state_;
}
void WaitABit() {
#ifdef _WIN32
Sleep(1000);
#else
sleep(1);
#endif
}
private:
Mutex mutex_;
Mutex init_blocker_;
State state_;
ProtobufOnceType* once_;
ProtobufOnceType* recursive_once_;
void Init() {
MutexLock lock(&mutex_);
EXPECT_EQ(INIT_NOT_STARTED, state_);
state_ = INIT_STARTED;
mutex_.Unlock();
init_blocker_.Lock();
init_blocker_.Unlock();
mutex_.Lock();
state_ = INIT_DONE;
}
static OnceInitTest* current_test_;
static void InitStatic() { current_test_->Init(); }
static void InitRecursiveStatic() { current_test_->InitOnce(); }
};
OnceInitTest* OnceInitTest::current_test_ = NULL;
GOOGLE_PROTOBUF_DECLARE_ONCE(simple_once);
TEST_F(OnceInitTest, Simple) {
SetOnces(&simple_once, NULL);
EXPECT_EQ(INIT_NOT_STARTED, CurrentState());
InitOnce();
EXPECT_EQ(INIT_DONE, CurrentState());
// Calling again has no effect.
InitOnce();
EXPECT_EQ(INIT_DONE, CurrentState());
}
GOOGLE_PROTOBUF_DECLARE_ONCE(recursive_once1);
GOOGLE_PROTOBUF_DECLARE_ONCE(recursive_once2);
TEST_F(OnceInitTest, Recursive) {
SetOnces(&recursive_once1, &recursive_once2);
EXPECT_EQ(INIT_NOT_STARTED, CurrentState());
InitRecursiveOnce();
EXPECT_EQ(INIT_DONE, CurrentState());
}
GOOGLE_PROTOBUF_DECLARE_ONCE(multiple_threads_once);
TEST_F(OnceInitTest, MultipleThreads) {
SetOnces(&multiple_threads_once, NULL);
std::unique_ptr<TestThread> threads[4];
EXPECT_EQ(INIT_NOT_STARTED, CurrentState());
for (int i = 0; i < 4; i++) {
threads[i].reset(RunInitOnceInNewThread());
}
for (int i = 0; i < 4; i++) {
threads[i]->Join();
}
EXPECT_EQ(INIT_DONE, CurrentState());
}
GOOGLE_PROTOBUF_DECLARE_ONCE(multiple_threads_blocked_once1);
GOOGLE_PROTOBUF_DECLARE_ONCE(multiple_threads_blocked_once2);
TEST_F(OnceInitTest, MultipleThreadsBlocked) {
SetOnces(&multiple_threads_blocked_once1, &multiple_threads_blocked_once2);
std::unique_ptr<TestThread> threads[8];
EXPECT_EQ(INIT_NOT_STARTED, CurrentState());
BlockInit();
for (int i = 0; i < 4; i++) {
threads[i].reset(RunInitOnceInNewThread());
}
for (int i = 4; i < 8; i++) {
threads[i].reset(RunInitRecursiveOnceInNewThread());
}
WaitABit();
// We should now have one thread blocked inside Init(), four blocked waiting
// for Init() to complete, and three blocked waiting for InitRecursive() to
// complete.
EXPECT_EQ(INIT_STARTED, CurrentState());
UnblockInit();
for (int i = 0; i < 8; i++) {
threads[i]->Join();
}
EXPECT_EQ(INIT_DONE, CurrentState());
}
} // anonymous namespace
} // namespace protobuf
} // namespace google
// Protocol Buffers - Google's data interchange format
// Copyright 2014 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// from google3/util/gtl/shared_ptr.h
#ifndef GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__
#define GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__
#include <google/protobuf/stubs/atomicops.h>
#include <algorithm> // for swap
#include <stddef.h>
#include <memory>
namespace google {
namespace protobuf {
namespace internal {
// Alias to std::shared_ptr for any C++11 platform,
// and for any supported MSVC compiler.
#if !defined(UTIL_GTL_USE_STD_SHARED_PTR) && \
(defined(COMPILER_MSVC) || defined(LANG_CXX11))
#define UTIL_GTL_USE_STD_SHARED_PTR 1
#endif
#if defined(UTIL_GTL_USE_STD_SHARED_PTR) && UTIL_GTL_USE_STD_SHARED_PTR
// These aliases are transitional and will be going away soon. Please
// #include <memory> and type std::shared_ptr yourself instead of relying on
// this file.
//
// Migration doc: http://go/std-shared-ptr-lsc
using std::enable_shared_from_this;
using std::shared_ptr;
using std::static_pointer_cast;
using std::weak_ptr;
#else // below, UTIL_GTL_USE_STD_SHARED_PTR not set or set to 0.
// For everything else there is the google3 implementation.
inline bool RefCountDec(volatile Atomic32 *ptr) {
return Barrier_AtomicIncrement(ptr, -1) != 0;
}
inline void RefCountInc(volatile Atomic32 *ptr) {
NoBarrier_AtomicIncrement(ptr, 1);
}
template <typename T> class shared_ptr;
template <typename T> class weak_ptr;
// This class is an internal implementation detail for shared_ptr. If two
// shared_ptrs point to the same object, they also share a control block.
// An "empty" shared_pointer refers to NULL and also has a NULL control block.
// It contains all of the state that's needed for reference counting or any
// other kind of resource management. In this implementation the control block
// happens to consist of two atomic words, the reference count (the number
// of shared_ptrs that share ownership of the object) and the weak count
// (the number of weak_ptrs that observe the object, plus 1 if the
// refcount is nonzero).
//
// The "plus 1" is to prevent a race condition in the shared_ptr and
// weak_ptr destructors. We need to make sure the control block is
// only deleted once, so we need to make sure that at most one
// object sees the weak count decremented from 1 to 0.
class SharedPtrControlBlock {
template <typename T> friend class shared_ptr;
template <typename T> friend class weak_ptr;
private:
SharedPtrControlBlock() : refcount_(1), weak_count_(1) { }
Atomic32 refcount_;
Atomic32 weak_count_;
};
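// [Editor's note] Worked example of the "plus 1" invariant above:
//   shared_ptr<T> s(new T);  // refcount_ == 1, weak_count_ == 1
//   weak_ptr<T> w(s);        // refcount_ == 1, weak_count_ == 2
// Destroying s drops refcount_ to 0, deletes the T, and drops weak_count_
// to 1; destroying w then drops weak_count_ to 0 and deletes the control
// block. Exactly one destructor observes each 1 -> 0 transition, so the
// control block is freed exactly once.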
// Forward declaration. The class is defined below.
template <typename T> class enable_shared_from_this;
template <typename T>
class shared_ptr {
template <typename U> friend class weak_ptr;
public:
typedef T element_type;
shared_ptr() : ptr_(NULL), control_block_(NULL) {}
explicit shared_ptr(T* ptr)
: ptr_(ptr),
control_block_(ptr != NULL ? new SharedPtrControlBlock : NULL) {
// If p is non-null and T inherits from enable_shared_from_this, we
// set up the data that shared_from_this needs.
MaybeSetupWeakThis(ptr);
}
// Copy constructor: makes this object a copy of ptr, and increments
// the reference count.
template <typename U>
shared_ptr(const shared_ptr<U>& ptr)
: ptr_(NULL),
control_block_(NULL) {
Initialize(ptr);
}
// Need a non-templated version to prevent the compiler-generated default
// copy constructor from being used.
shared_ptr(const shared_ptr<T>& ptr)
: ptr_(NULL),
control_block_(NULL) {
Initialize(ptr);
}
// Assignment operator. Replaces the existing shared_ptr with ptr.
// Increment ptr's reference count and decrement the one being replaced.
template <typename U>
shared_ptr<T>& operator=(const shared_ptr<U>& ptr) {
if (ptr_ != ptr.ptr_) {
shared_ptr<T> me(ptr); // will hold our previous state to be destroyed.
swap(me);
}
return *this;
}
// Need a non-templated version to prevent the compiler-generated default
// assignment operator from being used.
shared_ptr<T>& operator=(const shared_ptr<T>& ptr) {
if (ptr_ != ptr.ptr_) {
shared_ptr<T> me(ptr); // will hold our previous state to be destroyed.
swap(me);
}
return *this;
}
// TODO(austern): Consider providing this constructor. The draft C++ standard
// (20.8.10.2.1) includes it. However, it says that this constructor throws
// a bad_weak_ptr exception when ptr is expired. Is it better to provide this
// constructor and make it do something else, like fail with a CHECK, or to
// leave this constructor out entirely?
//
// template <typename U>
// shared_ptr(const weak_ptr<U>& ptr);
~shared_ptr() {
if (ptr_ != NULL) {
if (!RefCountDec(&control_block_->refcount_)) {
delete ptr_;
// weak_count_ is defined as the number of weak_ptrs that observe
// ptr_, plus 1 if refcount_ is nonzero.
if (!RefCountDec(&control_block_->weak_count_)) {
delete control_block_;
}
}
}
}
// Replaces underlying raw pointer with the one passed in. The reference
// count is set to one (or zero if the pointer is NULL) for the pointer
// being passed in and decremented for the one being replaced.
//
// If you have a compilation error with this code, make sure you aren't
// passing NULL, nullptr, or 0 to this function. Call reset without an
// argument to reset to a null ptr.
template <typename Y>
void reset(Y* p) {
if (p != ptr_) {
shared_ptr<T> tmp(p);
tmp.swap(*this);
}
}
void reset() {
reset(static_cast<T*>(NULL));
}
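// A short usage sketch of the two overloads above:
//   sp.reset(new T);  // take ownership of a new object, dropping the old one
//   sp.reset();       // become empty; per the note above, reset(NULL) would
//                     // not compile because the template parameter Y cannot
//                     // be deduced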
// Exchanges the contents of this with the contents of r. This function
// supports more efficient swapping since it eliminates the need for a
// temporary shared_ptr object.
void swap(shared_ptr<T>& r) {
using std::swap; // http://go/using-std-swap
swap(ptr_, r.ptr_);
swap(control_block_, r.control_block_);
}
// The following function is useful for gaining access to the underlying
// pointer when a shared_ptr remains in scope so the reference-count is
// known to be > 0 (e.g. for parameter passing).
T* get() const {
return ptr_;
}
T& operator*() const {
return *ptr_;
}
T* operator->() const {
return ptr_;
}
long use_count() const {
return control_block_ ? control_block_->refcount_ : 1;
}
bool unique() const {
return use_count() == 1;
}
private:
// If r is non-empty, initialize *this to share ownership with r,
// increasing the underlying reference count.
// If r is empty, *this remains empty.
// Requires: this is empty, namely this->ptr_ == NULL.
template <typename U>
void Initialize(const shared_ptr<U>& r) {
// This performs a static_cast on r.ptr_ to U*, which is a no-op since it
// is already a U*. So initialization here requires that r.ptr_ is
// implicitly convertible to T*.
InitializeWithStaticCast<U>(r);
}
// Initializes *this as described in Initialize, but additionally performs a
// static_cast from r.ptr_ (V*) to U*.
// NOTE(gfc): We'd need a more general form to support const_pointer_cast and
// dynamic_pointer_cast, but those operations are sufficiently discouraged
// that supporting static_pointer_cast is sufficient.
template <typename U, typename V>
void InitializeWithStaticCast(const shared_ptr<V>& r) {
if (r.control_block_ != NULL) {
RefCountInc(&r.control_block_->refcount_);
ptr_ = static_cast<U*>(r.ptr_);
control_block_ = r.control_block_;
}
}
// Helper function for the constructor that takes a raw pointer. If T
// doesn't inherit from enable_shared_from_this<T> then we have nothing to
// do, so this function is trivial and inline. The other version is declared
// out of line, after the class definition of enable_shared_from_this.
void MaybeSetupWeakThis(enable_shared_from_this<T>* ptr);
void MaybeSetupWeakThis(...) { }
T* ptr_;
SharedPtrControlBlock* control_block_;
#ifndef SWIG
template <typename U>
friend class shared_ptr;
template <typename U, typename V>
friend shared_ptr<U> static_pointer_cast(const shared_ptr<V>& rhs);
#endif
};
// Matches the interface of std::swap as an aid to generic programming.
template <typename T> void swap(shared_ptr<T>& r, shared_ptr<T>& s) {
r.swap(s);
}
template <typename T, typename U>
shared_ptr<T> static_pointer_cast(const shared_ptr<U>& rhs) {
shared_ptr<T> lhs;
lhs.template InitializeWithStaticCast<T>(rhs);
return lhs;
}
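// A usage sketch for static_pointer_cast, assuming hypothetical types where
// Derived inherits from Base:
//   shared_ptr<Base> b(new Derived);
//   shared_ptr<Derived> d = static_pointer_cast<Derived>(b);
//   // d and b now share one control block, so b.use_count() == 2.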
// See comments at the top of the file for a description of why this
// class exists, and the draft C++ standard (as of July 2009 the
// latest draft is N2914) for the detailed specification.
template <typename T>
class weak_ptr {
template <typename U> friend class weak_ptr;
public:
typedef T element_type;
// Create an empty (i.e. already expired) weak_ptr.
weak_ptr() : ptr_(NULL), control_block_(NULL) { }
// Create a weak_ptr that observes the same object that ptr points
// to. Note that there is no race condition here: we know that the
// control block can't disappear while we're looking at it because
// it is owned by at least one shared_ptr, ptr.
template <typename U> weak_ptr(const shared_ptr<U>& ptr) {
CopyFrom(ptr.ptr_, ptr.control_block_);
}
// Copy a weak_ptr. The object it points to might disappear, but we
// don't care: we're only working with the control block, and it can't
// disappear while we're looking at it because it's owned by at least one
// weak_ptr, ptr.
template <typename U> weak_ptr(const weak_ptr<U>& ptr) {
CopyFrom(ptr.ptr_, ptr.control_block_);
}
// Need non-templated version to prevent default copy constructor
weak_ptr(const weak_ptr& ptr) {
CopyFrom(ptr.ptr_, ptr.control_block_);
}
// Destroy the weak_ptr. If no shared_ptr owns the control block, and if
// we are the last weak_ptr to own it, then it can be deleted. Note that
// weak_count_ is defined as the number of weak_ptrs sharing this control
// block, plus 1 if there are any shared_ptrs. We therefore know that it's
// safe to delete the control block when weak_count_ reaches 0, without
// having to perform any additional tests.
~weak_ptr() {
if (control_block_ != NULL &&
!RefCountDec(&control_block_->weak_count_)) {
delete control_block_;
}
}
weak_ptr& operator=(const weak_ptr& ptr) {
if (&ptr != this) {
weak_ptr tmp(ptr);
tmp.swap(*this);
}
return *this;
}
template <typename U> weak_ptr& operator=(const weak_ptr<U>& ptr) {
weak_ptr tmp(ptr);
tmp.swap(*this);
return *this;
}
template <typename U> weak_ptr& operator=(const shared_ptr<U>& ptr) {
weak_ptr tmp(ptr);
tmp.swap(*this);
return *this;
}
void swap(weak_ptr& ptr) {
using std::swap; // http://go/using-std-swap
swap(ptr_, ptr.ptr_);
swap(control_block_, ptr.control_block_);
}
void reset() {
weak_ptr tmp;
tmp.swap(*this);
}
// Return the number of shared_ptrs that own the object we are observing.
// Note that this number can be 0 (if this pointer has expired).
long use_count() const {
return control_block_ != NULL ? control_block_->refcount_ : 0;
}
bool expired() const { return use_count() == 0; }
// Return a shared_ptr that owns the object we are observing. If we
// have expired, the shared_ptr will be empty. We have to be careful
// about concurrency, though, since some other thread might be
// destroying the last owning shared_ptr while we're in this
// function. We want to increment the refcount only if it's nonzero
// and get the new value, and we want that whole operation to be
// atomic.
shared_ptr<T> lock() const {
shared_ptr<T> result;
if (control_block_ != NULL) {
Atomic32 old_refcount;
do {
old_refcount = control_block_->refcount_;
if (old_refcount == 0)
break;
} while (old_refcount !=
NoBarrier_CompareAndSwap(
&control_block_->refcount_, old_refcount,
old_refcount + 1));
if (old_refcount > 0) {
result.ptr_ = ptr_;
result.control_block_ = control_block_;
}
}
return result;
}
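// A minimal sketch of the intended locking pattern, given some
// weak_ptr<T> wp:
//   shared_ptr<T> sp = wp.lock();
//   if (sp.get() != NULL) {
//     // sp keeps the object alive while it remains in scope.
//   }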
private:
void CopyFrom(T* ptr, SharedPtrControlBlock* control_block) {
ptr_ = ptr;
control_block_ = control_block;
if (control_block_ != NULL)
RefCountInc(&control_block_->weak_count_);
}
private:
element_type* ptr_;
SharedPtrControlBlock* control_block_;
};
template <typename T> void swap(weak_ptr<T>& r, weak_ptr<T>& s) {
r.swap(s);
}
// See comments at the top of the file for a description of why this class
// exists, and section 20.8.10.5 of the draft C++ standard (as of July 2009
// the latest draft is N2914) for the detailed specification.
template <typename T>
class enable_shared_from_this {
friend class shared_ptr<T>;
public:
// Precondition: there must be a shared_ptr that owns *this and that was
// created, directly or indirectly, from a raw pointer of type T*. (The
// latter part of the condition is technical but not quite redundant; it
// rules out some complicated uses involving inheritance hierarchies.)
shared_ptr<T> shared_from_this() {
// Behavior is undefined if the precondition isn't satisfied; we choose
// to die with a CHECK failure.
GOOGLE_CHECK(!weak_this_.expired()) << "No shared_ptr owns this object";
return weak_this_.lock();
}
shared_ptr<const T> shared_from_this() const {
GOOGLE_CHECK(!weak_this_.expired()) << "No shared_ptr owns this object";
return weak_this_.lock();
}
protected:
enable_shared_from_this() { }
enable_shared_from_this(const enable_shared_from_this& other) { }
enable_shared_from_this& operator=(const enable_shared_from_this& other) {
return *this;
}
~enable_shared_from_this() { }
private:
weak_ptr<T> weak_this_;
};
// This is a helper function called by the shared_ptr constructor that takes
// a raw pointer. If T inherits from enable_shared_from_this<T>, it sets up
// weak_this_ so that shared_from_this works correctly. If T does not inherit
// from enable_shared_from_this<T> we get the other overload, defined inline,
// which does nothing.
template<typename T>
void shared_ptr<T>::MaybeSetupWeakThis(enable_shared_from_this<T>* ptr) {
if (ptr) {
GOOGLE_CHECK(ptr->weak_this_.expired())
<< "Object already owned by a shared_ptr";
ptr->weak_this_ = *this;
}
}
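// A hedged usage sketch (Widget is a hypothetical type):
//   class Widget : public enable_shared_from_this<Widget> { };
//   shared_ptr<Widget> a(new Widget);  // the constructor wires up weak_this_
//   shared_ptr<Widget> b = a->shared_from_this();  // b shares ownership with a
// Calling shared_from_this() on a Widget that no shared_ptr owns (e.g. a
// stack-allocated one) trips the GOOGLE_CHECK instead.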
#endif // UTIL_GTL_USE_STD_SHARED_PTR
} // internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ----
// Author: Matt Austern
//
// This code is compiled directly on many platforms, including client
// platforms like Windows, Mac, and embedded systems. Before making
// any changes here, make sure that you're not breaking any platforms.
//
// Define a small subset of tr1 type traits. The traits we define are:
// enable_if
// is_integral
// is_floating_point
// is_pointer
// is_enum
// is_reference
// is_pod
// has_trivial_constructor
// has_trivial_copy
// has_trivial_assign
// has_trivial_destructor
// remove_const
// remove_volatile
// remove_cv
// remove_reference
// add_reference
// remove_pointer
// is_same
// is_convertible
// We can add more type traits as required.
#ifndef GOOGLE_PROTOBUF_TYPE_TRAITS_H_
#define GOOGLE_PROTOBUF_TYPE_TRAITS_H_
#include <cstddef> // for NULL
#include <utility> // For pair
#include <google/protobuf/stubs/template_util.h> // For true_type and false_type
namespace google {
namespace protobuf {
namespace internal {
template<typename B, typename D>
struct is_base_of {
typedef char (&yes)[1];
typedef char (&no)[2];
// BEGIN GOOGLE LOCAL MODIFICATION -- check is a #define on Mac.
#undef check
// END GOOGLE LOCAL MODIFICATION
static yes check(const B*);
static no check(const void*);
enum {
value = sizeof(check(static_cast<const D*>(NULL))) == sizeof(yes),
};
};
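// An illustrative instance, assuming hypothetical "struct Base {};" and
// "struct Derived : Base {};": for is_base_of<Base, Derived>, the
// static_cast<const Derived*>(NULL) argument converts to const Base*, so the
// "yes" overload of check is chosen and value is true; for
// is_base_of<Derived, Base> only the "no" (const void*) overload matches, so
// value is false.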
template <bool cond, class T = void> struct enable_if;
template <class T> struct is_integral;
template <class T> struct is_floating_point;
template <class T> struct is_pointer;
// MSVC can't compile this correctly, and neither can gcc 3.3.5 (at least)
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
// is_enum uses is_convertible, which is not available on MSVC.
template <class T> struct is_enum;
#endif
template <class T> struct is_reference;
template <class T> struct is_pod;
template <class T> struct has_trivial_constructor;
template <class T> struct has_trivial_copy;
template <class T> struct has_trivial_assign;
template <class T> struct has_trivial_destructor;
template <class T> struct remove_const;
template <class T> struct remove_volatile;
template <class T> struct remove_cv;
template <class T> struct remove_reference;
template <class T> struct add_reference;
template <class T> struct remove_pointer;
template <class T, class U> struct is_same;
#if !(defined(__GNUC__) && __GNUC__ <= 3)
template <class From, class To> struct is_convertible;
#endif
// enable_if, equivalent semantics to C++11 std::enable_if, specifically:
// "If B is true, the member typedef type shall equal T; otherwise, there
// shall be no member typedef type."
// Specified by 20.9.7.6 [Other transformations]
template<bool cond, class T> struct enable_if { typedef T type; };
template<class T> struct enable_if<false, T> {};
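// A minimal SFINAE sketch using enable_if (Twice is a hypothetical function):
//   template <class T>
//   typename enable_if<is_integral<T>::value, T>::type Twice(T n) {
//     return n + n;
//   }
// Twice(3) instantiates normally; Twice(3.5) finds no matching function,
// because enable_if<false, T> has no member typedef "type".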
// is_integral is false except for the built-in integer types. A
// cv-qualified type is integral if and only if the underlying type is.
template <class T> struct is_integral : false_type { };
template<> struct is_integral<bool> : true_type { };
template<> struct is_integral<char> : true_type { };
template<> struct is_integral<unsigned char> : true_type { };
template<> struct is_integral<signed char> : true_type { };
#if defined(_MSC_VER)
// wchar_t is not by default a distinct type from unsigned short in
// Microsoft C.
// See http://msdn2.microsoft.com/en-us/library/dh8che7s(VS.80).aspx
template<> struct is_integral<__wchar_t> : true_type { };
#else
template<> struct is_integral<wchar_t> : true_type { };
#endif
template<> struct is_integral<short> : true_type { };
template<> struct is_integral<unsigned short> : true_type { };
template<> struct is_integral<int> : true_type { };
template<> struct is_integral<unsigned int> : true_type { };
template<> struct is_integral<long> : true_type { };
template<> struct is_integral<unsigned long> : true_type { };
#if defined(HAVE_LONG_LONG) || defined(_MSC_VER)
template<> struct is_integral<long long> : true_type { };
template<> struct is_integral<unsigned long long> : true_type { };
#endif
template <class T> struct is_integral<const T> : is_integral<T> { };
template <class T> struct is_integral<volatile T> : is_integral<T> { };
template <class T> struct is_integral<const volatile T> : is_integral<T> { };
// is_floating_point is false except for the built-in floating-point types.
// A cv-qualified type is floating-point if and only if the underlying type is.
template <class T> struct is_floating_point : false_type { };
template<> struct is_floating_point<float> : true_type { };
template<> struct is_floating_point<double> : true_type { };
template<> struct is_floating_point<long double> : true_type { };
template <class T> struct is_floating_point<const T>
: is_floating_point<T> { };
template <class T> struct is_floating_point<volatile T>
: is_floating_point<T> { };
template <class T> struct is_floating_point<const volatile T>
: is_floating_point<T> { };
// is_pointer is false except for pointer types. A cv-qualified type (e.g.
// "int* const", as opposed to "int const*") is cv-qualified if and only if
// the underlying type is.
template <class T> struct is_pointer : false_type { };
template <class T> struct is_pointer<T*> : true_type { };
template <class T> struct is_pointer<const T> : is_pointer<T> { };
template <class T> struct is_pointer<volatile T> : is_pointer<T> { };
template <class T> struct is_pointer<const volatile T> : is_pointer<T> { };
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
namespace type_traits_internal {
template <class T> struct is_class_or_union {
template <class U> static small_ tester(void (U::*)());
template <class U> static big_ tester(...);
static const bool value = sizeof(tester<T>(0)) == sizeof(small_);
};
// is_convertible chokes if the first argument is an array. That's why
// we use add_reference here.
template <bool NotUnum, class T> struct is_enum_impl
: is_convertible<typename add_reference<T>::type, int> { };
template <class T> struct is_enum_impl<true, T> : false_type { };
} // namespace type_traits_internal
// Specified by TR1 [4.5.1] primary type categories.
// Implementation note:
//
// Each type is either void, integral, floating point, array, pointer,
// reference, member object pointer, member function pointer, enum,
// union or class. Out of these, only integral, floating point, reference,
// class and enum types are potentially convertible to int. Therefore,
// if a type is not a reference, integral, floating point or class and
// is convertible to int, it's an enum. Adding cv-qualification to a type
// does not change whether it's an enum.
//
// The is-convertible-to-int check is done only if all other checks pass,
// because it can't be used with some types (e.g. void or classes with
// inaccessible conversion operators).
template <class T> struct is_enum
: type_traits_internal::is_enum_impl<
is_same<T, void>::value ||
is_integral<T>::value ||
is_floating_point<T>::value ||
is_reference<T>::value ||
type_traits_internal::is_class_or_union<T>::value,
T> { };
template <class T> struct is_enum<const T> : is_enum<T> { };
template <class T> struct is_enum<volatile T> : is_enum<T> { };
template <class T> struct is_enum<const volatile T> : is_enum<T> { };
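// A worked instance of the note above, for a hypothetical "enum Color
// { kRed };": Color is not void, integral, floating point, a reference, or a
// class/union, and Color& is convertible to int, so is_enum<Color>::value is
// true.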
#endif
// is_reference is false except for reference types.
template<typename T> struct is_reference : false_type {};
template<typename T> struct is_reference<T&> : true_type {};
// We can't get is_pod right without compiler help, so fail conservatively.
// We will assume it's false except for arithmetic types, enumerations,
// pointers and cv-qualified versions thereof. Note that std::pair<T,U>
// is not a POD even if T and U are PODs.
template <class T> struct is_pod
: integral_constant<bool, (is_integral<T>::value ||
is_floating_point<T>::value ||
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
// is_enum is not available on MSVC.
is_enum<T>::value ||
#endif
is_pointer<T>::value)> { };
template <class T> struct is_pod<const T> : is_pod<T> { };
template <class T> struct is_pod<volatile T> : is_pod<T> { };
template <class T> struct is_pod<const volatile T> : is_pod<T> { };
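// For example, under this conservative rule is_pod<int>::value and
// is_pod<int*>::value are true, while is_pod<std::pair<int, int> >::value is
// false even though both members are PODs.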
// We can't get has_trivial_constructor right without compiler help, so
// fail conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial
// constructors. (3) array of a type with a trivial constructor.
// (4) const versions thereof.
template <class T> struct has_trivial_constructor : is_pod<T> { };
template <class T, class U> struct has_trivial_constructor<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_constructor<T>::value &&
has_trivial_constructor<U>::value)> { };
template <class A, int N> struct has_trivial_constructor<A[N]>
: has_trivial_constructor<A> { };
template <class T> struct has_trivial_constructor<const T>
: has_trivial_constructor<T> { };
// We can't get has_trivial_copy right without compiler help, so fail
// conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial copy
// constructors. (3) array of a type with a trivial copy constructor.
// (4) const versions thereof.
template <class T> struct has_trivial_copy : is_pod<T> { };
template <class T, class U> struct has_trivial_copy<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_copy<T>::value &&
has_trivial_copy<U>::value)> { };
template <class A, int N> struct has_trivial_copy<A[N]>
: has_trivial_copy<A> { };
template <class T> struct has_trivial_copy<const T> : has_trivial_copy<T> { };
// We can't get has_trivial_assign right without compiler help, so fail
// conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial assignment
// operators. (3) array of a type with a trivial assignment operator.
template <class T> struct has_trivial_assign : is_pod<T> { };
template <class T, class U> struct has_trivial_assign<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_assign<T>::value &&
has_trivial_assign<U>::value)> { };
template <class A, int N> struct has_trivial_assign<A[N]>
: has_trivial_assign<A> { };
// We can't get has_trivial_destructor right without compiler help, so
// fail conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial
// destructors. (3) array of a type with a trivial destructor.
// (4) const versions thereof.
template <class T> struct has_trivial_destructor : is_pod<T> { };
template <class T, class U> struct has_trivial_destructor<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_destructor<T>::value &&
has_trivial_destructor<U>::value)> { };
template <class A, int N> struct has_trivial_destructor<A[N]>
: has_trivial_destructor<A> { };
template <class T> struct has_trivial_destructor<const T>
: has_trivial_destructor<T> { };
// Specified by TR1 [4.7.1]
template<typename T> struct remove_const { typedef T type; };
template<typename T> struct remove_const<T const> { typedef T type; };
template<typename T> struct remove_volatile { typedef T type; };
template<typename T> struct remove_volatile<T volatile> { typedef T type; };
template<typename T> struct remove_cv {
typedef typename remove_const<typename remove_volatile<T>::type>::type type;
};
// Specified by TR1 [4.7.2] Reference modifications.
template<typename T> struct remove_reference { typedef T type; };
template<typename T> struct remove_reference<T&> { typedef T type; };
template <typename T> struct add_reference { typedef T& type; };
template <typename T> struct add_reference<T&> { typedef T& type; };
// Specified by TR1 [4.7.4] Pointer modifications.
template<typename T> struct remove_pointer { typedef T type; };
template<typename T> struct remove_pointer<T*> { typedef T type; };
template<typename T> struct remove_pointer<T* const> { typedef T type; };
template<typename T> struct remove_pointer<T* volatile> { typedef T type; };
template<typename T> struct remove_pointer<T* const volatile> {
typedef T type; };
// Specified by TR1 [4.6] Relationships between types
template<typename T, typename U> struct is_same : public false_type { };
template<typename T> struct is_same<T, T> : public true_type { };
// Specified by TR1 [4.6] Relationships between types
#if !(defined(__GNUC__) && __GNUC__ <= 3)
namespace type_traits_internal {
// This class is an implementation detail for is_convertible, and you
// don't need to know how it works to use is_convertible. For those
// who care: we declare two different functions, one whose argument is
// of type To and one with a variadic argument list. We give them
// return types of different size, so we can use sizeof to trick the
// compiler into telling us which function it would have chosen if we
// had called it with an argument of type From. See Alexandrescu's
// _Modern C++ Design_ for more details on this sort of trick.
template <typename From, typename To>
struct ConvertHelper {
static small_ Test(To);
static big_ Test(...);
static From Create();
enum {
value = sizeof(Test(Create())) == sizeof(small_)
};
};
} // namespace type_traits_internal
// Inherits from true_type if From is convertible to To, false_type otherwise.
template <typename From, typename To>
struct is_convertible
: integral_constant<bool,
type_traits_internal::ConvertHelper<From, To>::value> {
};
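// An illustrative instance, assuming hypothetical Base and Derived classes
// where Derived inherits from Base: is_convertible<Derived*, Base*>::value is
// true because ConvertHelper's Test(Create()) picks the small_ overload,
// while is_convertible<Base*, Derived*>::value is false.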
#endif
} // namespace internal
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_TYPE_TRAITS_H_
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ----
// Author: Matt Austern
#include <google/protobuf/stubs/type_traits.h>
#include <stdlib.h> // for exit()
#include <stdio.h>
#include <string>
#include <vector>
#include <google/protobuf/testing/googletest.h>
#include <gtest/gtest.h>
typedef int int32;
// IBM AIX typedefs `int64` in `sys/inttypes.h`, included transitively above.
#ifndef _AIX
typedef long int64;
#endif
using std::string;
using std::vector;
using std::pair;
// This assertion produces errors like "error: invalid use of
// incomplete type 'struct <unnamed>::AssertTypesEq<const int, int>'"
// when it fails.
template<typename T, typename U> struct AssertTypesEq;
template<typename T> struct AssertTypesEq<T, T> {};
#define COMPILE_ASSERT_TYPES_EQ(T, U) static_cast<void>(AssertTypesEq<T, U>())
// A user-defined POD type.
struct A {
int n_;
};
// A user-defined non-POD type with a trivial copy constructor.
class B {
public:
explicit B(int n) : n_(n) { }
private:
int n_;
};
// Another user-defined non-POD type with a trivial copy constructor.
// We will explicitly declare C to have a trivial copy constructor
// by specializing has_trivial_copy.
class C {
public:
explicit C(int n) : n_(n) { }
private:
int n_;
};
namespace google {
namespace protobuf {
namespace internal {
template<> struct has_trivial_copy<C> : true_type { };
} // namespace internal
} // namespace protobuf
} // namespace google
// Another user-defined non-POD type with a trivial assignment operator.
// We will explicitly declare D to have a trivial assignment operator
// by specializing has_trivial_assign.
class D {
public:
explicit D(int n) : n_(n) { }
private:
int n_;
};
namespace google {
namespace protobuf {
namespace internal {
template<> struct has_trivial_assign<D> : true_type { };
} // namespace internal
} // namespace protobuf
} // namespace google
// Another user-defined non-POD type with a trivial constructor.
// We will explicitly declare E to have a trivial constructor
// by specializing has_trivial_constructor.
class E {
public:
int n_;
};
namespace google {
namespace protobuf {
namespace internal {
template<> struct has_trivial_constructor<E> : true_type { };
} // namespace internal
} // namespace protobuf
} // namespace google
// Another user-defined non-POD type with a trivial destructor.
// We will explicitly declare F to have a trivial destructor
// by specializing has_trivial_destructor.
class F {
public:
explicit F(int n) : n_(n) { }
private:
int n_;
};
namespace google {
namespace protobuf {
namespace internal {
template<> struct has_trivial_destructor<F> : true_type { };
} // namespace internal
} // namespace protobuf
} // namespace google
enum G {};
union H {};
class I {
public:
operator int() const;
};
class J {
private:
operator int() const;
};
namespace google {
namespace protobuf {
namespace internal {
namespace {
// A base class and a derived class that inherits from it, used for
// testing conversion type traits.
class Base {
public:
virtual ~Base() { }
};
class Derived : public Base {
};
TEST(TypeTraitsTest, TestIsInteger) {
// Verify that is_integral is true for all integer types.
EXPECT_TRUE(is_integral<bool>::value);
EXPECT_TRUE(is_integral<char>::value);
EXPECT_TRUE(is_integral<unsigned char>::value);
EXPECT_TRUE(is_integral<signed char>::value);
EXPECT_TRUE(is_integral<wchar_t>::value);
EXPECT_TRUE(is_integral<int>::value);
EXPECT_TRUE(is_integral<unsigned int>::value);
EXPECT_TRUE(is_integral<short>::value);
EXPECT_TRUE(is_integral<unsigned short>::value);
EXPECT_TRUE(is_integral<long>::value);
EXPECT_TRUE(is_integral<unsigned long>::value);
// Verify that is_integral is false for a few non-integer types.
EXPECT_FALSE(is_integral<void>::value);
EXPECT_FALSE(is_integral<float>::value);
EXPECT_FALSE(is_integral<string>::value);
EXPECT_FALSE(is_integral<int*>::value);
EXPECT_FALSE(is_integral<A>::value);
EXPECT_FALSE((is_integral<pair<int, int> >::value));
// Verify that cv-qualified integral types are still integral, and
// cv-qualified non-integral types are still non-integral.
EXPECT_TRUE(is_integral<const char>::value);
EXPECT_TRUE(is_integral<volatile bool>::value);
EXPECT_TRUE(is_integral<const volatile unsigned int>::value);
EXPECT_FALSE(is_integral<const float>::value);
EXPECT_FALSE(is_integral<int* volatile>::value);
EXPECT_FALSE(is_integral<const volatile string>::value);
}
TEST(TypeTraitsTest, TestIsFloating) {
// Verify that is_floating_point is true for all floating-point types.
EXPECT_TRUE(is_floating_point<float>::value);
EXPECT_TRUE(is_floating_point<double>::value);
EXPECT_TRUE(is_floating_point<long double>::value);
// Verify that is_floating_point is false for a few non-float types.
EXPECT_FALSE(is_floating_point<void>::value);
EXPECT_FALSE(is_floating_point<long>::value);
EXPECT_FALSE(is_floating_point<string>::value);
EXPECT_FALSE(is_floating_point<float*>::value);
EXPECT_FALSE(is_floating_point<A>::value);
EXPECT_FALSE((is_floating_point<pair<int, int> >::value));
// Verify that cv-qualified floating point types are still floating, and
// cv-qualified non-floating types are still non-floating.
EXPECT_TRUE(is_floating_point<const float>::value);
EXPECT_TRUE(is_floating_point<volatile double>::value);
EXPECT_TRUE(is_floating_point<const volatile long double>::value);
EXPECT_FALSE(is_floating_point<const int>::value);
EXPECT_FALSE(is_floating_point<volatile string>::value);
EXPECT_FALSE(is_floating_point<const volatile char>::value);
}
TEST(TypeTraitsTest, TestIsPointer) {
// Verify that is_pointer is true for some pointer types.
EXPECT_TRUE(is_pointer<int*>::value);
EXPECT_TRUE(is_pointer<void*>::value);
EXPECT_TRUE(is_pointer<string*>::value);
EXPECT_TRUE(is_pointer<const void*>::value);
EXPECT_TRUE(is_pointer<volatile float* const*>::value);
// Verify that is_pointer is false for some non-pointer types.
EXPECT_FALSE(is_pointer<void>::value);
EXPECT_FALSE(is_pointer<float&>::value);
EXPECT_FALSE(is_pointer<long>::value);
EXPECT_FALSE(is_pointer<vector<int*> >::value);
EXPECT_FALSE(is_pointer<int[5]>::value);
// A function pointer is a pointer, but a function type, or a function
// reference type, is not.
EXPECT_TRUE(is_pointer<int (*)(int x)>::value);
EXPECT_FALSE(is_pointer<void(char x)>::value);
EXPECT_FALSE(is_pointer<double (&)(string x)>::value);
// Verify that is_pointer<T> is true for some cv-qualified pointer types,
// and false for some cv-qualified non-pointer types.
EXPECT_TRUE(is_pointer<int* const>::value);
EXPECT_TRUE(is_pointer<const void* volatile>::value);
EXPECT_TRUE(is_pointer<char** const volatile>::value);
EXPECT_FALSE(is_pointer<const int>::value);
EXPECT_FALSE(is_pointer<volatile std::vector<int*> >::value);
EXPECT_FALSE(is_pointer<const volatile double>::value);
}
TEST(TypeTraitsTest, TestIsEnum) {
// is_enum isn't supported on MSVC or gcc 3.x
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
// Verify that is_enum is true for enum types.
EXPECT_TRUE(is_enum<G>::value);
EXPECT_TRUE(is_enum<const G>::value);
EXPECT_TRUE(is_enum<volatile G>::value);
EXPECT_TRUE(is_enum<const volatile G>::value);
// Verify that is_enum is false for a few non-enum types.
EXPECT_FALSE(is_enum<void>::value);
EXPECT_FALSE(is_enum<G&>::value);
EXPECT_FALSE(is_enum<G[1]>::value);
EXPECT_FALSE(is_enum<const G[1]>::value);
EXPECT_FALSE(is_enum<G[]>::value);
EXPECT_FALSE(is_enum<int>::value);
EXPECT_FALSE(is_enum<float>::value);
EXPECT_FALSE(is_enum<A>::value);
EXPECT_FALSE(is_enum<A*>::value);
EXPECT_FALSE(is_enum<const A>::value);
EXPECT_FALSE(is_enum<H>::value);
EXPECT_FALSE(is_enum<I>::value);
EXPECT_FALSE(is_enum<J>::value);
EXPECT_FALSE(is_enum<void()>::value);
EXPECT_FALSE(is_enum<void(*)()>::value);
EXPECT_FALSE(is_enum<int A::*>::value);
EXPECT_FALSE(is_enum<void (A::*)()>::value);
#endif
}
TEST(TypeTraitsTest, TestIsReference) {
// Verifies that is_reference is true for all reference types.
typedef float& RefFloat;
EXPECT_TRUE(is_reference<float&>::value);
EXPECT_TRUE(is_reference<const int&>::value);
EXPECT_TRUE(is_reference<const int*&>::value);
EXPECT_TRUE(is_reference<int (&)(bool)>::value);
EXPECT_TRUE(is_reference<RefFloat>::value);
EXPECT_TRUE(is_reference<const RefFloat>::value);
EXPECT_TRUE(is_reference<volatile RefFloat>::value);
EXPECT_TRUE(is_reference<const volatile RefFloat>::value);
// Verifies that is_reference is false for all non-reference types.
EXPECT_FALSE(is_reference<float>::value);
EXPECT_FALSE(is_reference<const float>::value);
EXPECT_FALSE(is_reference<volatile float>::value);
EXPECT_FALSE(is_reference<const volatile float>::value);
EXPECT_FALSE(is_reference<const int*>::value);
EXPECT_FALSE(is_reference<int()>::value);
EXPECT_FALSE(is_reference<void(*)(const char&)>::value);
}
TEST(TypeTraitsTest, TestAddReference) {
COMPILE_ASSERT_TYPES_EQ(int&, add_reference<int>::type);
COMPILE_ASSERT_TYPES_EQ(const int&, add_reference<const int>::type);
COMPILE_ASSERT_TYPES_EQ(volatile int&,
add_reference<volatile int>::type);
COMPILE_ASSERT_TYPES_EQ(const volatile int&,
add_reference<const volatile int>::type);
COMPILE_ASSERT_TYPES_EQ(int&, add_reference<int&>::type);
COMPILE_ASSERT_TYPES_EQ(const int&, add_reference<const int&>::type);
COMPILE_ASSERT_TYPES_EQ(volatile int&,
add_reference<volatile int&>::type);
COMPILE_ASSERT_TYPES_EQ(const volatile int&,
add_reference<const volatile int&>::type);
}
TEST(TypeTraitsTest, TestIsPod) {
// Verify that arithmetic types and pointers are marked as PODs.
EXPECT_TRUE(is_pod<bool>::value);
EXPECT_TRUE(is_pod<char>::value);
EXPECT_TRUE(is_pod<unsigned char>::value);
EXPECT_TRUE(is_pod<signed char>::value);
EXPECT_TRUE(is_pod<wchar_t>::value);
EXPECT_TRUE(is_pod<int>::value);
EXPECT_TRUE(is_pod<unsigned int>::value);
EXPECT_TRUE(is_pod<short>::value);
EXPECT_TRUE(is_pod<unsigned short>::value);
EXPECT_TRUE(is_pod<long>::value);
EXPECT_TRUE(is_pod<unsigned long>::value);
EXPECT_TRUE(is_pod<float>::value);
EXPECT_TRUE(is_pod<double>::value);
EXPECT_TRUE(is_pod<long double>::value);
EXPECT_TRUE(is_pod<string*>::value);
EXPECT_TRUE(is_pod<A*>::value);
EXPECT_TRUE(is_pod<const B*>::value);
EXPECT_TRUE(is_pod<C**>::value);
EXPECT_TRUE(is_pod<const int>::value);
EXPECT_TRUE(is_pod<char* volatile>::value);
EXPECT_TRUE(is_pod<const volatile double>::value);
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
EXPECT_TRUE(is_pod<G>::value);
EXPECT_TRUE(is_pod<const G>::value);
EXPECT_TRUE(is_pod<volatile G>::value);
EXPECT_TRUE(is_pod<const volatile G>::value);
#endif
// Verify that some non-POD types are not marked as PODs.
EXPECT_FALSE(is_pod<void>::value);
EXPECT_FALSE(is_pod<string>::value);
EXPECT_FALSE((is_pod<pair<int, int> >::value));
EXPECT_FALSE(is_pod<A>::value);
EXPECT_FALSE(is_pod<B>::value);
EXPECT_FALSE(is_pod<C>::value);
EXPECT_FALSE(is_pod<const string>::value);
EXPECT_FALSE(is_pod<volatile A>::value);
EXPECT_FALSE(is_pod<const volatile B>::value);
}
TEST(TypeTraitsTest, TestHasTrivialConstructor) {
// Verify that arithmetic types and pointers have trivial constructors.
EXPECT_TRUE(has_trivial_constructor<bool>::value);
EXPECT_TRUE(has_trivial_constructor<char>::value);
EXPECT_TRUE(has_trivial_constructor<unsigned char>::value);
EXPECT_TRUE(has_trivial_constructor<signed char>::value);
EXPECT_TRUE(has_trivial_constructor<wchar_t>::value);
EXPECT_TRUE(has_trivial_constructor<int>::value);
EXPECT_TRUE(has_trivial_constructor<unsigned int>::value);
EXPECT_TRUE(has_trivial_constructor<short>::value);
EXPECT_TRUE(has_trivial_constructor<unsigned short>::value);
EXPECT_TRUE(has_trivial_constructor<long>::value);
EXPECT_TRUE(has_trivial_constructor<unsigned long>::value);
EXPECT_TRUE(has_trivial_constructor<float>::value);
EXPECT_TRUE(has_trivial_constructor<double>::value);
EXPECT_TRUE(has_trivial_constructor<long double>::value);
EXPECT_TRUE(has_trivial_constructor<string*>::value);
EXPECT_TRUE(has_trivial_constructor<A*>::value);
EXPECT_TRUE(has_trivial_constructor<const B*>::value);
EXPECT_TRUE(has_trivial_constructor<C**>::value);
// Verify that pairs and arrays of such types have trivial
// constructors.
typedef int int10[10];
EXPECT_TRUE((has_trivial_constructor<pair<int, char*> >::value));
EXPECT_TRUE(has_trivial_constructor<int10>::value);
// Verify that pairs of types without trivial constructors
// are not marked as trivial.
EXPECT_FALSE((has_trivial_constructor<pair<int, string> >::value));
EXPECT_FALSE((has_trivial_constructor<pair<string, int> >::value));
// Verify that types without trivial constructors are
// correctly marked as such.
EXPECT_FALSE(has_trivial_constructor<string>::value);
EXPECT_FALSE(has_trivial_constructor<vector<int> >::value);
// Verify that E, which we have declared to have a trivial
// constructor, is correctly marked as such.
EXPECT_TRUE(has_trivial_constructor<E>::value);
}
TEST(TypeTraitsTest, TestHasTrivialCopy) {
// Verify that arithmetic types and pointers have trivial copy
// constructors.
EXPECT_TRUE(has_trivial_copy<bool>::value);
EXPECT_TRUE(has_trivial_copy<char>::value);
EXPECT_TRUE(has_trivial_copy<unsigned char>::value);
EXPECT_TRUE(has_trivial_copy<signed char>::value);
EXPECT_TRUE(has_trivial_copy<wchar_t>::value);
EXPECT_TRUE(has_trivial_copy<int>::value);
EXPECT_TRUE(has_trivial_copy<unsigned int>::value);
EXPECT_TRUE(has_trivial_copy<short>::value);
EXPECT_TRUE(has_trivial_copy<unsigned short>::value);
EXPECT_TRUE(has_trivial_copy<long>::value);
EXPECT_TRUE(has_trivial_copy<unsigned long>::value);
EXPECT_TRUE(has_trivial_copy<float>::value);
EXPECT_TRUE(has_trivial_copy<double>::value);
EXPECT_TRUE(has_trivial_copy<long double>::value);
EXPECT_TRUE(has_trivial_copy<string*>::value);
EXPECT_TRUE(has_trivial_copy<A*>::value);
EXPECT_TRUE(has_trivial_copy<const B*>::value);
EXPECT_TRUE(has_trivial_copy<C**>::value);
// Verify that pairs and arrays of such types have trivial
// copy constructors.
typedef int int10[10];
EXPECT_TRUE((has_trivial_copy<pair<int, char*> >::value));
EXPECT_TRUE(has_trivial_copy<int10>::value);
// Verify that pairs of types without trivial copy constructors
// are not marked as trivial.
EXPECT_FALSE((has_trivial_copy<pair<int, string> >::value));
EXPECT_FALSE((has_trivial_copy<pair<string, int> >::value));
// Verify that types without trivial copy constructors are
// correctly marked as such.
EXPECT_FALSE(has_trivial_copy<string>::value);
EXPECT_FALSE(has_trivial_copy<vector<int> >::value);
// Verify that C, which we have declared to have a trivial
// copy constructor, is correctly marked as such.
EXPECT_TRUE(has_trivial_copy<C>::value);
}
TEST(TypeTraitsTest, TestHasTrivialAssign) {
// Verify that arithmetic types and pointers have trivial assignment
// operators.
EXPECT_TRUE(has_trivial_assign<bool>::value);
EXPECT_TRUE(has_trivial_assign<char>::value);
EXPECT_TRUE(has_trivial_assign<unsigned char>::value);
EXPECT_TRUE(has_trivial_assign<signed char>::value);
EXPECT_TRUE(has_trivial_assign<wchar_t>::value);
EXPECT_TRUE(has_trivial_assign<int>::value);
EXPECT_TRUE(has_trivial_assign<unsigned int>::value);
EXPECT_TRUE(has_trivial_assign<short>::value);
EXPECT_TRUE(has_trivial_assign<unsigned short>::value);
EXPECT_TRUE(has_trivial_assign<long>::value);
EXPECT_TRUE(has_trivial_assign<unsigned long>::value);
EXPECT_TRUE(has_trivial_assign<float>::value);
EXPECT_TRUE(has_trivial_assign<double>::value);
EXPECT_TRUE(has_trivial_assign<long double>::value);
EXPECT_TRUE(has_trivial_assign<string*>::value);
EXPECT_TRUE(has_trivial_assign<A*>::value);
EXPECT_TRUE(has_trivial_assign<const B*>::value);
EXPECT_TRUE(has_trivial_assign<C**>::value);
// Verify that pairs and arrays of such types have trivial
// assignment operators.
typedef int int10[10];
EXPECT_TRUE((has_trivial_assign<pair<int, char*> >::value));
EXPECT_TRUE(has_trivial_assign<int10>::value);
// Verify that pairs of types without trivial assignment operators
// are not marked as trivial.
EXPECT_FALSE((has_trivial_assign<pair<int, string> >::value));
EXPECT_FALSE((has_trivial_assign<pair<string, int> >::value));
// Verify that types without trivial assignment operators are
// correctly marked as such.
EXPECT_FALSE(has_trivial_assign<string>::value);
EXPECT_FALSE(has_trivial_assign<vector<int> >::value);
// Verify that D, which we have declared to have a trivial
// assignment operator, is correctly marked as such.
EXPECT_TRUE(has_trivial_assign<D>::value);
}
TEST(TypeTraitsTest, TestHasTrivialDestructor) {
// Verify that arithmetic types and pointers have trivial destructors.
EXPECT_TRUE(has_trivial_destructor<bool>::value);
EXPECT_TRUE(has_trivial_destructor<char>::value);
EXPECT_TRUE(has_trivial_destructor<unsigned char>::value);
EXPECT_TRUE(has_trivial_destructor<signed char>::value);
EXPECT_TRUE(has_trivial_destructor<wchar_t>::value);
EXPECT_TRUE(has_trivial_destructor<int>::value);
EXPECT_TRUE(has_trivial_destructor<unsigned int>::value);
EXPECT_TRUE(has_trivial_destructor<short>::value);
EXPECT_TRUE(has_trivial_destructor<unsigned short>::value);
EXPECT_TRUE(has_trivial_destructor<long>::value);
EXPECT_TRUE(has_trivial_destructor<unsigned long>::value);
EXPECT_TRUE(has_trivial_destructor<float>::value);
EXPECT_TRUE(has_trivial_destructor<double>::value);
EXPECT_TRUE(has_trivial_destructor<long double>::value);
EXPECT_TRUE(has_trivial_destructor<string*>::value);
EXPECT_TRUE(has_trivial_destructor<A*>::value);
EXPECT_TRUE(has_trivial_destructor<const B*>::value);
EXPECT_TRUE(has_trivial_destructor<C**>::value);
// Verify that pairs and arrays of such types have trivial
// destructors.
typedef int int10[10];
EXPECT_TRUE((has_trivial_destructor<pair<int, char*> >::value));
EXPECT_TRUE(has_trivial_destructor<int10>::value);
// Verify that pairs of types without trivial destructors
// are not marked as trivial.
EXPECT_FALSE((has_trivial_destructor<pair<int, string> >::value));
EXPECT_FALSE((has_trivial_destructor<pair<string, int> >::value));
// Verify that types without trivial destructors are
// correctly marked as such.
EXPECT_FALSE(has_trivial_destructor<string>::value);
EXPECT_FALSE(has_trivial_destructor<vector<int> >::value);
// Verify that F, which we have declared to have a trivial
// destructor, is correctly marked as such.
EXPECT_TRUE(has_trivial_destructor<F>::value);
}
// Tests remove_pointer.
TEST(TypeTraitsTest, TestRemovePointer) {
COMPILE_ASSERT_TYPES_EQ(int, remove_pointer<int>::type);
COMPILE_ASSERT_TYPES_EQ(int, remove_pointer<int*>::type);
COMPILE_ASSERT_TYPES_EQ(const int, remove_pointer<const int*>::type);
COMPILE_ASSERT_TYPES_EQ(int, remove_pointer<int* const>::type);
COMPILE_ASSERT_TYPES_EQ(int, remove_pointer<int* volatile>::type);
}
TEST(TypeTraitsTest, TestRemoveConst) {
COMPILE_ASSERT_TYPES_EQ(int, remove_const<int>::type);
COMPILE_ASSERT_TYPES_EQ(int, remove_const<const int>::type);
COMPILE_ASSERT_TYPES_EQ(int *, remove_const<int * const>::type);
// TR1 examples.
COMPILE_ASSERT_TYPES_EQ(const int *, remove_const<const int *>::type);
COMPILE_ASSERT_TYPES_EQ(volatile int,
remove_const<const volatile int>::type);
}
TEST(TypeTraitsTest, TestRemoveVolatile) {
COMPILE_ASSERT_TYPES_EQ(int, remove_volatile<int>::type);
COMPILE_ASSERT_TYPES_EQ(int, remove_volatile<volatile int>::type);
COMPILE_ASSERT_TYPES_EQ(int *, remove_volatile<int * volatile>::type);
// TR1 examples.
COMPILE_ASSERT_TYPES_EQ(volatile int *,
remove_volatile<volatile int *>::type);
COMPILE_ASSERT_TYPES_EQ(const int,
remove_volatile<const volatile int>::type);
}
TEST(TypeTraitsTest, TestRemoveCV) {
COMPILE_ASSERT_TYPES_EQ(int, remove_cv<int>::type);
COMPILE_ASSERT_TYPES_EQ(int, remove_cv<volatile int>::type);
COMPILE_ASSERT_TYPES_EQ(int, remove_cv<const int>::type);
COMPILE_ASSERT_TYPES_EQ(int *, remove_cv<int * const volatile>::type);
// TR1 examples.
COMPILE_ASSERT_TYPES_EQ(const volatile int *,
remove_cv<const volatile int *>::type);
COMPILE_ASSERT_TYPES_EQ(int,
remove_cv<const volatile int>::type);
}
TEST(TypeTraitsTest, TestRemoveReference) {
COMPILE_ASSERT_TYPES_EQ(int, remove_reference<int>::type);
COMPILE_ASSERT_TYPES_EQ(int, remove_reference<int&>::type);
COMPILE_ASSERT_TYPES_EQ(const int, remove_reference<const int&>::type);
COMPILE_ASSERT_TYPES_EQ(int*, remove_reference<int * &>::type);
}
TEST(TypeTraitsTest, TestIsSame) {
EXPECT_TRUE((is_same<int32, int32>::value));
EXPECT_FALSE((is_same<int32, int64>::value));
EXPECT_FALSE((is_same<int64, int32>::value));
EXPECT_FALSE((is_same<int, const int>::value));
EXPECT_TRUE((is_same<void, void>::value));
EXPECT_FALSE((is_same<void, int>::value));
EXPECT_FALSE((is_same<int, void>::value));
EXPECT_TRUE((is_same<int*, int*>::value));
EXPECT_TRUE((is_same<void*, void*>::value));
EXPECT_FALSE((is_same<int*, void*>::value));
EXPECT_FALSE((is_same<void*, int*>::value));
EXPECT_FALSE((is_same<void*, const void*>::value));
EXPECT_FALSE((is_same<void*, void* const>::value));
EXPECT_TRUE((is_same<Base*, Base*>::value));
EXPECT_TRUE((is_same<Derived*, Derived*>::value));
EXPECT_FALSE((is_same<Base*, Derived*>::value));
EXPECT_FALSE((is_same<Derived*, Base*>::value));
}
TEST(TypeTraitsTest, TestConvertible) {
#if !(defined(__GNUC__) && __GNUC__ <= 3)
EXPECT_TRUE((is_convertible<int, int>::value));
EXPECT_TRUE((is_convertible<int, long>::value));
EXPECT_TRUE((is_convertible<long, int>::value));
EXPECT_TRUE((is_convertible<int*, void*>::value));
EXPECT_FALSE((is_convertible<void*, int*>::value));
EXPECT_TRUE((is_convertible<Derived*, Base*>::value));
EXPECT_FALSE((is_convertible<Base*, Derived*>::value));
EXPECT_TRUE((is_convertible<Derived*, const Base*>::value));
EXPECT_FALSE((is_convertible<const Derived*, Base*>::value));
#endif
}
} // anonymous namespace
} // namespace internal
} // namespace protobuf
} // namespace google