Commit 4f68d92c authored Jul 25, 2013 by xiaofeng@google.com
Add support for ThreadSanitizer atomicops API to protobuf - patch from glider@
parent c9f69500
Showing 2 changed files with 396 additions and 1 deletion:

  src/google/protobuf/stubs/atomicops.h (+4, -1)
  src/google/protobuf/stubs/atomicops_internals_tsan.h (+392, -0)
src/google/protobuf/stubs/atomicops.h

@@ -160,8 +160,11 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
 #define GOOGLE_PROTOBUF_ATOMICOPS_ERROR \
 #error "Atomic operations are not supported on your platform"
 
+// ThreadSanitizer, http://clang.llvm.org/docs/ThreadSanitizer.html.
+#if defined(THREAD_SANITIZER)
+#include <google/protobuf/stubs/atomicops_internals_tsan.h>
 // MSVC.
-#if defined(_MSC_VER)
+#elif defined(_MSC_VER)
 #if defined(GOOGLE_PROTOBUF_ARCH_IA32) || defined(GOOGLE_PROTOBUF_ARCH_X64)
 #include <google/protobuf/stubs/atomicops_internals_x86_msvc.h>
 #else
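The new branch only fires when the build defines THREAD_SANITIZER; clang does not predefine that macro under -fsanitize=thread, so build configurations are expected to set it themselves. A minimal sketch of how a build could derive it from clang's feature test (this guard is illustrative only, not part of the commit):

// Illustrative only, not in this commit: define THREAD_SANITIZER when
// clang reports that ThreadSanitizer instrumentation is active.
#if defined(__has_feature)
# if __has_feature(thread_sanitizer) && !defined(THREAD_SANITIZER)
#  define THREAD_SANITIZER 1
# endif
#endif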
src/google/protobuf/stubs/atomicops_internals_tsan.h (new file, mode 100644)
// Protocol Buffers - Google's data interchange format
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer (http://clang.llvm.org/docs/ThreadSanitizer.html).
// Use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace google {
namespace protobuf {
namespace internal {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

typedef char  __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int   __tsan_atomic32;
typedef long  __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char     __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
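// Note: protobuf does not define the __tsan_* functions declared above.
// When a translation unit is compiled with -fsanitize=thread, the
// ThreadSanitizer runtime (compiler-rt) supplies these entry points at
// link time.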
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}
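// __tsan_atomic32_compare_exchange_strong stores the observed value into
// |cmp| on failure and leaves it equal to |old_value| on success, so
// returning |cmp| always yields the previous contents of |ptr|, as the
// CompareAndSwap contract requires.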
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}
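// __tsan_atomic32_fetch_add returns the value of |*ptr| before the
// addition; adding |increment| back produces the new value, which is what
// the *_AtomicIncrement wrappers return.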
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
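// Acquire_Store and Release_Load pair a relaxed access with a seq_cst
// thread fence rather than mapping onto a single __tsan_* memory order,
// mirroring how the other atomicops backends implement these two
// operations.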
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
}  // namespace internal
}  // namespace protobuf
}  // namespace google
#undef ATOMICOPS_COMPILER_BARRIER
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
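For context, here is how callers typically reach this backend through atomicops.h. The spinlock below is an illustrative sketch assuming the wrappers defined above; its names and structure are not code from this commit:

#include <google/protobuf/stubs/atomicops.h>

using google::protobuf::internal::Atomic32;
using google::protobuf::internal::Acquire_CompareAndSwap;
using google::protobuf::internal::Release_Store;

// Hypothetical spinlock built on the atomicops API. Under ThreadSanitizer
// these calls resolve to the __tsan_atomic32_* functions declared in the
// new header, so the race detector sees the exact memory ordering that
// each operation claims.
class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Acquire_CompareAndSwap returns the previous value; 0 means the
    // 0 -> 1 transition succeeded and this thread owns the lock.
    while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the holder releases the lock.
    }
  }

  void Unlock() {
    Release_Store(&state_, 0);  // Publishes critical-section writes.
  }

 private:
  volatile Atomic32 state_;
};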