submodule / protobuf / Commits / 3aa5ea95

Commit 3aa5ea95, authored Apr 09, 2014 by xiaofeng@google.com
Parent: 2ca19bd8

Fix and improve arm64 atomic operations in protobuf stubs (patch from rmcilroy@).
https://codereview.appspot.com/86030044/

Showing 1 changed file with 56 additions and 97 deletions:
src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h (+56, -97)
@@ -38,12 +38,16 @@ namespace protobuf {
 namespace internal {
 
 inline void MemoryBarrier() {
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish  \n\t"  // Data memory barrier.
-    ::: "memory"
-  );  // NOLINT
+  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
 }
 
+// NoBarrier versions of the operation include "memory" in the clobber list.
+// This is not required for direct usage of the NoBarrier versions of the
+// operations. However this is required for correctness when they are used as
+// part of the Acquire or Release versions, to ensure that nothing from outside
+// the call is reordered between the operation and the memory barrier. This does
+// not change the code generated, so has no or minimal impact on the
+// NoBarrier operations.
+
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
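Note on the new comment block: a "memory" clobber is a compiler-level barrier, separate from the dmb instruction itself. A minimal standalone sketch of that compiler-only half (illustrative, not part of this patch):

#include <stdint.h>

// Compiler-only barrier: emits no instruction, but the "memory" clobber
// forbids GCC from caching memory values in registers across it or from
// reordering loads/stores past it. MemoryBarrier() above adds the hardware
// half with "dmb ish".
static inline void CompilerBarrier() {
  __asm__ __volatile__ ("" ::: "memory");
}

int32_t data = 0;
int32_t ready = 0;

void Publish(int32_t v) {
  data = v;
  CompilerBarrier();  // without this, GCC may sink the 'data' store below 'ready'
  ready = 1;          // the CPU may still reorder; cross-thread code needs dmb too
}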
@@ -59,13 +63,12 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
     "stxr %w[temp], %w[new_value], %[ptr] \n\t"  // Try to store the new value.
     "cbnz %w[temp], 0b \n\t"                     // Retry if it did not work.
     "1: \n\t"
     "clrex \n\t"                                 // In case we didn't swap.
     : [prev]"=&r" (prev),
       [temp]"=&r" (temp),
       [ptr]"+Q" (*ptr)
-    : [old_value]"r" (old_value),
+    : [old_value]"IJr" (old_value),
       [new_value]"r" (new_value)
-    : "memory", "cc"
+    : "cc", "memory"
   );  // NOLINT
 
   return prev;
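The substantive change in this hunk is "r" becoming "IJr": on AArch64, the machine constraint "I" matches a valid 12-bit add/sub immediate (0..4095, optionally shifted) and "J" its negation, so a constant old_value can be folded into the cmp as an immediate instead of occupying a register. A standalone sketch of the idiom (hypothetical helper, not from the patch; the Linux kernel's arm64 atomics use the same trick):

#include <stdint.h>

// With "IJr", GCC picks the immediate form when 'delta' is a suitable
// compile-time constant and a register otherwise. GAS accepts negative
// add/sub immediates and flips the mnemonic, which is what "J" relies on.
static inline int32_t AddDelta(int32_t base, int32_t delta) {
  int32_t out;
  __asm__ ("add %w[out], %w[base], %w[delta]"
           : [out] "=r" (out)
           : [base] "r" (base), [delta] "IJr" (delta));
  return out;
}

// AddDelta(x, 42) can compile to:  add w0, w1, #42   (no extra mov)
// AddDelta(x, y)  compiles to:     add w0, w1, w2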
@@ -105,7 +108,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
     : [result]"=&r" (result),
       [temp]"=&r" (temp),
       [ptr]"+Q" (*ptr)
-    : [increment]"r" (increment)
+    : [increment]"IJr" (increment)
     : "memory"
   );  // NOLINT
@@ -114,8 +117,10 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
+  Atomic32 result;
+
   MemoryBarrier();
-  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  result = NoBarrier_AtomicIncrement(ptr, increment);
   MemoryBarrier();
 
   return result;
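Behaviorally nothing changes here; the declaration of result is merely hoisted out of the barrier pair. The contract, an increment with a full barrier on each side, is roughly what C++11 spells as fences around a relaxed fetch_add. A hedged analogue (illustration only; the stubs predate <atomic>):

#include <atomic>
#include <cstdint>

// Rough C++11 counterpart of Barrier_AtomicIncrement. Returns the
// incremented value, matching the stubs' convention.
inline int32_t BarrierIncrementStd(std::atomic<int32_t>* p, int32_t inc) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  int32_t result = p->fetch_add(inc, std::memory_order_relaxed) + inc;
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return result;
}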
@@ -125,27 +130,9 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  Atomic32 prev;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "0: \n\t"
-    "ldxr %w[prev], %[ptr] \n\t"                 // Load the previous value.
-    "cmp %w[prev], %w[old_value] \n\t"
-    "bne 1f \n\t"
-    "stxr %w[temp], %w[new_value], %[ptr] \n\t"  // Try to store the new value.
-    "cbnz %w[temp], 0b \n\t"                     // Retry if it did not work.
-    "dmb ish \n\t"                               // Data memory barrier.
-    "1: \n\t"
-    // If the compare failed the 'dmb' is unnecessary, but we still need a
-    // 'clrex'.
-    "clrex \n\t"
-    : [prev]"=&r" (prev),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [old_value]"r" (old_value),
-      [new_value]"r" (new_value)
-    : "memory", "cc"
-  );  // NOLINT
+  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
 
   return prev;
 }
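The old inline version skipped the dmb when the compare failed (hence the branch over it) at the cost of duplicating the whole ldxr/stxr loop. The rewrite composes the same semantics from the pieces above: exchange with no ordering, then a barrier after it. In C++11 fence terms, approximately (illustrative analogue, not the stubs code):

#include <atomic>
#include <cstdint>

// Acquire CAS by composition, mirroring the new code path: the exchange
// first, the ordering second.
inline int32_t AcquireCasStd(std::atomic<int32_t>* p,
                             int32_t old_value, int32_t new_value) {
  int32_t prev = old_value;
  p->compare_exchange_strong(prev, new_value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);  // later accesses stay below
  return prev;  // the value observed at *p, matching the stubs' contract
}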
@@ -154,27 +141,9 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  Atomic32 prev;
-  int32_t temp;
-
   MemoryBarrier();
-
-  __asm__ __volatile__ (  // NOLINT
-    "0: \n\t"
-    "ldxr %w[prev], %[ptr] \n\t"                 // Load the previous value.
-    "cmp %w[prev], %w[old_value] \n\t"
-    "bne 1f \n\t"
-    "stxr %w[temp], %w[new_value], %[ptr] \n\t"  // Try to store the new value.
-    "cbnz %w[temp], 0b \n\t"                     // Retry if it did not work.
-    "1: \n\t"
-    // If the compare failed the we still need a 'clrex'.
-    "clrex \n\t"
-    : [prev]"=&r" (prev),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [old_value]"r" (old_value),
-      [new_value]"r" (new_value)
-    : "memory", "cc"
-  );  // NOLINT
+  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 
   return prev;
 }
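Acquire on the lock-taking CAS and release on the lock-dropping store is the canonical use of this pair. A usage sketch built on the header's own primitives (the SpinLock type here is hypothetical; it assumes the declarations from google/protobuf/stubs/atomicops.h):

#include <google/protobuf/stubs/atomicops.h>

using google::protobuf::internal::Atomic32;
using google::protobuf::internal::Acquire_CompareAndSwap;
using google::protobuf::internal::Release_Store;

struct SpinLock {
  Atomic32 state;  // 0 = free, 1 = held

  void Lock() {
    // Acquire: accesses in the critical section cannot be hoisted above
    // the successful CAS.
    while (Acquire_CompareAndSwap(&state, 0, 1) != 0) {
      // spin
    }
  }

  void Unlock() {
    // Release: accesses in the critical section cannot sink below the
    // store that publishes the lock as free.
    Release_Store(&state, 0);
  }
};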
@@ -189,8 +158,12 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
 }
 
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
+  __asm__ __volatile__ (  // NOLINT
+    "stlr %w[value], %[ptr]  \n\t"
+    : [ptr]"=Q" (*ptr)
+    : [value]"r" (value)
+    : "memory"
+  );  // NOLINT
 }
 
 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
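This is the more interesting half of the patch. The old code paid for a full dmb ish before a plain store; the new code uses ARMv8's dedicated store-release instruction, stlr, which makes the store itself a release operation with no separate barrier. It is the same lowering C++11 compilers use on AArch64 (mapping shown for illustration only):

#include <atomic>
#include <cstdint>

// A release store compiles to the very stlr the new asm writes by hand.
inline void ReleaseStoreStd(std::atomic<int32_t>* p, int32_t v) {
  p->store(v, std::memory_order_release);  // -> stlr w1, [x0]
}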
@@ -198,8 +171,15 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
 }
 
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
+  Atomic32 value;
+
+  __asm__ __volatile__ (  // NOLINT
+    "ldar %w[value], %[ptr]  \n\t"
+    : [value]"=r" (value)
+    : [ptr]"Q" (*ptr)
+    : "memory"
+  );  // NOLINT
+
   return value;
 }
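Symmetrically, Acquire_Load becomes a single ldar instead of a plain load followed by dmb ish; ldar also pairs with stlr to give sequentially consistent ordering between themselves. The pattern these two primitives exist for is flag-guarded publication; a hedged usage sketch with the header's functions (driver code hypothetical, declarations assumed from google/protobuf/stubs/atomicops.h):

#include <google/protobuf/stubs/atomicops.h>

using google::protobuf::internal::Atomic32;
using google::protobuf::internal::Acquire_Load;
using google::protobuf::internal::Release_Store;

int payload = 0;             // plain data, guarded by 'ready'
volatile Atomic32 ready = 0;

void Producer() {
  payload = 42;              // 1. write the data
  Release_Store(&ready, 1);  // 2. stlr: the 'payload' write cannot sink below
}

void Consumer() {
  while (Acquire_Load(&ready) == 0) {
    // spin: ldar keeps the 'payload' read below from being hoisted above
  }
  // here 'payload' is guaranteed to be 42
}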
@@ -225,13 +205,12 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
     "stxr %w[temp], %[new_value], %[ptr] \n\t"
     "cbnz %w[temp], 0b \n\t"
     "1: \n\t"
     "clrex \n\t"
     : [prev]"=&r" (prev),
       [temp]"=&r" (temp),
       [ptr]"+Q" (*ptr)
-    : [old_value]"r" (old_value),
+    : [old_value]"IJr" (old_value),
       [new_value]"r" (new_value)
-    : "memory", "cc"
+    : "cc", "memory"
   );  // NOLINT
 
   return prev;
@@ -271,7 +250,7 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
     : [result]"=&r" (result),
       [temp]"=&r" (temp),
       [ptr]"+Q" (*ptr)
-    : [increment]"r" (increment)
+    : [increment]"IJr" (increment)
     : "memory"
   );  // NOLINT
@@ -280,8 +259,10 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
+  Atomic64 result;
+
   MemoryBarrier();
-  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+  result = NoBarrier_AtomicIncrement(ptr, increment);
   MemoryBarrier();
 
   return result;
@@ -291,25 +272,9 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  Atomic64 prev;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "0: \n\t"
-    "ldxr %[prev], %[ptr] \n\t"
-    "cmp %[prev], %[old_value] \n\t"
-    "bne 1f \n\t"
-    "stxr %w[temp], %[new_value], %[ptr] \n\t"
-    "cbnz %w[temp], 0b \n\t"
-    "dmb ish \n\t"
-    "1: \n\t"
-    "clrex \n\t"
-    : [prev]"=&r" (prev),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [old_value]"r" (old_value),
-      [new_value]"r" (new_value)
-    : "memory", "cc"
-  );  // NOLINT
+  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
 
   return prev;
 }
@@ -318,26 +283,9 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  Atomic64 prev;
-  int32_t temp;
-
   MemoryBarrier();
-
-  __asm__ __volatile__ (  // NOLINT
-    "0: \n\t"
-    "ldxr %[prev], %[ptr] \n\t"
-    "cmp %[prev], %[old_value] \n\t"
-    "bne 1f \n\t"
-    "stxr %w[temp], %[new_value], %[ptr] \n\t"
-    "cbnz %w[temp], 0b \n\t"
-    "1: \n\t"
-    "clrex \n\t"
-    : [prev]"=&r" (prev),
-      [temp]"=&r" (temp),
-      [ptr]"+Q" (*ptr)
-    : [old_value]"r" (old_value),
-      [new_value]"r" (new_value)
-    : "memory", "cc"
-  );  // NOLINT
+  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 
   return prev;
 }
@@ -352,8 +300,12 @@ inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
 }
 
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
-  *ptr = value;
+  __asm__ __volatile__ (  // NOLINT
+    "stlr %x[value], %[ptr]  \n\t"
+    : [ptr]"=Q" (*ptr)
+    : [value]"r" (value)
+    : "memory"
+  );  // NOLINT
 }
 
 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
@@ -361,8 +313,15 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
 }
 
 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = *ptr;
-  MemoryBarrier();
+  Atomic64 value;
+
+  __asm__ __volatile__ (  // NOLINT
+    "ldar %x[value], %[ptr]  \n\t"
+    : [value]"=r" (value)
+    : [ptr]"Q" (*ptr)
+    : "memory"
+  );  // NOLINT
+
   return value;
 }
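The Atomic64 bodies differ from the 32-bit ones only in the operand-width modifier inside the templates: %w[value] names the 32-bit W view of a register, %x[value] the 64-bit X view. A standalone illustration (not from the patch):

#include <stdint.h>

// Same instruction, different register view, selected by the modifier.
static inline uint32_t LoadAcquire32(const volatile uint32_t* p) {
  uint32_t v;
  __asm__ __volatile__ ("ldar %w[v], %[p]"
                        : [v] "=r" (v) : [p] "Q" (*p) : "memory");
  return v;  // ldar w0, [x1]
}

static inline uint64_t LoadAcquire64(const volatile uint64_t* p) {
  uint64_t v;
  __asm__ __volatile__ ("ldar %x[v], %[p]"
                        : [v] "=r" (v) : [p] "Q" (*p) : "memory");
  return v;  // ldar x0, [x1]
}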