Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
C
capnproto
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
capnproto
Commits
fc620c73
Commit
fc620c73
authored
Dec 22, 2017
by
Kenton Varda
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add atomic refcounting implementation.
parent
cec589a9
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
116 additions
and
0 deletions
+116
-0
refcount.c++
c++/src/kj/refcount.c++
+33
-0
refcount.h
c++/src/kj/refcount.h
+83
-0
No files found.
c++/src/kj/refcount.c++
View file @
fc620c73
...
...
@@ -25,6 +25,9 @@
namespace
kj
{
// =======================================================================================
// Non-atomic (thread-unsafe) refcounting
Refcounted::~Refcounted() noexcept(false) {
  // A non-zero count means some Own<T> still points at this object and would be
  // left dangling by this destruction, so treat it as a fatal bug.
  KJ_ASSERT(refcount == 0, "Refcounted object deleted with non-zero refcount.");
}
...
...
@@ -35,4 +38,34 @@ void Refcounted::disposeImpl(void* pointer) const {
}
}
// =======================================================================================
// Atomic (thread-safe) refcounting
AtomicRefcounted::~AtomicRefcounted() noexcept(false) {
  // By the time the destructor runs, no other thread may legitimately hold a
  // reference, so a plain (non-atomic) read of `refcount` is sufficient here.
  KJ_ASSERT(refcount == 0, "Refcounted object deleted with non-zero refcount.");
}
void AtomicRefcounted::disposeImpl(void* pointer) const {
  // Drop one reference. RELEASE ordering publishes this thread's writes to the
  // object before the count can be observed to drop.
  if (__atomic_sub_fetch(&refcount, 1, __ATOMIC_RELEASE) == 0) {
    // We performed the final decrement. The ACQUIRE fence pairs with the
    // RELEASE decrements done by other threads, ensuring all of their writes
    // to the object are visible before we run the destructor.
    __atomic_thread_fence(__ATOMIC_ACQUIRE);
    delete this;
  }
}
bool AtomicRefcounted::addRefWeakInternal() const {
  // Increment the refcount only if it is still non-zero, using a CAS loop so
  // the zero check and the increment are atomic with respect to each other.
  for (;;) {
    uint orig = __atomic_load_n(&refcount, __ATOMIC_RELAXED);
    if (orig == 0) {
      // Refcount already hit zero. Destructor is already running so we can't revive the object.
      return false;
    }
    // Weak compare-exchange (4th arg `true`) may fail spuriously; the
    // enclosing loop simply retries with the freshly-loaded value.
    if (__atomic_compare_exchange_n(&refcount, &orig, orig + 1, true,
                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
      // Successfully incremented refcount without letting it hit zero.
      return true;
    }
  }
}
}
// namespace kj
c++/src/kj/refcount.h
View file @
fc620c73
...
...
@@ -30,6 +30,9 @@
namespace
kj
{
// =======================================================================================
// Non-atomic (thread-unsafe) refcounting
class
Refcounted
:
private
Disposer
{
// Subclass this to create a class that contains a reference count. Then, use
// `kj::refcounted<T>()` to allocate a new refcounted pointer.
...
...
@@ -102,6 +105,86 @@ Own<T> Refcounted::addRefInternal(T* object) {
return
Own
<
T
>
(
object
,
*
refcounted
);
}
// =======================================================================================
// Atomic (thread-safe) refcounting
//
// Warning: Atomic ops are SLOW.
class AtomicRefcounted: private kj::Disposer {
  // Like Refcounted, but the count is manipulated with atomic operations so
  // that references may be added and dropped from multiple threads. Allocate
  // with kj::atomicRefcounted<T>() and add references with kj::atomicAddRef()
  // or kj::atomicAddRefWeak() (declared as friends below).

public:
  virtual ~AtomicRefcounted() noexcept(false);

  inline bool isShared() const {
    // Returns true if more than one reference currently exists. The ACQUIRE
    // load pairs with the RELEASE decrement in disposeImpl(), so a false
    // result means this thread holds the only live reference.
    return __atomic_load_n(&refcount, __ATOMIC_ACQUIRE) > 1;
  }

private:
  mutable uint refcount = 0;
  // Starts at zero; the addRefInternal() factories perform the first
  // increment. `mutable` because refcounting is conceptually const.

  bool addRefWeakInternal() const;
  // Attempts to add a reference; returns false if the count already hit zero
  // (object is being destroyed and cannot be revived).

  void disposeImpl(void* pointer) const override;

  template <typename T>
  static kj::Own<T> addRefInternal(T* object);
  template <typename T>
  static kj::Own<const T> addRefInternal(const T* object);

  template <typename T>
  friend kj::Own<T> atomicAddRef(T& object);
  template <typename T>
  friend kj::Own<const T> atomicAddRef(const T& object);
  template <typename T>
  friend kj::Maybe<kj::Own<const T>> atomicAddRefWeak(const T& object);
  template <typename T, typename... Params>
  friend kj::Own<T> atomicRefcounted(Params&&... params);
};
template <typename T, typename... Params>
inline kj::Own<T> atomicRefcounted(Params&&... params) {
  // Allocate a new atomically-refcounted T, perfectly forwarding the given
  // constructor arguments, and return an Own holding the initial reference.
  T* allocated = new T(kj::fwd<Params>(params)...);
  return AtomicRefcounted::addRefInternal(allocated);
}
template <typename T>
kj::Own<T> atomicAddRef(T& object) {
  // Add a reference to an existing atomically-refcounted object.
  KJ_IREQUIRE(object.AtomicRefcounted::refcount > 0,
      "Object not allocated with kj::refcounted().");
  T* ptr = &object;
  return AtomicRefcounted::addRefInternal(ptr);
}
template <typename T>
kj::Own<const T> atomicAddRef(const T& object) {
  // Const overload: shares ownership of `object` as an Own<const T>.
  KJ_IREQUIRE(object.AtomicRefcounted::refcount > 0,
      "Object not allocated with kj::refcounted().");
  const T* ptr = &object;
  return AtomicRefcounted::addRefInternal(ptr);
}
template <typename T>
kj::Maybe<kj::Own<const T>> atomicAddRefWeak(const T& object) {
  // Try to addref an object whose refcount could have already reached zero in
  // another thread, and whose destructor could therefore already have started
  // executing. The caller must provide some synchronization guaranteeing that
  // said destructor has not yet *completed* when atomicAddRefWeak() is called
  // (so that the object is still valid). Since a destructor cannot be canceled
  // once it has started, this function returns nullptr in that case.
  const AtomicRefcounted* refcounted = &object;
  if (!refcounted->addRefWeakInternal()) {
    return nullptr;
  }
  return kj::Own<const T>(&object, *refcounted);
}
template <typename T>
kj::Own<T> AtomicRefcounted::addRefInternal(T* object) {
  // Bump the refcount and wrap the object in an Own that uses the object's
  // own embedded disposer. RELAXED ordering is used for the increment, as in
  // the original implementation; the caller already holds (or is creating) a
  // valid reference.
  AtomicRefcounted* counter = object;
  __atomic_add_fetch(&counter->refcount, 1, __ATOMIC_RELAXED);
  return kj::Own<T>(object, *counter);
}
template <typename T>
kj::Own<const T> AtomicRefcounted::addRefInternal(const T* object) {
  // Const counterpart of addRefInternal(T*): bumps the refcount (RELAXED, as
  // in the non-const overload) and returns an Own<const T> sharing ownership.
  const AtomicRefcounted* counter = object;
  __atomic_add_fetch(&counter->refcount, 1, __ATOMIC_RELAXED);
  return kj::Own<const T>(object, *counter);
}
}
// namespace kj
#endif // KJ_REFCOUNT_H_
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment