Commit 966d25a2 authored by Kenton Varda

More RPC protocol WIP.

parent c1e51108
@@ -93,7 +93,7 @@ public:
     return kj::addRef(*this);
   }
-  void* getBrand() const override {
+  const void* getBrand() const override {
     return nullptr;
   }
......
@@ -276,7 +276,7 @@ public:
     return kj::addRef(*this);
   }
-  void* getBrand() const override {
+  const void* getBrand() const override {
     return nullptr;
   }
@@ -414,7 +414,7 @@ public:
     return kj::addRef(*this);
   }
-  void* getBrand() const override {
+  const void* getBrand() const override {
     // We have no need to detect local objects.
     return nullptr;
   }
......
@@ -326,7 +326,7 @@ public:
   virtual kj::Own<const ClientHook> addRef() const = 0;
   // Return a new reference to the same capability.
-  virtual void* getBrand() const = 0;
+  virtual const void* getBrand() const = 0;
   // Returns a void* that identifies who made this client. This can be used by an RPC adapter to
   // discover when a capability it needs to marshal is one that it created in the first place, and
   // therefore it can transfer the capability without proxying.
......
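Since getBrand() is compared purely by pointer identity, the pattern is easy to illustrate. Below is a minimal, self-contained sketch (the RpcAdapter/RpcHook names are hypothetical, not part of this commit): an adapter hands out hooks branded with its own address, so it can later recognize capabilities it created itself and transfer them without proxying.

    #include <cstdio>

    // Stand-in for the ClientHook interface above (illustration only).
    class ClientHook {
    public:
      virtual ~ClientHook() = default;
      virtual const void* getBrand() const = 0;
    };

    class RpcAdapter {
    public:
      // Hooks created by this adapter carry the adapter's address as their brand.
      class RpcHook final: public ClientHook {
      public:
        explicit RpcHook(const RpcAdapter* owner): owner(owner) {}
        const void* getBrand() const override { return owner; }
      private:
        const RpcAdapter* owner;
      };

      bool madeByMe(const ClientHook& hook) const {
        // Pointer identity suffices; no RTTI or downcasting needed.
        return hook.getBrand() == this;
      }
    };

    int main() {
      RpcAdapter adapter;
      RpcAdapter::RpcHook hook(&adapter);
      std::printf("made by me: %d\n", adapter.madeByMe(hook));  // prints 1
    }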
@@ -219,6 +219,15 @@ inline constexpr uint64_t typeId() { return _::TypeId_<T>::typeId; }
 // typeId<MyType>() returns the type ID as defined in the schema. Works with structs, enums, and
 // interfaces.
+template <typename T>
+inline constexpr uint sizeInWords() {
+  // Return the size, in words, of a Struct type, if allocated free-standing (not in a list).
+  // May be useful for pre-computing space needed in order to precisely allocate messages.
+  return (WordCount32(_::structSize<T>().data) +
+          _::structSize<T>().pointers * WORDS_PER_POINTER) / WORDS;
+}
 } // namespace capnp
 #define CAPNP_DECLARE_ENUM(type, id) \
......
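A hedged usage sketch (MyStruct stands in for any schema-generated struct type; it is not defined in this commit): because the root pointer itself occupies one word, a message whose root struct has no out-of-line children can be sized exactly.

    #include <capnp/message.h>

    void buildPreciselySized() {
      // sizeInWords<T>() counts only the struct's own data and pointer
      // sections, so add one word for the root pointer itself.
      capnp::MallocMessageBuilder message(capnp::sizeInWords<MyStruct>() + 1);
      MyStruct::Builder root = message.initRoot<MyStruct>();
      // ... fill in data fields; any pointed-to children would need more space ...
    }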
@@ -2247,6 +2247,10 @@ const word* PointerReader::getUnchecked() const {
   return reinterpret_cast<const word*>(pointer);
 }
+WordCount64 PointerReader::targetSize() const {
+  return WireHelpers::totalSize(segment, pointer, nestingLimit);
+}
 bool PointerReader::isNull() const {
   return pointer == nullptr || pointer->isNull();
 }
......
@@ -355,6 +355,13 @@ public:
   static inline PointerReader getRootUnchecked(const word* location);
   // Get a PointerReader for an unchecked message.
+  WordCount64 targetSize() const;
+  // Return the total size of the target object and everything to which it points. Does not count
+  // far pointer overhead. This is useful for deciding how much space is needed to copy the object
+  // into a flat array. However, the caller is advised NOT to treat this value as secure. Instead,
+  // use the result as a hint for allocating the first segment, do the copy, and then throw an
+  // exception if it overruns.
   bool isNull() const;
   StructReader getStruct(const word* defaultValue) const;
......
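A sketch of the copy-then-verify pattern that comment recommends, assuming a schema-generated MyStruct plus MessageBuilder::setRoot() and messageToFlatArray() as found in the library's public headers (the copyFlat helper itself is illustrative, not part of this commit):

    #include <capnp/message.h>
    #include <capnp/serialize.h>
    #include <kj/debug.h>

    // Copy `value` into a freshly allocated message, treating its claimed
    // size (e.g. from targetSize()) strictly as a hint.
    kj::Array<capnp::word> copyFlat(MyStruct::Reader value, size_t targetSizeWords) {
      // +1 word for the root pointer. Within a single segment there are no
      // far pointers, so an honest message fits exactly.
      capnp::MallocMessageBuilder builder(targetSizeWords + 1);
      builder.setRoot(value);

      // A hostile message can lie about its size. If the copy spilled into a
      // second segment, fail instead of trusting the hint.
      KJ_REQUIRE(builder.getSegmentsForOutput().size() == 1,
                 "object outgrew its claimed target size");
      return capnp::messageToFlatArray(builder);
    }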
@@ -89,6 +89,9 @@ struct ObjectPointer {
     Reader() = default;
     inline Reader(_::PointerReader reader): reader(reader) {}
+    inline size_t targetSizeInWords() const;
+    // Get the total size, in words, of the target object and all its children.
     inline bool isNull() const;
     template <typename T>
@@ -126,6 +129,9 @@ struct ObjectPointer {
     inline Builder(decltype(nullptr)) {}
     inline Builder(_::PointerBuilder builder): builder(builder) {}
+    inline size_t targetSizeInWords() const;
+    // Get the total size, in words, of the target object and all its children.
     inline bool isNull();
     inline void clear();
@@ -296,6 +302,10 @@ private:
 // =======================================================================================
 // Inline implementation details
+inline size_t ObjectPointer::Reader::targetSizeInWords() const {
+  return reader.targetSize() / WORDS;
+}
 inline bool ObjectPointer::Reader::isNull() const {
   return reader.isNull();
 }
@@ -305,6 +315,10 @@ inline ReaderFor<T> ObjectPointer::Reader::getAs() const {
   return _::PointerHelpers<T>::get(reader);
 }
+inline size_t ObjectPointer::Builder::targetSizeInWords() const {
+  return asReader().targetSizeInWords();
+}
 inline bool ObjectPointer::Builder::isNull() {
   return builder.isNull();
 }
......
This diff is collapsed.
@@ -336,6 +336,10 @@ struct Finish {
   # 3) If the answer has not returned yet, the caller no longer cares about the answer, so the
   #    callee may wish to immediately cancel the operation and send back a Return message with
   #    "canceled" set.
+  #
+  # TODO(soon): Should we separate (1) and (2)? It would be possible and useful to notify the
+  #   server that it doesn't need to keep around the response to service pipeline requests even
+  #   though the caller hasn't yet finished processing the response.
   questionId @0 :QuestionId;
   # ID of the question whose answer is to be released.
......
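A rough sketch of how a callee might act on these three cases (the types below are hypothetical stand-ins, not this commit's actual RPC state tables):

    #include <cstdint>
    #include <map>

    using QuestionId = uint32_t;

    struct AnswerState {
      bool returned = false;   // Has the Return message been sent yet?
      bool canceled = false;   // Set once the callee decides to cancel.
    };

    std::map<QuestionId, AnswerState> answers;

    void handleFinish(QuestionId id) {
      auto iter = answers.find(id);
      if (iter == answers.end()) return;
      if (iter->second.returned) {
        // Cases (1) and (2): the answer can no longer back pipelined calls,
        // and any capabilities it contained are released.
        answers.erase(iter);
      } else {
        // Case (3): the caller no longer cares, so cancel the operation and
        // later send a Return message with "canceled" set (not shown).
        iter->second.canceled = true;
      }
    }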
@@ -54,4 +54,22 @@ TEST(Refcount, Basic) {
 #endif
 }
+TEST(Refcount, Weak) {
+  {
+    bool b = false;
+    SetTrueInDestructor obj(&b);
+    EXPECT_TRUE(tryAddRef(obj) == nullptr);
+  }
+  {
+    bool b = false;
+    Own<SetTrueInDestructor> ref = kj::refcounted<SetTrueInDestructor>(&b);
+    KJ_IF_MAYBE(ref2, tryAddRef(*ref)) {
+      EXPECT_EQ(ref.get(), ref2->get());
+    } else {
+      ADD_FAILURE() << "tryAddRef() failed.";
+    }
+  }
+}
 } // namespace kj
@@ -22,24 +22,39 @@
 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "refcount.h"
+#include "debug.h"
 #include <memory>
 namespace kj {
-Refcounted::~Refcounted() noexcept(false) {}
+Refcounted::~Refcounted() noexcept(false) {
+  KJ_ASSERT(refcount == 0, "Refcounted object deleted with non-zero refcount.");
+}
 void Refcounted::disposeImpl(void* pointer) const {
-  // The load is a fast-path for the common case where this is the last reference. An acquire-load
-  // is just a regular load on x86. If there is more than one reference, then we need to do a full
-  // atomic decrement with full memory barrier, because:
-  // - If this is the final decrement then we need to acquire the object state in order to destroy
-  //   it.
-  // - If this is not the final decrement then we need to release the object state so that another
-  //   thread may destroy it.
-  if (__atomic_load_n(&refcount, __ATOMIC_ACQUIRE) == 1 ||
-      __atomic_sub_fetch(&refcount, 1, __ATOMIC_ACQ_REL) == 0) {
+  // Need to do a "release" decrement in order to release the object's state to any other thread
+  // which seeks to destroy it.
+  if (__atomic_sub_fetch(&refcount, 1, __ATOMIC_RELEASE) == 0) {
+    // This was the last reference. Acquire the memory so that we can destroy it.
+    __atomic_thread_fence(__ATOMIC_ACQUIRE);
     delete this;
   }
 }
+bool Refcounted::tryAddRefInternal() const {
+  // We want to increment the refcount, but only if it is non-zero. We have to use a cmpxchg for
+  // this.
+  uint old = __atomic_load_n(&refcount, __ATOMIC_RELAXED);
+  for (;;) {
+    if (old == 0) {
+      return false;
+    }
+    if (__atomic_compare_exchange_n(&refcount, &old, old + 1, true,
+                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+      return true;
+    }
+  }
+}
 } // namespace kj
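For readers more familiar with std::atomic than GCC's __atomic builtins, the same release-decrement / acquire-fence pairing and the conditional-increment loop look roughly like this (a comparison sketch, not part of the commit):

    #include <atomic>

    struct Counted {
      mutable std::atomic<unsigned> refcount{1};
      virtual ~Counted() = default;

      void release() const {
        // Release our writes to the object so the thread that performs the
        // final decrement can safely destroy it...
        if (refcount.fetch_sub(1, std::memory_order_release) == 1) {
          // ...and acquire everyone else's writes before destroying it.
          std::atomic_thread_fence(std::memory_order_acquire);
          delete this;
        }
      }

      bool tryAddRef() const {
        unsigned old = refcount.load(std::memory_order_relaxed);
        while (old != 0) {
          // compare_exchange_weak reloads `old` on failure, so the loop
          // simply retries against the new value.
          if (refcount.compare_exchange_weak(old, old + 1,
                                             std::memory_order_relaxed)) {
            return true;
          }
        }
        return false;
      }
    };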
@@ -59,8 +59,12 @@ private:
   template <typename T>
   static Own<T> addRefInternal(T* object);
+  bool tryAddRefInternal() const;
   template <typename T>
   friend Own<T> addRef(T& object);
+  template <typename T>
+  friend Maybe<Own<T>> tryAddRef(T& object);
   template <typename T, typename... Params>
   friend Own<T> refcounted(Params&&... params);
 };
@@ -83,6 +87,23 @@ Own<T> addRef(T& object) {
   return Refcounted::addRefInternal(&object);
 }
+template <typename T>
+Maybe<Own<T>> tryAddRef(T& object) {
+  // Like `addRef`, but if the object's refcount is already zero or if the object was not allocated
+  // with `refcounted`, returns nullptr. This can be used to implement weak references in a
+  // thread-safe way: store a (regular, non-owned) pointer to the object, and have the object's
+  // destructor null out that pointer. To convert the pointer to a full reference, use tryAddRef().
+  // If it fails, the object is already being destroyed. Be sure to also use some sort of mutex
+  // locking to synchronize access to the raw pointer, since you'll want the object's destructor
+  // to block if another thread is currently trying to restore the ref.
+  if (object.Refcounted::tryAddRefInternal()) {
+    return Own<T>(&object, kj::implicitCast<const Refcounted&>(object));
+  } else {
+    return nullptr;
+  }
+}
 template <typename T>
 Own<T> Refcounted::addRefInternal(T* object) {
   const Refcounted* refcounted = object;
......
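The weak-reference recipe in that comment might be realized like this (WeakSlot, Target, and lockWeak are hypothetical illustrations; std::mutex is used here rather than any particular kj primitive):

    #include <mutex>
    #include <kj/refcount.h>

    class Target;

    // Shared slot holding the raw, non-owning pointer the comment describes.
    struct WeakSlot {
      std::mutex mutex;
      Target* ptr = nullptr;
    };

    class Target: public kj::Refcounted {
    public:
      explicit Target(WeakSlot& slot): slot(slot) {
        std::lock_guard<std::mutex> lock(slot.mutex);
        slot.ptr = this;
      }
      ~Target() noexcept(false) {
        // Blocks while another thread is inside lockWeak(), as advised above.
        std::lock_guard<std::mutex> lock(slot.mutex);
        slot.ptr = nullptr;
      }
    private:
      WeakSlot& slot;
    };

    // Upgrade the weak pointer to a strong reference, or return nullptr if
    // the object is already being destroyed.
    kj::Maybe<kj::Own<Target>> lockWeak(WeakSlot& slot) {
      std::lock_guard<std::mutex> lock(slot.mutex);
      if (slot.ptr == nullptr) return nullptr;
      return kj::tryAddRef(*slot.ptr);
    }

Creation would go through kj::refcounted<Target>(slot), since tryAddRef() is only valid on objects allocated that way.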