Commit fce74ba5 authored by Kenton Varda

Iterate on C++ runtime.

parent 1768fc16
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef CAPNPROTO_ARENA_H_
#define CAPNPROTO_ARENA_H_
#include <inttypes.h>
#include <stddef.h>
namespace capnproto {
class Segment;
class SegmentAllocator;
class Arena {
public:
Arena(SegmentAllocator* allocator);
~Arena();
const Segment* addReadSegment(uint32_t index, const void* data, size_t size);
const Segment* getSegment(uint32_t index) const;
Segment* getWritableSegment(uint32_t index);
// Get the segment at the given index.
Segment* getSegmentWithAvailable(uint32_t minimumSize);
// Find a segment that has at least the given number of bytes available.
void parseError(const char* message) const;
};
class Segment {
public:
inline void* allocate(uint32_t amount);
inline void* getPtrUnchecked(uint32_t offset);
inline uint32_t getOffset(const void* ptr) const;
inline const void* getPtrChecked(uint32_t offset, uint32_t bytesBefore,
uint32_t bytesAfter) const;
inline Arena* getArena();
inline const Arena* getArena() const;
inline uint32_t getSegmentId() const;
private:
Arena* arena;
uint32_t id;
void* start;
uint32_t size;
uint32_t pos;
int64_t* readLimit;
friend class Arena;
inline Segment(Arena* arena, uint32_t index, void* start, uint32_t size, uint32_t pos,
int64_t* readLimit);
inline ~Segment();
Segment(const Segment& other) = delete;
Segment& operator=(const Segment& other) = delete;
void readLimitReached() const;
// TODO: Do we need mutex locking?
};
class SegmentAllocator {
public:
virtual ~SegmentAllocator();
virtual void* allocate(size_t size);
virtual void free(void* ptr, size_t size);
};
// =======================================================================================
inline Segment::Segment(Arena* arena, uint32_t id, void* start, uint32_t size, uint32_t pos,
int64_t* readLimit)
: arena(arena), id(id), start(start), size(size), pos(pos),
readLimit(readLimit) {}
inline Segment::~Segment() {}
inline void* Segment::allocate(uint32_t amount) {
if (amount > size - pos) {
return nullptr;
} else {
uint32_t offset = pos;
pos += amount;
return reinterpret_cast<char*>(start) + offset;
}
}
inline void* Segment::getPtrUnchecked(uint32_t offset) {
return reinterpret_cast<char*>(start) + offset;
}
inline uint32_t Segment::getOffset(const void* ptr) const {
return reinterpret_cast<const char*>(ptr) - reinterpret_cast<const char*>(start);
}
inline const void* Segment::getPtrChecked(uint32_t offset, uint32_t bytesBefore,
uint32_t bytesAfter) const {
// Check bounds. Watch out for overflow and underflow here.
if (offset > size || bytesBefore > offset || bytesAfter > size - offset) {
return nullptr;
} else {
// Enforce the read limit. Synchronization is not necessary because readLimit is just a rough
// counter to prevent infinite loops leading to DoS attacks.
if ((*readLimit -= bytesBefore + bytesAfter) < 0) readLimitReached();
return reinterpret_cast<char*>(start) + offset;
}
}
inline Arena* Segment::getArena() {
return arena;
}
inline const Arena* Segment::getArena() const {
return arena;
}
inline uint32_t Segment::getSegmentId() const {
return id;
}
} // namespace capnproto
#endif // CAPNPROTO_ARENA_H_
@@ -21,10 +21,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arena.h"
#include "message.h"
namespace capnproto {
Arena::~Arena() {}
MessageReader::~MessageReader() {}
MessageBuilder::~MessageBuilder() {}
} // namespace capnproto
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef CAPNPROTO_MESSAGE_H_
#define CAPNPROTO_MESSAGE_H_
#include <inttypes.h>
#include <cstddef>
#include "macros.h"
namespace capnproto {
class SegmentReader;
class SegmentBuilder;
class MessageReader;
class MessageBuilder;
class ReadLimiter;
class MessageReader {
// Abstract interface encapsulating a readable message. By implementing this interface, you can
// control how memory is allocated for the message. Or use MallocMessage to make things easy.
public:
virtual ~MessageReader();
virtual SegmentReader* tryGetSegment(uint32_t index) = 0;
// Gets the segment with the given ID, or returns nullptr if no such segment exists.
virtual void reportInvalidData(const char* description) = 0;
// Called to report that the message data is invalid.
//
// Implementations should, ideally, report the error to the sender, if possible. They may also
// want to write a debug message, etc.
//
// Implementations may choose to throw an exception in order to cut short further processing of
// the message. If no exception is thrown, then the caller will attempt to work around the
// invalid data by using a default value instead. This is good enough to guard against
// maliciously-crafted messages (the sender could just as easily have sent a perfectly-valid
// message containing the default value), but in the case of accidentally-corrupted messages this
// behavior may propagate the corruption.
//
// TODO: Give more information about the error, e.g. the segment and offset at which the invalid
// data was encountered, any relevant type/field names if known, etc.
virtual void reportReadLimitReached() = 0;
// Called to report that the read limit has been reached. See ReadLimiter, below.
//
// As with reportInvalidData(), this may throw an exception, and if it doesn't, default values
// will be used in place of the actual message data.
// TODO: Methods to deal with bundled capabilities.
};
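// A minimal sketch of a MessageReader implementation that serves segments out of an in-memory
// table. The class name and layout here are hypothetical, not part of this commit, and the
// sketch assumes <cstdio> has been included for fprintf().
class FlatMessageReader: public MessageReader {
public:
  FlatMessageReader(SegmentReader** segments, uint32_t count)
      : segments(segments), count(count) {}

  SegmentReader* tryGetSegment(uint32_t index) {
    // Out-of-range IDs simply don't exist.
    return index < count ? segments[index] : nullptr;
  }
  void reportInvalidData(const char* description) {
    // Just log; an implementation could instead throw to cut processing short.
    fprintf(stderr, "invalid message data: %s\n", description);
  }
  void reportReadLimitReached() {
    fprintf(stderr, "message read limit reached\n");
  }

private:
  SegmentReader** segments;  // Table of pre-constructed segment readers.
  uint32_t count;            // Number of entries in the table.
};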
class MessageBuilder: public MessageReader {
// Abstract interface encapsulating a writable message. By implementing this interface, you can
// control how memory is allocated for the message. Or use MallocMessage to make things easy.
public:
virtual ~MessageBuilder();
virtual SegmentBuilder* getSegment(uint32_t id) = 0;
// Get the segment with the given id. Crashes or throws an exception if no such segment exists.
virtual SegmentBuilder* getSegmentWithAvailable(uint32_t minimumSize) = 0;
// Get a segment which has at least the given amount of space available, allocating it if
// necessary. Crashes or throws an exception if there is not enough memory.
// TODO: Methods to deal with bundled capabilities.
};
class ReadLimiter {
// Used to keep track of how much data has been processed from a message, and cut off further
// processing if and when a particular limit is reached. This is primarily intended to guard
// against maliciously-crafted messages which contain cycles or overlapping structures. Cycles
// and overlaps are not permitted by the Cap'n Proto format because in many cases they could
// be used to craft a deceptively small message which could consume excessive server resources to
// process, perhaps even sending the receiver into an infinite loop. Actually detecting overlaps would be
// time-consuming, so instead we just keep track of how many bytes worth of data structures the
// receiver has actually dereferenced and error out if this gets too high.
//
// This counting takes place as you call getters (for non-primitive values) on the message
// readers. If you call the same getter twice, the data it returns may be double-counted. This
// should not be a big deal in most cases -- just set the read limit high enough that it will
// only trigger in unreasonable cases.
public:
inline explicit ReadLimiter(); // No limit.
inline explicit ReadLimiter(int64_t limit); // Limit to the given number of bytes.
inline bool canRead(uint32_t amount);
private:
int64_t counter;
};
class SegmentReader {
public:
inline SegmentReader(MessageReader* message, uint32_t id, const void* ptr, uint32_t size,
ReadLimiter* readLimiter);
CAPNPROTO_ALWAYS_INLINE(const void* getPtrChecked(
uint32_t offset, uint32_t bytesBefore, uint32_t bytesAfter));
inline MessageReader* getMessage();
inline uint32_t getSegmentId();
inline const void* getStartPtr();
inline uint32_t getSize();
private:
MessageReader* message;
uint32_t id;
uint32_t size;
const void* start;
ReadLimiter* readLimiter;
SegmentReader(const SegmentReader& other) = delete;
SegmentReader& operator=(const SegmentReader& other) = delete;
void readLimitReached();
friend class SegmentBuilder;
};
class SegmentBuilder: public SegmentReader {
public:
inline SegmentBuilder(MessageBuilder* message, uint32_t id, void* ptr, uint32_t available);
struct Allocation {
void* ptr;
uint32_t offset;
inline Allocation(): ptr(nullptr), offset(0) {}
inline Allocation(std::nullptr_t): ptr(nullptr), offset(0) {}
inline Allocation(void* ptr, uint32_t offset): ptr(ptr), offset(offset) {}
inline bool operator==(std::nullptr_t) const { return ptr == nullptr; }
};
CAPNPROTO_ALWAYS_INLINE(Allocation allocate(uint32_t amount));
inline void* getPtrUnchecked(uint32_t offset);
inline MessageBuilder* getMessage();
private:
char* pos;
char* end;
ReadLimiter dummyLimiter;
SegmentBuilder(const SegmentBuilder& other) = delete;
SegmentBuilder& operator=(const SegmentBuilder& other) = delete;
// TODO: Do we need mutex locking?
};
// =======================================================================================
inline ReadLimiter::ReadLimiter()
// I didn't want to #include <limits> just for this one lousy constant.
: counter(0x7fffffffffffffffll) {}
inline ReadLimiter::ReadLimiter(int64_t limit): counter(limit) {}
inline bool ReadLimiter::canRead(uint32_t amount) {
return (counter -= amount) >= 0;
}
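// Usage sketch (illustrative only): a limiter constructed with a budget of 1024 bytes permits
// reads until the running total exceeds that budget, after which canRead() returns false.
inline void readLimiterExample() {
  ReadLimiter limiter(1024);
  bool a = limiter.canRead(512);  // true -- 512 bytes of budget remain.
  bool b = limiter.canRead(512);  // true -- budget now exactly exhausted.
  bool c = limiter.canRead(1);    // false -- counter has gone negative.
  (void)a; (void)b; (void)c;
}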
// -------------------------------------------------------------------
inline SegmentReader::SegmentReader(
MessageReader* message, uint32_t id, const void* ptr, uint32_t size, ReadLimiter* readLimiter)
: message(message), id(id), size(size), start(ptr), readLimiter(readLimiter) {}
inline const void* SegmentReader::getPtrChecked(uint32_t offset, uint32_t bytesBefore,
uint32_t bytesAfter) {
// Check bounds. Watch out for overflow and underflow here.
if (offset > size || bytesBefore > offset || bytesAfter > size - offset) {
return nullptr;
} else {
// Enforce the read limit. Synchronization is not necessary because readLimit is just a rough
// counter to prevent infinite loops leading to DoS attacks.
if (CAPNPROTO_EXPECT_FALSE(!readLimiter->canRead(bytesBefore + bytesAfter))) {
message->reportReadLimitReached();
}
return reinterpret_cast<const char*>(start) + offset;
}
}
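// Why the bounds check above is phrased so carefully (illustrative sketch, not part of the
// API): the naive form "offset + bytesAfter <= size" can wrap around in unsigned arithmetic
// and falsely pass -- e.g. offset = 0xffffffff, bytesAfter = 1 wraps to 0. Checking
// "offset > size" first makes "size - offset" safe to compute.
inline bool boundsCheckExample(uint32_t offset, uint32_t bytesAfter, uint32_t size) {
  return offset <= size && bytesAfter <= size - offset;  // No overflow possible.
}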
inline MessageReader* SegmentReader::getMessage() { return message; }
inline uint32_t SegmentReader::getSegmentId() { return id; }
inline const void* SegmentReader::getStartPtr() { return start; }
inline uint32_t SegmentReader::getSize() { return size; }
// -------------------------------------------------------------------
inline SegmentBuilder::SegmentBuilder(
MessageBuilder* message, uint32_t id, void* ptr, uint32_t available)
: SegmentReader(message, id, ptr, 0, &dummyLimiter),
pos(reinterpret_cast<char*>(ptr)),
end(pos + available) {}
inline SegmentBuilder::Allocation SegmentBuilder::allocate(uint32_t amount) {
if (amount > end - pos) {
return nullptr;
} else {
char* result = pos;
pos += amount;
size += amount;
return Allocation(result, result - reinterpret_cast<const char*>(start));
}
}
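// Usage sketch (allocateWordExample is illustrative, not part of the API): grab one word from
// a segment, reporting failure when the segment is full.
inline uint64_t* allocateWordExample(SegmentBuilder* segment) {
  SegmentBuilder::Allocation alloc = segment->allocate(sizeof(uint64_t));
  if (alloc == nullptr) {
    return nullptr;  // Segment full; the caller must obtain another segment.
  }
  // alloc.offset is the byte offset of the new space within the segment, suitable for
  // encoding into a reference.
  return reinterpret_cast<uint64_t*>(alloc.ptr);
}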
inline void* SegmentBuilder::getPtrUnchecked(uint32_t offset) {
// const_cast OK because SegmentBuilder's constructor always initializes its SegmentReader base
// class with a pointer that was originally non-const.
return const_cast<char*>(reinterpret_cast<const char*>(start) + offset);
}
inline MessageBuilder* SegmentBuilder::getMessage() {
// Down-cast safe because SegmentBuilder's constructor always initializes its SegmentReader base
// class with a MessageReader pointer that actually points to a MessageBuilder.
return static_cast<MessageBuilder*>(message);
}
} // namespace capnproto
#endif // CAPNPROTO_MESSAGE_H_
@@ -22,12 +22,12 @@
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "wire-format.h"
#include "arena.h"
#include "message.h"
#include "descriptor.h"
namespace capnproto {
namespace internal {
namespace debug {
bool fieldIsStruct(const StructDescriptor* descriptor, int fieldNumber, int refIndex) {
@@ -91,7 +91,6 @@ bool elementsAreData(const ListDescriptor* descriptor, int bitSize) {
}
} // namespace debug
} // namespace internal
// =======================================================================================
@@ -125,8 +124,8 @@ struct WireReference {
struct {
WireValue<uint32_t> elementSizeAndCount;
CAPNPROTO_ALWAYS_INLINE(internal::FieldSize elementSize() const) {
return static_cast<internal::FieldSize>(elementSizeAndCount.get() >> 29);
CAPNPROTO_ALWAYS_INLINE(FieldSize elementSize() const) {
return static_cast<FieldSize>(elementSizeAndCount.get() >> 29);
}
CAPNPROTO_ALWAYS_INLINE(uint32_t elementCount() const) {
return elementSizeAndCount.get() & 0x1fffffffu;
@@ -147,8 +146,7 @@ struct WireReference {
offsetAndTag.set(offset | tag);
}
CAPNPROTO_ALWAYS_INLINE(void setStruct(
const internal::StructDescriptor* descriptor, uint32_t offset)) {
CAPNPROTO_ALWAYS_INLINE(void setStruct(const StructDescriptor* descriptor, uint32_t offset)) {
setTagAndOffset(STRUCT, offset);
structRef.fieldCount.set(descriptor->fieldCount);
structRef.dataSize.set(descriptor->dataSize);
@@ -157,7 +155,7 @@ struct WireReference {
}
CAPNPROTO_ALWAYS_INLINE(void setList(
const internal::ListDescriptor* descriptor, uint32_t elementCount, uint32_t offset)) {
const ListDescriptor* descriptor, uint32_t elementCount, uint32_t offset)) {
setTagAndOffset(LIST, offset);
CAPNPROTO_DEBUG_ASSERT((elementCount >> 29) == 0, "Lists are limited to 2**29 elements.");
listRef.elementSizeAndCount.set(
@@ -190,61 +188,71 @@ static inline void* offsetPtr(void* ptr, int amount) {
return reinterpret_cast<T*>(ptr) + amount;
}
template <typename T>
static inline T* takeFromAllocation(SegmentBuilder::Allocation& allocation, int count = 1) {
T* result = reinterpret_cast<T*>(allocation.ptr);
allocation.ptr = result + count;
allocation.offset += sizeof(T) * count;
return result;
}
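// Usage sketch (hypothetical helper, not part of this commit): carve a reference followed by
// two data words out of a single allocation. Each takeFromAllocation() call advances both
// allocation.ptr and allocation.offset past the space it hands out.
static WireReference* takeFromAllocationExample(SegmentBuilder* segment) {
  SegmentBuilder::Allocation alloc =
      segment->allocate(sizeof(WireReference) + 2 * sizeof(uint64_t));
  if (alloc == nullptr) return nullptr;  // Segment was full.
  WireReference* ref = takeFromAllocation<WireReference>(alloc);
  takeFromAllocation<uint64_t>(alloc, 2);  // Skip past the two data words.
  return ref;
}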
struct WireHelpers {
static CAPNPROTO_ALWAYS_INLINE(void* allocate(
WireReference*& ref, Segment*& segment, uint32_t size)) {
void* ptr = segment->allocate(size);
static CAPNPROTO_ALWAYS_INLINE(SegmentBuilder::Allocation allocate(
WireReference*& ref, SegmentBuilder*& segment, uint32_t size)) {
SegmentBuilder::Allocation allocation = segment->allocate(size);
if (ptr == nullptr) {
if (allocation == nullptr) {
// Need to allocate in a new segment.
// Loop here just in case we ever make Segment::allocate() thread-safe -- in this case another
// thread could have grabbed the space between when we asked the arena for the segment and
// thread could have grabbed the space between when we asked the message for the segment and
// when we asked the segment to allocate space.
do {
segment = segment->getArena()->getSegmentWithAvailable(size + sizeof(WireReference));
ptr = segment->allocate(size + sizeof(WireReference));
} while (CAPNPROTO_EXPECT_FALSE(ptr == nullptr));
segment = segment->getMessage()->getSegmentWithAvailable(size + sizeof(WireReference));
allocation = segment->allocate(size + sizeof(WireReference));
} while (CAPNPROTO_EXPECT_FALSE(allocation == nullptr));
ref->setFar(segment->getSegmentId(), segment->getOffset(ptr));
ref = reinterpret_cast<WireReference*>(ptr);
ref->setFar(segment->getSegmentId(), allocation.offset);
ref = takeFromAllocation<WireReference>(allocation);
// Allocated space follows new reference.
return ref + 1;
return allocation;
} else {
return ptr;
return allocation;
}
}
static CAPNPROTO_ALWAYS_INLINE(void followFars(WireReference*& ref, Segment*& segment)) {
static CAPNPROTO_ALWAYS_INLINE(void followFars(WireReference*& ref, SegmentBuilder*& segment)) {
if (ref->tag() == WireReference::FAR) {
segment = segment->getArena()->getWritableSegment(ref->farRef.segmentId.get());
segment = segment->getMessage()->getSegment(ref->farRef.segmentId.get());
ref = reinterpret_cast<WireReference*>(segment->getPtrUnchecked(ref->offset()));
}
}
static CAPNPROTO_ALWAYS_INLINE(bool followFars(
const WireReference*& ref, const Segment*& segment)) {
const WireReference*& ref, SegmentReader*& segment)) {
if (ref->tag() == WireReference::FAR) {
segment = segment->getArena()->getSegment(ref->farRef.segmentId.get());
segment = segment->getMessage()->tryGetSegment(ref->farRef.segmentId.get());
if (CAPNPROTO_EXPECT_FALSE(segment == nullptr)) {
return false;
}
ref = reinterpret_cast<const WireReference*>(
segment->getPtrChecked(ref->offset(), 0, sizeof(WireReference)));
return CAPNPROTO_EXPECT_TRUE(ref != nullptr);
return ref != nullptr;
} else {
return true;
}
}
static CAPNPROTO_ALWAYS_INLINE(bool isStructCompatible(
const internal::StructDescriptor* descriptor, const WireReference* ref)) {
const StructDescriptor* descriptor, const WireReference* ref)) {
if (ref->structRef.fieldCount.get() >= descriptor->fieldCount) {
// The incoming struct has all of the fields that we know about.
return ref->structRef.dataSize.get() >= descriptor->dataSize &&
ref->structRef.refCount.get() >= descriptor->referenceCount;
} else if (ref->structRef.fieldCount.get() > 0) {
// We know about more fields than the struct has, and the struct is non-empty.
const internal::FieldDescriptor* field =
&descriptor->fields[ref->structRef.fieldCount.get() - 1];
const FieldDescriptor* field = &descriptor->fields[ref->structRef.fieldCount.get() - 1];
return ref->structRef.dataSize.get() >= field->requiredDataSize &&
ref->structRef.refCount.get() >= field->requiredReferenceSize;
} else {
@@ -254,22 +262,22 @@ struct WireHelpers {
}
static CAPNPROTO_ALWAYS_INLINE(StructPtr initStructReference(
const internal::StructDescriptor* descriptor, WireReference* ref, Segment* segment)) {
const StructDescriptor* descriptor, WireReference* ref, SegmentBuilder* segment)) {
if (ref->isNull()) {
// Calculate the size of the struct.
uint32_t size = (descriptor->dataSize + descriptor->referenceCount) * sizeof(uint64_t);
// Allocate space for the new struct.
void* ptr = allocate(ref, segment, size);
SegmentBuilder::Allocation allocation = allocate(ref, segment, size);
// Advance the pointer to point between the data and reference segments.
ptr = offsetPtr<uint64_t>(ptr, descriptor->dataSize);
takeFromAllocation<uint64_t>(allocation, descriptor->dataSize);
// Initialize the reference.
ref->setStruct(descriptor, segment->getOffset(ptr));
ref->setStruct(descriptor, allocation.offset);
// Build the StructPtr.
return StructPtr(descriptor, segment, ptr);
return StructPtr(descriptor, segment, allocation.ptr);
} else {
followFars(ref, segment);
@@ -287,45 +295,47 @@ struct WireHelpers {
}
static CAPNPROTO_ALWAYS_INLINE(ListPtr initListReference(
const internal::ListDescriptor* descriptor, WireReference* ref,
Segment* segment, uint32_t elementCount)) {
if (descriptor->elementSize == internal::FieldSize::STRUCT) {
const internal::StructDescriptor* elementDescriptor =
const ListDescriptor* descriptor, WireReference* ref,
SegmentBuilder* segment, uint32_t elementCount)) {
if (descriptor->elementSize == FieldSize::STRUCT) {
const StructDescriptor* elementDescriptor =
descriptor->elementDescriptor->asStruct();
// Allocate the list, prefixed by a single WireReference.
WireReference* structRef = reinterpret_cast<WireReference*>(
SegmentBuilder::Allocation allocation =
allocate(ref, segment, sizeof(WireReference) +
elementDescriptor->wordSize() * elementCount * sizeof(uint64_t)));
void* ptr = offsetPtr<uint64_t>(structRef + 1, elementDescriptor->dataSize);
elementDescriptor->wordSize() * elementCount * sizeof(uint64_t));
// Initialize the reference.
ref->setList(descriptor, elementCount, segment->getOffset(structRef));
ref->setList(descriptor, elementCount, allocation.offset);
WireReference* structRef = takeFromAllocation<WireReference>(allocation);
// Skip past the data segment of the first struct.
takeFromAllocation<uint64_t>(allocation, elementDescriptor->dataSize);
// Initialize the struct reference.
structRef->setStruct(elementDescriptor, segment->getOffset(ptr));
structRef->setStruct(elementDescriptor, allocation.offset);
// Build the ListPtr.
return ListPtr(descriptor, segment, ptr, elementCount);
return ListPtr(descriptor, segment, structRef, elementCount);
} else {
// Calculate size of the list.
uint32_t size = divRoundingUp<uint32_t>(
static_cast<uint32_t>(sizeInBits(descriptor->elementSize)) * elementCount, 8);
// Allocate the list.
void* ptr = allocate(ref, segment, size);
SegmentBuilder::Allocation allocation = allocate(ref, segment, size);
// Initialize the reference.
ref->setList(descriptor, elementCount, segment->getOffset(ptr));
ref->setList(descriptor, elementCount, allocation.offset);
// Build the ListPtr.
return ListPtr(descriptor, segment, ptr, elementCount);
return ListPtr(descriptor, segment, allocation.ptr, elementCount);
}
}
static CAPNPROTO_ALWAYS_INLINE(ListPtr getWritableListReference(
const internal::ListDescriptor* descriptor, WireReference* ref,
Segment* segment)) {
const ListDescriptor* descriptor, WireReference* ref, SegmentBuilder* segment)) {
if (ref->isNull()) {
return ListPtr(descriptor, segment, nullptr, 0);
}
@@ -335,7 +345,7 @@ struct WireHelpers {
CAPNPROTO_ASSERT(ref->tag() == WireReference::LIST,
"Called getList{Field,Element}() but existing reference is not a list.");
if (descriptor->elementSize == internal::FieldSize::STRUCT) {
if (descriptor->elementSize == FieldSize::STRUCT) {
WireReference* structRef = reinterpret_cast<WireReference*>(
segment->getPtrUnchecked(ref->offset()));
return ListPtr(descriptor, segment,
@@ -347,33 +357,33 @@ struct WireHelpers {
}
static CAPNPROTO_ALWAYS_INLINE(StructReadPtr readStructReference(
const internal::StructDescriptor* descriptor, const WireReference* ref,
const Segment* segment, int recursionLimit)) {
const StructDescriptor* descriptor, const WireReference* ref,
SegmentReader* segment, int recursionLimit)) {
do {
if (ref == nullptr || ref->isNull()) {
break;
}
if (CAPNPROTO_EXPECT_FALSE(recursionLimit == 0)) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message is too deeply-nested or contains cycles.");
break;
}
if (CAPNPROTO_EXPECT_FALSE(!followFars(ref, segment))) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message contains out-of-bounds far reference.");
break;
}
if (CAPNPROTO_EXPECT_FALSE(ref->tag() != WireReference::STRUCT)) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message contains non-struct reference where struct reference was expected.");
break;
}
if (CAPNPROTO_EXPECT_FALSE(!isStructCompatible(descriptor, ref))) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message contains struct that is too small for its field count.");
break;
}
@@ -383,7 +393,8 @@
ref->structRef.refCount.get() * sizeof(WireReference));
if (CAPNPROTO_EXPECT_FALSE(ptr == nullptr)) {
segment->getArena()->parseError("Message contained out-of-bounds struct reference.");
segment->getMessage()->reportInvalidData(
"Message contained out-of-bounds struct reference.");
break;
}
@@ -395,38 +406,38 @@
}
static CAPNPROTO_ALWAYS_INLINE(ListReadPtr readListReference(
const internal::ListDescriptor* descriptor, const WireReference* ref, const Segment* segment,
int recursionLimit)) {
const ListDescriptor* descriptor, const WireReference* ref,
SegmentReader* segment, int recursionLimit)) {
do {
if (ref == nullptr || ref->isNull()) {
break;
}
if (CAPNPROTO_EXPECT_FALSE(recursionLimit == 0)) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message is too deeply-nested or contains cycles.");
break;
}
if (CAPNPROTO_EXPECT_FALSE(!followFars(ref, segment))) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message contains out-of-bounds far reference.");
break;
}
if (CAPNPROTO_EXPECT_FALSE(ref->tag() != WireReference::LIST)) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message contains non-list reference where list reference was expected.");
break;
}
if (ref->listRef.elementSize() == internal::FieldSize::STRUCT) {
if (ref->listRef.elementSize() == FieldSize::STRUCT) {
// A struct list reference actually points to a struct reference which in turn points to the
// first struct in the list.
const void* ptrPtr =
segment->getPtrChecked(ref->offset(), 0, sizeof(WireReference));
if (CAPNPROTO_EXPECT_FALSE(ptrPtr == nullptr)) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message contains out-of-bounds list reference.");
break;
}
@@ -435,7 +446,7 @@
ref = reinterpret_cast<const WireReference*>(ptrPtr);
if (CAPNPROTO_EXPECT_FALSE(ref->tag() != WireReference::STRUCT)) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message contains struct list reference that does not point to a struct reference.");
break;
}
@@ -447,7 +458,7 @@
ref->structRef.refCount.get() * sizeof(WireReference) +
step * (size - 1));
if (CAPNPROTO_EXPECT_FALSE(ptr == nullptr)) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message contains out-of-bounds struct list reference.");
break;
}
@@ -461,31 +472,31 @@
// Check whether the size is compatible.
bool compatible = true;
switch (descriptor->elementSize) {
case internal::FieldSize::BIT:
case internal::FieldSize::BYTE:
case internal::FieldSize::TWO_BYTES:
case internal::FieldSize::FOUR_BYTES:
case internal::FieldSize::EIGHT_BYTES:
case FieldSize::BIT:
case FieldSize::BYTE:
case FieldSize::TWO_BYTES:
case FieldSize::FOUR_BYTES:
case FieldSize::EIGHT_BYTES:
compatible = ref->structRef.dataSize.get() > 0;
break;
case internal::FieldSize::REFERENCE:
case FieldSize::REFERENCE:
compatible = ref->structRef.refCount.get() > 0;
break;
case internal::FieldSize::KEY_REFERENCE:
case FieldSize::KEY_REFERENCE:
compatible = ref->structRef.dataSize.get() > 0 &&
ref->structRef.refCount.get() > 0;
break;
case internal::FieldSize::STRUCT: {
case FieldSize::STRUCT: {
compatible = isStructCompatible(descriptor->elementDescriptor->asStruct(), ref);
break;
}
}
if (CAPNPROTO_EXPECT_FALSE(!compatible)) {
segment->getArena()->parseError("A list had incompatible element type.");
segment->getMessage()->reportInvalidData("A list had incompatible element type.");
break;
}
@@ -501,24 +512,24 @@
implicit_cast<uint64_t>(ref->listRef.elementCount()) * step, 8));
if (CAPNPROTO_EXPECT_FALSE(ptr == nullptr)) {
segment->getArena()->parseError("Message contained out-of-bounds list reference.");
segment->getMessage()->reportInvalidData(
"Message contained out-of-bounds list reference.");
break;
}
if (descriptor->elementSize == ref->listRef.elementSize()) {
return ListReadPtr(descriptor, segment, ptr, ref->listRef.elementCount(),
sizeInBits(ref->listRef.elementSize()), 0, recursionLimit);
} else if (descriptor->elementSize == internal::FieldSize::STRUCT) {
} else if (descriptor->elementSize == FieldSize::STRUCT) {
// We were expecting a struct, but we received a list of some other type. Perhaps a
// non-struct list was recently upgraded to a struct list, but the sender is using the
// old version of the protocol. We need to verify that the struct's first field matches
// what the sender sent us.
const internal::StructDescriptor* elementDescriptor =
descriptor->elementDescriptor->asStruct();
const StructDescriptor* elementDescriptor = descriptor->elementDescriptor->asStruct();
if (CAPNPROTO_EXPECT_FALSE(
elementDescriptor->fieldCount == 0 ||
elementDescriptor->fields[0].size != ref->listRef.elementSize())) {
segment->getArena()->parseError("A list had incompatible element type.");
segment->getMessage()->reportInvalidData("A list had incompatible element type.");
break;
}
@@ -528,21 +539,21 @@
return ListReadPtr(descriptor, segment, ptr, ref->listRef.elementCount(),
sizeInBits(ref->listRef.elementSize()), 1, recursionLimit);
} else {
segment->getArena()->parseError("A list had incompatible element type.");
segment->getMessage()->reportInvalidData("A list had incompatible element type.");
break;
}
}
} while (false);
switch (descriptor->elementSize) {
case internal::FieldSize::REFERENCE:
case internal::FieldSize::KEY_REFERENCE:
case internal::FieldSize::STRUCT:
case FieldSize::REFERENCE:
case FieldSize::KEY_REFERENCE:
case FieldSize::STRUCT:
return ListReadPtr(descriptor, segment, nullptr, descriptor->defaultCount, 0, 0,
recursionLimit - 1);
default:
return ListReadPtr(descriptor, segment, descriptor->defaultData, descriptor->defaultCount,
internal::sizeInBits(descriptor->elementSize), 0, recursionLimit - 1);
sizeInBits(descriptor->elementSize), 0, recursionLimit - 1);
}
}
};
@@ -612,21 +623,21 @@ ListPtr ListPtr::getListElementInternal(unsigned int index) const {
ListReadPtr ListPtr::asReadPtr() const {
return ListReadPtr(descriptor, segment, ptr, elementCount,
internal::sizeInBits(descriptor->elementSize),
descriptor->elementSize == internal::FieldSize::STRUCT
sizeInBits(descriptor->elementSize),
descriptor->elementSize == FieldSize::STRUCT
? descriptor->elementDescriptor->asStruct()->fieldCount : 0,
1 << 30);
}
StructReadPtr ListReadPtr::getStructElementInternal(unsigned int index) const {
const internal::StructDescriptor* elementDescriptor;
const StructDescriptor* elementDescriptor;
if (ptr == nullptr) {
elementDescriptor = descriptor->defaultReferences()[index]->asStruct();
} else {
elementDescriptor = descriptor->elementDescriptor->asStruct();
if (CAPNPROTO_EXPECT_FALSE(recursionLimit == 0)) {
segment->getArena()->parseError(
segment->getMessage()->reportInvalidData(
"Message is too deeply-nested or contains cycles.");
} else {
uint64_t indexBit = static_cast<uint64_t>(index) * stepBits;
@@ -652,4 +663,5 @@ ListReadPtr ListReadPtr::getListElementInternal(unsigned int index, uint32_t siz
}
}
} // namespace internal
} // namespace capnproto
@@ -21,6 +21,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is NOT intended for use by clients, except in generated code.
//
// This file defines low-level, non-type-safe classes for interpreting the raw Cap'n Proto wire
// format. Code generated by the Cap'n Proto compiler uses these classes, as do other parts of
// the Cap'n Proto library which provide a higher-level interface for dynamic introspection.
#ifndef CAPNPROTO_WIRE_FORMAT_H_
#define CAPNPROTO_WIRE_FORMAT_H_
@@ -28,43 +34,71 @@
#include "macros.h"
namespace capnproto {
class SegmentReader;
class SegmentBuilder;
}
namespace capnproto {
namespace internal {
class Descriptor;
class StructDescriptor;
class ListDescriptor;
namespace debug {
// These functions are only called inside debug asserts. They are defined out-of-line so that
// we don't have to #include descriptor.h from here, which is arguably important because this
// header is #included from generated headers, whereas descriptor.h is only #included in generated
// source files.
bool fieldIsStruct(const StructDescriptor* descriptor, int fieldNumber, int refIndex);
bool fieldIsList(const StructDescriptor* descriptor, int fieldNumber, int refIndex);
bool fieldIsData(const StructDescriptor* descriptor, int fieldNumber, int dataOffset,
int bitSize);
bool dataFieldInRange(const StructDescriptor* descriptor, uint32_t dataOffset, uint32_t size);
bool bitFieldInRange(const StructDescriptor* descriptor, uint32_t offset);
bool refFieldIsStruct(const StructDescriptor* descriptor, int refIndex);
bool refFieldIsList(const StructDescriptor* descriptor, int refIndex);
bool elementsAreStructs(const ListDescriptor* descriptor);
bool elementsAreStructs(const ListDescriptor* descriptor, uint32_t wordSize);
bool elementsAreLists(const ListDescriptor* descriptor);
bool elementsAreData(const ListDescriptor* descriptor, int bitSize);
} // namespace debug
class Arena;
class Segment;
class StructPtr;
class StructReadPtr;
class ListPtr;
class ListReadPtr;
class Capability;
struct WireReference;
struct WireHelpers;
namespace internal {
class Descriptor;
class StructDescriptor;
class ListDescriptor;
namespace debug {
// These functions are only called inside debug asserts. They are defined out-of-line so that
// we don't have to #include descriptor.h from here.
bool fieldIsStruct(const StructDescriptor* descriptor, int fieldNumber, int refIndex);
bool fieldIsList(const StructDescriptor* descriptor, int fieldNumber, int refIndex);
bool fieldIsData(const StructDescriptor* descriptor, int fieldNumber, int dataOffset,
int bitSize);
bool dataFieldInRange(const StructDescriptor* descriptor, uint32_t dataOffset, uint32_t size);
bool bitFieldInRange(const StructDescriptor* descriptor, uint32_t offset);
bool refFieldIsStruct(const StructDescriptor* descriptor, int refIndex);
bool refFieldIsList(const StructDescriptor* descriptor, int refIndex);
bool elementsAreStructs(const ListDescriptor* descriptor);
bool elementsAreStructs(const ListDescriptor* descriptor, uint32_t wordSize);
bool elementsAreLists(const ListDescriptor* descriptor);
bool elementsAreData(const ListDescriptor* descriptor, int bitSize);
} // namespace debug
} // namespace internal
// -------------------------------------------------------------------
template <typename T>
class WireValue {
// Wraps a primitive value as it appears on the wire. Namely, values are little-endian on the
// wire, because little-endian is the most common endianness in modern CPUs.
//
// TODO: On big-endian systems, inject byte-swapping here. Most big-endian CPUs implement
// dedicated instructions for this, so use those rather than writing a bunch of shifts and
// masks. Note that GCC has e.g. __builtin_bswap32() for this.
//
// Note: In general, code that cares about byte ordering is bad. See:
// http://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
// Cap'n Proto is special because it is essentially doing compiler-like things, fussing over
// allocation and layout of memory, in order to squeeze out every last drop of performance.
public:
CAPNPROTO_ALWAYS_INLINE(WireValue()) {}
CAPNPROTO_ALWAYS_INLINE(WireValue(T value)): value(value) {}
CAPNPROTO_ALWAYS_INLINE(T get() const) { return value; }
CAPNPROTO_ALWAYS_INLINE(void set(T newValue)) { value = newValue; }
private:
T value;
};
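// A hypothetical sketch of the byte-swapping specialization described in the TODO above, for
// big-endian builds. It assumes GCC/Clang's __builtin_bswap32() and predefined byte-order
// macros; it is not part of this commit, and the eventual implementation may differ.
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
template <>
class WireValue<uint32_t> {
public:
  CAPNPROTO_ALWAYS_INLINE(WireValue()) {}
  CAPNPROTO_ALWAYS_INLINE(WireValue(uint32_t value)): value(__builtin_bswap32(value)) {}
  CAPNPROTO_ALWAYS_INLINE(uint32_t get() const) { return __builtin_bswap32(value); }
  CAPNPROTO_ALWAYS_INLINE(void set(uint32_t newValue)) { value = __builtin_bswap32(newValue); }
private:
  uint32_t value;  // Always stored little-endian, matching the wire format.
};
#endif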
class StructPtr {
public:
template <typename T>
@@ -92,11 +126,11 @@ public:
// Get a StructReadPtr pointing at the same memory.
private:
const internal::StructDescriptor* descriptor; // Descriptor for the struct.
Segment* segment; // Memory segment in which the struct resides.
const StructDescriptor* descriptor; // Descriptor for the struct.
SegmentBuilder* segment; // Memory segment in which the struct resides.
void* ptr; // Pointer to the location between the struct's data and reference segments.
inline StructPtr(const internal::StructDescriptor* descriptor, Segment* segment, void* ptr)
inline StructPtr(const StructDescriptor* descriptor, SegmentBuilder* segment, void* ptr)
: descriptor(descriptor), segment(segment), ptr(ptr) {}
StructPtr getStructFieldInternal(int refIndex) const;
@@ -127,8 +161,8 @@ public:
// initialized.
private:
const internal::StructDescriptor* descriptor; // Descriptor for the struct.
const Segment* segment; // Memory segment in which the struct resides.
const StructDescriptor* descriptor; // Descriptor for the struct.
SegmentReader* segment; // Memory segment in which the struct resides.
const void* ptr[2];
// ptr[0] points to the location between the struct's data and reference segments.
@@ -146,7 +180,7 @@ private:
// Limits the depth of message structures to guard against stack-overflow-based DoS attacks.
// Once this reaches zero, further pointers will be pruned.
inline StructReadPtr(const internal::StructDescriptor* descriptor, const Segment* segment,
inline StructReadPtr(const StructDescriptor* descriptor, SegmentReader* segment,
const void* ptr, const void* defaultData, int fieldCount, int bit0Offset,
int recursionLimit)
: descriptor(descriptor), segment(segment), ptr{ptr, defaultData}, fieldCount(fieldCount),
@@ -193,12 +227,12 @@ public:
// Get a ListReadPtr pointing at the same memory.
private:
const internal::ListDescriptor* descriptor; // Descriptor for the list.
Segment* segment; // Memory segment in which the list resides.
const ListDescriptor* descriptor; // Descriptor for the list.
SegmentBuilder* segment; // Memory segment in which the list resides.
void* ptr; // Pointer to the beginning of the list.
uint32_t elementCount; // Number of elements in the list.
inline ListPtr(const internal::ListDescriptor* descriptor, Segment* segment,
inline ListPtr(const ListDescriptor* descriptor, SegmentBuilder* segment,
void* ptr, uint32_t size)
: descriptor(descriptor), segment(segment), ptr(ptr), elementCount(size) {}
@@ -229,8 +263,8 @@ public:
// Get the list element at the given index.
private:
const internal::ListDescriptor* descriptor; // Descriptor for the list.
const Segment* segment; // Memory segment in which the list resides.
const ListDescriptor* descriptor; // Descriptor for the list.
SegmentReader* segment; // Memory segment in which the list resides.
const void* ptr;
// Pointer to the data. If NULL, use defaultReferences. (Never NULL for data lists.)
@@ -249,7 +283,7 @@ private:
// Limits the depth of message structures to guard against stack-overflow-based DoS attacks.
// Once this reaches zero, further pointers will be pruned.
inline ListReadPtr(const internal::ListDescriptor* descriptor, const Segment* segment,
inline ListReadPtr(const ListDescriptor* descriptor, SegmentReader* segment,
const void* ptr, uint32_t size, int stepBits, int structFieldCount,
int recursionLimit)
: descriptor(descriptor), segment(segment), ptr(ptr), elementCount(size), stepBits(stepBits),
@@ -269,37 +303,9 @@ private:
// =======================================================================================
// Internal implementation details...
template <typename T>
class WireValue {
// Wraps a primitive value as it appears on the wire. Namely, values are little-endian on the
// wire, because little-endian is the most common endianness in modern CPUs.
//
// TODO: On big-endian systems, inject byte-swapping here. Most big-endian CPUs implement
// dedicated instructions for this, so use those rather than writing a bunch of shifts and
// masks. Note that GCC has e.g. __builtin_bswap32() for this.
//
// Note: In general, code that cares about byte ordering is bad. See:
// http://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
// Cap'n Proto is special because it is essentially doing compiler-like things, fussing over
// allocation and layout of memory, in order to squeeze out every last drop of performance.
public:
CAPNPROTO_ALWAYS_INLINE(WireValue()) {}
CAPNPROTO_ALWAYS_INLINE(WireValue(T value)): value(value) {}
CAPNPROTO_ALWAYS_INLINE(T get() const) { return value; }
CAPNPROTO_ALWAYS_INLINE(void set(T newValue)) { value = newValue; }
private:
T value;
};
// -------------------------------------------------------------------
template <typename T>
inline T StructPtr::getDataField(unsigned int offset) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::dataFieldInRange(descriptor, offset, sizeof(T)),
CAPNPROTO_DEBUG_ASSERT(debug::dataFieldInRange(descriptor, offset, sizeof(T)),
"StructPtr::getDataField() type mismatch.");
return reinterpret_cast<WireValue<T>*>(ptr)[-offset].get();
@@ -307,8 +313,7 @@ inline T StructPtr::getDataField(unsigned int offset) const {
template <>
inline bool StructPtr::getDataField<bool>(unsigned int offset) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::bitFieldInRange(descriptor, offset),
CAPNPROTO_DEBUG_ASSERT(debug::bitFieldInRange(descriptor, offset),
"StructPtr::getDataField<bool>() type mismatch.");
uint8_t byte = *(reinterpret_cast<uint8_t*>(ptr) - (offset / 8) - 1);
return (byte & (1 << (offset % 8))) != 0;
@@ -316,16 +321,14 @@ inline bool StructPtr::getDataField<bool>(unsigned int offset) const {
template <typename T>
inline void StructPtr::setDataField(unsigned int offset, T value) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::dataFieldInRange(descriptor, offset, sizeof(T)),
CAPNPROTO_DEBUG_ASSERT(debug::dataFieldInRange(descriptor, offset, sizeof(T)),
"StructPtr::setDataField() type mismatch.");
reinterpret_cast<WireValue<T>*>(ptr)[-offset].set(value);
}
template <>
inline void StructPtr::setDataField<bool>(unsigned int offset, bool value) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::bitFieldInRange(descriptor, offset),
CAPNPROTO_DEBUG_ASSERT(debug::bitFieldInRange(descriptor, offset),
"StructPtr::setDataField<bool>() type mismatch.");
uint8_t* byte = reinterpret_cast<uint8_t*>(ptr) - (offset / 8) - 1;
*byte = (*byte & ~(1 << (offset % 8)))
@@ -333,19 +336,19 @@ inline void StructPtr::setDataField<bool>(unsigned int offset, bool value) const
}
inline StructPtr StructPtr::getStructField(int refIndex) const {
CAPNPROTO_DEBUG_ASSERT(internal::debug::refFieldIsStruct(descriptor, refIndex),
CAPNPROTO_DEBUG_ASSERT(debug::refFieldIsStruct(descriptor, refIndex),
"StructPtr::getStructField() type mismatch.");
return getStructFieldInternal(refIndex);
}
inline ListPtr StructPtr::initListField(int refIndex, uint32_t elementCount) const {
CAPNPROTO_DEBUG_ASSERT(internal::debug::refFieldIsList(descriptor, refIndex),
CAPNPROTO_DEBUG_ASSERT(debug::refFieldIsList(descriptor, refIndex),
"StructPtr::initListField() type mismatch.");
return initListFieldInternal(refIndex, elementCount);
}
inline ListPtr StructPtr::getListField(int refIndex) const {
CAPNPROTO_DEBUG_ASSERT(internal::debug::refFieldIsList(descriptor, refIndex),
CAPNPROTO_DEBUG_ASSERT(debug::refFieldIsList(descriptor, refIndex),
"StructPtr::initListField() type mismatch.");
return getListFieldInternal(refIndex);
}
@@ -354,8 +357,7 @@ inline ListPtr StructPtr::getListField(int refIndex) const {
template <typename T>
T StructReadPtr::getDataField(int fieldNumber, unsigned int offset) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::fieldIsData(descriptor, fieldNumber, offset, sizeof(T) * 8),
CAPNPROTO_DEBUG_ASSERT(debug::fieldIsData(descriptor, fieldNumber, offset, sizeof(T) * 8),
"StructReadPtr::getDataField() type mismatch.");
const void* dataPtr = ptr[fieldNumber >= fieldCount];
return reinterpret_cast<WireValue<T>*>(dataPtr)[-offset].get();
@@ -363,8 +365,7 @@ T StructReadPtr::getDataField(int fieldNumber, unsigned int offset) const {
template <>
inline bool StructReadPtr::getDataField<bool>(int fieldNumber, unsigned int offset) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::fieldIsData(descriptor, fieldNumber, offset, 1),
CAPNPROTO_DEBUG_ASSERT(debug::fieldIsData(descriptor, fieldNumber, offset, 1),
"StructReadPtr::getDataField<bool>() type mismatch.");
// This branch should always be optimized away when inlining.
@@ -376,13 +377,13 @@ inline bool StructReadPtr::getDataField<bool>(int fieldNumber, unsigned int offs
}
inline StructReadPtr StructReadPtr::getStructField(int fieldNumber, unsigned int refIndex) const {
CAPNPROTO_DEBUG_ASSERT(internal::debug::fieldIsStruct(descriptor, fieldNumber, refIndex),
CAPNPROTO_DEBUG_ASSERT(debug::fieldIsStruct(descriptor, fieldNumber, refIndex),
"StructReadPtr::getStructField() type mismatch.");
return getStructFieldInternal(fieldNumber, refIndex);
}
inline ListReadPtr StructReadPtr::getListField(int fieldNumber, unsigned int refIndex) const {
CAPNPROTO_DEBUG_ASSERT(internal::debug::fieldIsList(descriptor, fieldNumber, refIndex),
CAPNPROTO_DEBUG_ASSERT(debug::fieldIsList(descriptor, fieldNumber, refIndex),
"StructReadPtr::getListField() type mismatch.");
return getListFieldInternal(fieldNumber, refIndex);
}
@@ -393,16 +394,14 @@ inline uint32_t ListPtr::size() { return elementCount; }
template <typename T>
inline T ListPtr::getDataElement(unsigned int index) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::elementsAreData(descriptor, sizeof(T) * 8),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreData(descriptor, sizeof(T) * 8),
"ListPtr::getDataElement() type mismatch.");
return reinterpret_cast<WireValue<T>*>(ptr)[index].get();
}
template <>
inline bool ListPtr::getDataElement<bool>(unsigned int index) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::elementsAreData(descriptor, 1),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreData(descriptor, 1),
"ListPtr::getDataElement<bool>() type mismatch.");
uint8_t byte = *(reinterpret_cast<uint8_t*>(ptr) + (index / 8));
return (byte & (1 << (index % 8))) != 0;
@@ -410,16 +409,14 @@ inline bool ListPtr::getDataElement<bool>(unsigned int index) const {
template <typename T>
inline void ListPtr::setDataElement(unsigned int index, T value) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::elementsAreData(descriptor, sizeof(T) * 8),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreData(descriptor, sizeof(T) * 8),
"ListPtr::setDataElement() type mismatch.");
reinterpret_cast<WireValue<T>*>(ptr)[index].set(value);
}
template <>
inline void ListPtr::setDataElement<bool>(unsigned int index, bool value) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::elementsAreData(descriptor, 1),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreData(descriptor, 1),
"ListPtr::setDataElement<bool>() type mismatch.");
uint8_t* byte = reinterpret_cast<uint8_t*>(ptr) + (index / 8);
*byte = (*byte & ~(1 << (index % 8)))
@@ -428,21 +425,21 @@ inline void ListPtr::setDataElement<bool>(unsigned int index, bool value) const
inline StructPtr ListPtr::getStructElement(unsigned int index, uint32_t elementWordSize) const {
CAPNPROTO_DEBUG_ASSERT(index < elementCount, "List index out of range.");
CAPNPROTO_DEBUG_ASSERT(internal::debug::elementsAreStructs(descriptor, elementWordSize),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreStructs(descriptor, elementWordSize),
"ListPtr::getStructElement() type mismatch.");
return getStructElementInternal(index, elementWordSize);
}
inline ListPtr ListPtr::initListElement(unsigned int index, uint32_t size) const {
CAPNPROTO_DEBUG_ASSERT(index < elementCount, "List index out of range.");
CAPNPROTO_DEBUG_ASSERT(internal::debug::elementsAreLists(descriptor),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreLists(descriptor),
"ListPtr::initListElement() type mismatch.");
return initListElementInternal(index, size);
}
inline ListPtr ListPtr::getListElement(unsigned int index) const {
CAPNPROTO_DEBUG_ASSERT(index < elementCount, "List index out of range.");
CAPNPROTO_DEBUG_ASSERT(internal::debug::elementsAreLists(descriptor),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreLists(descriptor),
"ListPtr::getListElement() type mismatch.");
return getListElementInternal(index);
}
@@ -453,8 +450,7 @@ inline uint32_t ListReadPtr::size() { return elementCount; }
template <typename T>
inline T ListReadPtr::getDataElement(unsigned int index) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::elementsAreData(descriptor, sizeof(T) * 8),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreData(descriptor, sizeof(T) * 8),
"ListReadPtr::getDataElement() type mismatch.");
return *reinterpret_cast<const T*>(
reinterpret_cast<const uint8_t*>(ptr) + index * (stepBits / 8));
@@ -462,8 +458,7 @@ inline T ListReadPtr::getDataElement(unsigned int index) const {
template <>
inline bool ListReadPtr::getDataElement<bool>(unsigned int index) const {
CAPNPROTO_DEBUG_ASSERT(
internal::debug::elementsAreData(descriptor, 1),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreData(descriptor, 1),
"ListReadPtr::getDataElement<bool>() type mismatch.");
unsigned int bitIndex = index * stepBits;
uint8_t byte = *(reinterpret_cast<const uint8_t*>(ptr) + (bitIndex / 8));
@@ -472,18 +467,19 @@ inline bool ListReadPtr::getDataElement<bool>(unsigned int index) const {
inline StructReadPtr ListReadPtr::getStructElement(unsigned int index) const {
CAPNPROTO_DEBUG_ASSERT(index < elementCount, "List index out of range.");
CAPNPROTO_DEBUG_ASSERT(internal::debug::elementsAreStructs(descriptor),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreStructs(descriptor),
"ListReadPtr::getStructElement() type mismatch.");
return getStructElementInternal(index);
}
inline ListReadPtr ListReadPtr::getListElement(unsigned int index, uint32_t size) const {
CAPNPROTO_DEBUG_ASSERT(index < elementCount, "List index out of range.");
CAPNPROTO_DEBUG_ASSERT(internal::debug::elementsAreLists(descriptor),
CAPNPROTO_DEBUG_ASSERT(debug::elementsAreLists(descriptor),
"ListReadPtr::getListElement() type mismatch.");
return getListElementInternal(index, size);
}
} // namespace internal
} // namespace capnproto
#endif // CAPNPROTO_WIRE_FORMAT_H_