Commit 8e1d1e53 authored by Kenton Varda

kj::Arena for fast arena-style allocation.
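A rough usage sketch (illustrative only; `Widget` and its constructor argument are placeholders,
see src/kj/arena.h below for the real API):

  kj::Arena arena;
  Widget& w = arena.allocate<Widget>(42);               // destroyed when `arena` is destroyed
  kj::ArrayPtr<int> nums = arena.allocateArray<int>(16);
  kj::StringPtr name = arena.copyString("example");
  kj::Own<Widget> owned = arena.allocateOwn<Widget>();  // destroyed when `owned` goes out of scope

Objects allocated with allocate()/allocateArray() are destroyed in reverse order of allocation
when the Arena itself is destroyed; the allocateOwn() variants tie destruction to the returned
Own<T>/Array<T> instead.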

parent ddb57c2d
@@ -107,6 +107,7 @@ includekj_HEADERS = \
src/kj/string.h \
src/kj/exception.h \
src/kj/debug.h \
src/kj/arena.h \
src/kj/io.h \
src/kj/tuple.h
@@ -142,6 +143,7 @@ libcapnp_la_SOURCES= \
src/kj/string.c++ \
src/kj/exception.c++ \
src/kj/debug.c++ \
src/kj/arena.c++ \
src/kj/io.c++ \
src/kj/parse/char.c++ \
src/capnp/blob.c++ \
@@ -189,6 +191,7 @@ capnp_test_SOURCES = \
src/kj/string-test.c++ \
src/kj/exception-test.c++ \
src/kj/debug-test.c++ \
src/kj/arena-test.c++ \
src/kj/units-test.c++ \
src/kj/tuple-test.c++ \
src/kj/parse/common-test.c++ \
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arena.h"
#include "debug.h"
#include <gtest/gtest.h>
#include <stdint.h>
namespace kj {
namespace {
struct TestObject {
TestObject() {
index = count;
KJ_ASSERT(index != throwAt);
++count;
}
TestObject(const TestObject& other) {
KJ_ASSERT(other.index != throwAt);
index = -1;
copiedCount++;
}
~TestObject() noexcept(false) {
if (index == -1) {
--copiedCount;
} else {
--count;
EXPECT_EQ(index, count);
KJ_ASSERT(count != throwAt);
}
}
int index;
static int count;
static int copiedCount;
static int throwAt;
};
int TestObject::count = 0;
int TestObject::copiedCount = 0;
int TestObject::throwAt = -1;
TEST(Arena, Object) {
TestObject::count = 0;
TestObject::throwAt = -1;
{
Arena arena;
TestObject& obj1 = arena.allocate<TestObject>();
TestObject& obj2 = arena.allocate<TestObject>();
EXPECT_LT(&obj1, &obj2);
EXPECT_EQ(2, TestObject::count);
}
EXPECT_EQ(0, TestObject::count);
}
TEST(Arena, TrivialObject) {
Arena arena;
int& i1 = arena.allocate<int>();
int& i2 = arena.allocate<int>();
// Trivial objects should be tightly-packed.
EXPECT_EQ(&i1 + 1, &i2);
}
TEST(Arena, OwnObject) {
TestObject::count = 0;
TestObject::throwAt = -1;
Arena arena;
{
Own<TestObject> obj1 = arena.allocateOwn<TestObject>();
Own<TestObject> obj2 = arena.allocateOwn<TestObject>();
EXPECT_LT(obj1.get(), obj2.get());
EXPECT_EQ(2, TestObject::count);
}
EXPECT_EQ(0, TestObject::count);
}
TEST(Arena, Array) {
TestObject::count = 0;
TestObject::throwAt = -1;
{
Arena arena;
ArrayPtr<TestObject> arr1 = arena.allocateArray<TestObject>(4);
ArrayPtr<TestObject> arr2 = arena.allocateArray<TestObject>(2);
EXPECT_EQ(4, arr1.size());
EXPECT_EQ(2, arr2.size());
EXPECT_LE(arr1.end(), arr2.begin());
EXPECT_EQ(6, TestObject::count);
}
EXPECT_EQ(0, TestObject::count);
}
TEST(Arena, TrivialArray) {
Arena arena;
ArrayPtr<int> arr1 = arena.allocateArray<int>(16);
ArrayPtr<int> arr2 = arena.allocateArray<int>(8);
// Trivial arrays should be tightly-packed.
EXPECT_EQ(arr1.end(), arr2.begin());
}
TEST(Arena, OwnArray) {
TestObject::count = 0;
TestObject::throwAt = -1;
Arena arena;
{
Array<TestObject> arr1 = arena.allocateOwnArray<TestObject>(4);
Array<TestObject> arr2 = arena.allocateOwnArray<TestObject>(2);
EXPECT_EQ(4, arr1.size());
EXPECT_EQ(2, arr2.size());
EXPECT_LE(arr1.end(), arr2.begin());
EXPECT_EQ(6, TestObject::count);
}
EXPECT_EQ(0, TestObject::count);
}
#ifndef KJ_NO_EXCEPTIONS
TEST(Arena, ObjectThrow) {
TestObject::count = 0;
TestObject::throwAt = 1;
{
Arena arena;
arena.allocate<TestObject>();
EXPECT_ANY_THROW(arena.allocate<TestObject>());
EXPECT_EQ(1, TestObject::count);
}
EXPECT_EQ(0, TestObject::count);
}
TEST(Arena, ArrayThrow) {
TestObject::count = 0;
TestObject::throwAt = 2;
{
Arena arena;
EXPECT_ANY_THROW(arena.allocateArray<TestObject>(4));
EXPECT_EQ(2, TestObject::count);
}
EXPECT_EQ(0, TestObject::count);
}
#endif
TEST(Arena, Alignment) {
Arena arena;
char& c = arena.allocate<char>();
long& l = arena.allocate<long>();
char& c2 = arena.allocate<char>();
ArrayPtr<char> arr = arena.allocateArray<char>(8);
EXPECT_EQ(alignof(long) + sizeof(long), &c2 - &c);
EXPECT_EQ(alignof(long), reinterpret_cast<char*>(&l) - &c);
EXPECT_EQ(sizeof(char), arr.begin() - &c2);
}
TEST(Arena, EndOfChunk) {
union {
byte scratch[16];
uint64_t align;
};
Arena arena(arrayPtr(scratch, sizeof(scratch)));
uint64_t& i = arena.allocate<uint64_t>();
EXPECT_EQ(scratch, reinterpret_cast<byte*>(&i));
uint64_t& i2 = arena.allocate<uint64_t>();
EXPECT_EQ(scratch + 8, reinterpret_cast<byte*>(&i2));
uint64_t& i3 = arena.allocate<uint64_t>();
EXPECT_NE(scratch + 16, reinterpret_cast<byte*>(&i3));
}
TEST(Arena, EndOfChunkAlignment) {
union {
byte scratch[10];
uint64_t align;
};
Arena arena(arrayPtr(scratch, sizeof(scratch)));
uint16_t& i = arena.allocate<uint16_t>();
EXPECT_EQ(scratch, reinterpret_cast<byte*>(&i));
// Although there is technically enough space in the scratch array, it is not aligned correctly.
uint64_t& i2 = arena.allocate<uint64_t>();
EXPECT_TRUE(reinterpret_cast<byte*>(&i2) < scratch ||
reinterpret_cast<byte*>(&i2) > scratch + sizeof(scratch));
}
TEST(Arena, TooBig) {
Arena arena(1024);
byte& b1 = arena.allocate<byte>();
ArrayPtr<byte> arr = arena.allocateArray<byte>(1024);
byte& b2 = arena.allocate<byte>();
EXPECT_EQ(&b1 + 1, &b2);
// Write to the array to make sure it's valid.
memset(arr.begin(), 0xbe, arr.size());
}
TEST(Arena, MultiSegment) {
Arena arena(24);
uint64_t& i1 = arena.allocate<uint64_t>();
uint64_t& i2 = arena.allocate<uint64_t>();
uint64_t& i3 = arena.allocate<uint64_t>();
EXPECT_EQ(&i1 + 1, &i2);
EXPECT_NE(&i2 + 1, &i3);
i1 = 1234;
i2 = 5678;
i3 = 9012;
}
TEST(Arena, Constructor) {
Arena arena;
EXPECT_EQ(123, arena.allocate<uint64_t>(123));
EXPECT_EQ("foo", arena.allocate<StringPtr>("foo", 3));
}
TEST(Arena, Strings) {
Arena arena;
StringPtr foo = arena.copyString("foo");
StringPtr bar = arena.copyString("bar");
StringPtr quux = arena.copyString("quux");
StringPtr corge = arena.copyString("corge");
EXPECT_EQ("foo", foo);
EXPECT_EQ("bar", bar);
EXPECT_EQ("quux", quux);
EXPECT_EQ("corge", corge);
EXPECT_EQ(foo.end() + 1, bar.begin());
EXPECT_EQ(bar.end() + 1, quux.begin());
EXPECT_EQ(quux.end() + 1, corge.begin());
}
} // namespace
} // namespace kj
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arena.h"
#include "debug.h"
#include <stdint.h>
namespace kj {
const Arena::ArrayDisposerImpl Arena::ArrayDisposerImpl::instance = Arena::ArrayDisposerImpl();
Arena::Arena(size_t chunkSize): state(chunkSize) {}
Arena::Arena(ArrayPtr<byte> scratch, size_t chunkSize): state(chunkSize) {
state.pos = scratch.begin();
state.chunkEnd = scratch.end();
}
Arena::~Arena() noexcept(false) {
// Run cleanup explicitly. It will be executed again implicitly when state's destructor is
// called. This ensures that if the first pass throws an exception, remaining objects are still
// destroyed. If the second pass throws, the program terminates, but any destructors that could
// throw should be using UnwindDetector to avoid this.
state.cleanup();
}
void Arena::State::cleanup() {
while (objectList != nullptr) {
void* ptr = objectList + 1;
auto destructor = objectList->destructor;
objectList = objectList->next;
destructor(ptr);
}
while (chunkList != nullptr) {
void* ptr = chunkList;
chunkList = chunkList->next;
operator delete(ptr);
}
}
namespace {
constexpr bool isPowerOfTwo(size_t value) {
return (value & (value - 1)) == 0;
}
inline byte* alignTo(byte* p, uint alignment) {
// Round the pointer up to the next aligned value.
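// For example (an illustrative case): with alignment = 8, mask = 7, so an address of 0x1001
// becomes (0x1001 + 7) & ~7 = 0x1008, while an already-aligned 0x1008 is left unchanged.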
KJ_DASSERT(isPowerOfTwo(alignment), alignment);
uintptr_t mask = alignment - 1;
uintptr_t i = reinterpret_cast<uintptr_t>(p);
return reinterpret_cast<byte*>((i + mask) & ~mask);
}
} // namespace
void* Arena::allocateBytes(size_t amount, uint alignment, bool hasDisposer) {
// Code below depends on power-of-two alignment and header sizes.
static_assert(isPowerOfTwo(sizeof(ChunkHeader)), "sizeof(ChunkHeader) is not a power of 2.");
static_assert(isPowerOfTwo(sizeof(ObjectHeader)), "sizeof(ObjectHeader) is not a power of 2.");
KJ_DASSERT(isPowerOfTwo(alignment), alignment);
// Offset we must apply if the allocated space is being prefixed with these headers.
uint chunkHeaderSize = kj::max(alignment, sizeof(ChunkHeader));
uint objectHeaderSize = kj::max(alignment, sizeof(ObjectHeader));
if (hasDisposer) {
amount += objectHeaderSize;
}
void* result;
byte* alignedPos = alignTo(state.pos, alignment);
byte* endPos = alignedPos + amount;
if (endPos <= state.chunkEnd) {
// There's enough space in the current chunk.
result = alignedPos;
state.pos = alignedPos + amount;
} else if (amount + chunkHeaderSize > state.chunkSize ||
state.chunkEnd - state.pos > state.chunkSize / 4) {
// This object is too big to fit in one chunk, or we'd waste more than a quarter of the chunk
// by starting a new one now. Instead, allocate the object in its own independent chunk.
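// For example (illustrative, using the default 1024-byte chunks): if 300 bytes remain in the
// current chunk and 400 bytes are requested, the request gets its own chunk and the current
// chunk stays active, since abandoning 300 bytes (> 1024/4) would be wasteful. If only 100
// bytes remained, we would instead fall through and start a fresh 1024-byte chunk below.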
ChunkHeader* newChunk = reinterpret_cast<ChunkHeader*>(operator new(chunkHeaderSize + amount));
result = reinterpret_cast<byte*>(newChunk) + chunkHeaderSize;
newChunk->next = state.chunkList;
state.chunkList = newChunk;
// We don't update state.pos and state.chunkEnd because the new chunk has no extra space but
// the old chunk might.
} else {
// Allocate a new chunk.
ChunkHeader* newChunk = reinterpret_cast<ChunkHeader*>(operator new(state.chunkSize));
result = reinterpret_cast<byte*>(newChunk) + chunkHeaderSize;
newChunk->next = state.chunkList;
state.chunkList = newChunk;
state.pos = reinterpret_cast<byte*>(result) + amount;
state.chunkEnd = reinterpret_cast<byte*>(newChunk) + state.chunkSize;
}
if (hasDisposer) {
// Reserve space for the ObjectHeader, but don't add it to the object list yet.
result = reinterpret_cast<byte*>(result) + objectHeaderSize;
}
return result;
}
StringPtr Arena::copyString(StringPtr content) {
char* data = reinterpret_cast<char*>(allocateBytes(content.size() + 1, 1, false));
memcpy(data, content.cStr(), content.size() + 1);
return StringPtr(data, content.size());
}
void Arena::setDestructor(void* ptr, void (*destructor)(void*)) {
ObjectHeader* header = reinterpret_cast<ObjectHeader*>(ptr) - 1;
header->destructor = destructor;
header->next = state.objectList;
state.objectList = header;
}
void Arena::ArrayDisposerImpl::disposeImpl(
void* firstElement, size_t elementSize, size_t elementCount,
size_t capacity, void (*destroyElement)(void*)) const {
if (destroyElement != nullptr) {
ExceptionSafeArrayUtil guard(firstElement, elementSize, elementCount, destroyElement);
guard.destroyAll();
}
}
} // namespace kj
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef KJ_ARENA_H_
#define KJ_ARENA_H_
#include "memory.h"
#include "array.h"
#include "string.h"
namespace kj {
class Arena {
// A class which allows several objects to be allocated in contiguous chunks of memory, then
// frees them all at once.
public:
Arena(size_t chunkSize = 1024);
// Create an Arena that tries to allocate in chunks of the given size. It may deviate from the
// size if an object is too large to fit or to avoid excessive fragmentation. Each chunk has
// a one-word header which is included in the chunk size.
Arena(ArrayPtr<byte> scratch, size_t chunkSize = 1024);
// Allocates from the given scratch space first, only resorting to the heap when it runs out.
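// A minimal sketch (illustrative; mirrors the EndOfChunk test in arena-test.c++):
//
//     union { byte scratch[1024]; uint64_t align; };
//     Arena arena(arrayPtr(scratch, sizeof(scratch)));
//     uint64_t& n = arena.allocate<uint64_t>();  // placed inside `scratch`, not on the heap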
KJ_DISALLOW_COPY(Arena);
~Arena() noexcept(false);
template <typename T, typename... Params>
T& allocate(Params&&... params);
template <typename T>
ArrayPtr<T> allocateArray(size_t size);
// Allocate an object or array of type T. If T has a non-trivial destructor, that destructor
// will be run during the Arena's destructor. Such destructors are run in opposite order of
// allocation. Note that these methods must maintain a list of destructors to call, which has
// overhead, but this overhead only applies if T has a non-trivial destructor.
template <typename T, typename... Params>
Own<T> allocateOwn(Params&&... params);
template <typename T>
Array<T> allocateOwnArray(size_t size);
template <typename T>
ArrayBuilder<T> allocateOwnArrayBuilder(size_t capacity);
// Allocate an object or array of type T. Destructors are executed when the returned Own<T>
// or Array<T> goes out-of-scope, which must happen before the Arena is destroyed. This variant
// is useful when you need to control when the destructor is called. This variant also avoids
// the need for the Arena itself to keep track of destructors to call later, which may make it
// slightly more efficient.
StringPtr copyString(StringPtr content);
// Make a copy of the given string inside the arena, and return a pointer to the copy.
private:
struct ChunkHeader {
ChunkHeader* next;
};
struct ObjectHeader {
void (*destructor)(void*);
ObjectHeader* next;
};
struct State {
size_t chunkSize;
ChunkHeader* chunkList;
ObjectHeader* objectList;
byte* pos;
byte* chunkEnd;
inline State(size_t chunkSize)
: chunkSize(chunkSize), chunkList(nullptr), objectList(nullptr),
pos(nullptr), chunkEnd(nullptr) {}
inline ~State() noexcept(false) { cleanup(); }
void cleanup();
// Run all destructors, leaving the above pointers null. If a destructor throws, the State is
// left in a consistent state, such that if cleanup() is called again, it will pick up where
// it left off.
};
State state;
void* allocateBytes(size_t amount, uint alignment, bool hasDisposer);
// Allocate the given number of bytes. `hasDisposer` must be true if `setDestructor()` may be
// called on this pointer later.
void setDestructor(void* ptr, void (*destructor)(void*));
// Schedule the given destructor to be executed when the Arena is destroyed. `ptr` must be a
// pointer previously returned by an `allocateBytes()` call for which `hasDisposer` was true.
template <typename T>
class DisposerImpl: public Disposer {
public:
static const DisposerImpl instance;
void disposeImpl(void* pointer) const override {
reinterpret_cast<T*>(pointer)->~T();
}
};
class ArrayDisposerImpl: public ArrayDisposer {
public:
static const ArrayDisposerImpl instance;
void disposeImpl(void* firstElement, size_t elementSize, size_t elementCount,
size_t capacity, void (*destroyElement)(void*)) const override;
};
template <typename T>
static void destroyArray(void* pointer) {
size_t elementCount = *reinterpret_cast<size_t*>(pointer);
constexpr size_t prefixSize = kj::max(alignof(T), sizeof(size_t));
ArrayDisposerImpl::instance.disposeImpl(
reinterpret_cast<byte*>(pointer) + prefixSize,
sizeof(T), elementCount, elementCount, &destroyObject<T>);
}
template <typename T>
static void destroyObject(void* pointer) {
dtor(*reinterpret_cast<T*>(pointer));
}
};
// =======================================================================================
// Inline implementation details
template <typename T, typename... Params>
T& Arena::allocate(Params&&... params) {
T& result = *reinterpret_cast<T*>(allocateBytes(
sizeof(T), alignof(T), !__has_trivial_destructor(T)));
if (!__has_trivial_constructor(T) || sizeof...(Params) > 0) {
ctor(result, kj::fwd<Params>(params)...);
}
if (!__has_trivial_destructor(T)) {
setDestructor(&result, &destroyObject<T>);
}
return result;
}
template <typename T>
ArrayPtr<T> Arena::allocateArray(size_t size) {
if (__has_trivial_destructor(T)) {
ArrayPtr<T> result =
arrayPtr(reinterpret_cast<T*>(allocateBytes(
sizeof(T) * size, alignof(T), false)), size);
if (!__has_trivial_constructor(T)) {
for (size_t i = 0; i < size; i++) {
ctor(result[i]);
}
}
return result;
} else {
// Allocate with a prefix in which we store the array size. The prefix holds a size_t and is
// padded up to alignof(T) so that the elements stay aligned.
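// Resulting layout (an illustrative sketch of what the calls below produce): an ObjectHeader,
// reserved here and filled in by setDestructor(), sits immediately before `base`; `base` holds
// the element count; the elements start at `base` + prefixSize.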
constexpr size_t prefixSize = kj::max(alignof(T), sizeof(size_t));
void* base = allocateBytes(sizeof(T) * size + prefixSize, alignof(T), true);
size_t& tag = *reinterpret_cast<size_t*>(base);
ArrayPtr<T> result =
arrayPtr(reinterpret_cast<T*>(reinterpret_cast<byte*>(base) + prefixSize), size);
setDestructor(base, &destroyArray<T>);
if (__has_trivial_constructor(T)) {
tag = size;
} else {
// In case of constructor exceptions, we need the tag to end up storing the number of objects
// that were successfully constructed, so that they'll be properly destroyed.
tag = 0;
for (size_t i = 0; i < size; i++) {
ctor(result[i]);
tag = i + 1;
}
}
return result;
}
}
template <typename T, typename... Params>
Own<T> Arena::allocateOwn(Params&&... params) {
T& result = *reinterpret_cast<T*>(allocateBytes(sizeof(T), alignof(T), false));
if (!__has_trivial_constructor(T) || sizeof...(Params) > 0) {
ctor(result, kj::fwd<Params>(params)...);
}
return Own<T>(&result, DisposerImpl<T>::instance);
}
template <typename T>
Array<T> Arena::allocateOwnArray(size_t size) {
ArrayBuilder<T> result = allocateOwnArrayBuilder<T>(size);
for (size_t i = 0; i < size; i++) {
result.add();
}
return result.finish();
}
template <typename T>
ArrayBuilder<T> Arena::allocateOwnArrayBuilder(size_t capacity) {
return ArrayBuilder<T>(
reinterpret_cast<T*>(allocateBytes(sizeof(T) * capacity, alignof(T), false)),
capacity, ArrayDisposerImpl::instance);
}
template <typename T>
const Arena::DisposerImpl<T> Arena::DisposerImpl<T>::instance = Arena::DisposerImpl<T>();
} // namespace kj
#endif // KJ_ARENA_H_
@@ -24,36 +24,25 @@
#include "array.h"
namespace kj {
namespace _ { // private
struct HeapArrayDisposer::ExceptionGuard {
byte* pos;
size_t elementSize;
size_t elementCount;
size_t constructedCount;
void (*destroyElement)(void*);
ExceptionGuard(void* ptr, size_t elementSize, size_t elementCount,
void (*destroyElement)(void*))
: pos(reinterpret_cast<byte*>(ptr) + elementSize * elementCount),
elementSize(elementSize), elementCount(elementCount),
destroyElement(destroyElement) {}
~ExceptionGuard() noexcept(false) {
if (pos != nullptr) {
destroyAll();
operator delete(pos);
}
void ExceptionSafeArrayUtil::construct(size_t count, void (*constructElement)(void*)) {
while (count > 0) {
constructElement(pos);
pos += elementSize;
++constructedElementCount;
--count;
}
}
void destroyAll() {
while (elementCount > 0) {
void ExceptionSafeArrayUtil::destroyAll() {
while (constructedElementCount > 0) {
pos -= elementSize;
--elementCount;
--constructedElementCount;
destroyElement(pos);
}
}
};
}
namespace _ { // private
void* HeapArrayDisposer::allocateImpl(size_t elementSize, size_t elementCount, size_t capacity,
void (*constructElement)(void*),
@@ -70,13 +59,9 @@ void* HeapArrayDisposer::allocateImpl(size_t elementSize, size_t elementCount, s
--elementCount;
}
} else {
ExceptionGuard guard(result, elementSize, 0, destroyElement);
while (guard.elementCount < elementCount) {
constructElement(guard.pos);
guard.pos += elementSize;
++guard.elementCount;
}
guard.pos = nullptr;
ExceptionSafeArrayUtil guard(result, elementSize, 0, destroyElement);
guard.construct(elementCount, constructElement);
guard.release();
}
return result;
@@ -90,11 +75,9 @@ void HeapArrayDisposer::disposeImpl(
if (destroyElement == nullptr) {
operator delete(firstElement);
} else {
ExceptionGuard guard(firstElement, elementSize, elementCount, destroyElement);
KJ_DEFER(operator delete(firstElement));
ExceptionSafeArrayUtil guard(firstElement, elementSize, elementCount, destroyElement);
guard.destroyAll();
// If an exception is thrown, we'll continue the destruction process in ExceptionGuard's
// destructor. If _that_ throws an exception, the program terminates according to C++ rules.
}
}
@@ -61,6 +61,45 @@ private:
struct Dispose_;
};
class ExceptionSafeArrayUtil {
// Utility class that assists in constructing or destroying elements of an array, where the
// constructor or destructor could throw exceptions. In case of an exception,
// ExceptionSafeArrayUtil's destructor will call destructors on all elements that have been
// constructed but not destroyed. Remember that destructors that throw exceptions are required
// to use UnwindDetector to detect unwind and avoid exceptions in this case. Therefore, no more
// than one exception will be thrown (and the program will not terminate).
public:
inline ExceptionSafeArrayUtil(void* ptr, size_t elementSize, size_t constructedElementCount,
void (*destroyElement)(void*))
: pos(reinterpret_cast<byte*>(ptr) + elementSize * constructedElementCount),
elementSize(elementSize), constructedElementCount(constructedElementCount),
destroyElement(destroyElement) {}
KJ_DISALLOW_COPY(ExceptionSafeArrayUtil);
inline ~ExceptionSafeArrayUtil() noexcept(false) {
if (constructedElementCount > 0) destroyAll();
}
void construct(size_t count, void (*constructElement)(void*));
// Construct the given number of elements.
void destroyAll();
// Destroy all elements. Call this immediately before ExceptionSafeArrayUtil goes out-of-scope
// to ensure that one element throwing an exception does not prevent the others from being
// destroyed.
void release() { constructedElementCount = 0; }
// Prevent ExceptionSafeArrayUtil's destructor from destroying the constructed elements.
// Call this after you've successfully finished constructing.
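//
// Typical pattern (an illustrative sketch; compare HeapArrayDisposer::allocateImpl in array.c++):
//
//     ExceptionSafeArrayUtil guard(ptr, sizeof(T), 0, &destroyElement);
//     guard.construct(count, &constructElement);  // partial teardown if a constructor throws
//     guard.release();                            // success: keep the constructed elements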
private:
byte* pos;
size_t elementSize;
size_t constructedElementCount;
void (*destroyElement)(void*);
};
// =======================================================================================
// Array
@@ -185,8 +224,6 @@ private:
template <typename T, bool hasTrivialConstructor = __has_trivial_constructor(T),
bool hasNothrowConstructor = __has_nothrow_constructor(T)>
struct Allocate_;
struct ExceptionGuard;
};
} // namespace _ (private)
@@ -295,14 +332,14 @@ public:
void addAll(Iterator start, Iterator end);
Array<T> finish() {
// We could safely remove this check as long as HeapArrayDisposer relies on operator delete
// (which doesn't need to know the original capacity) or if we created a custom disposer for
// ArrayBuilder which stores the capacity in a prefix. But that would mean we can't allow
// arbitrary disposers with ArrayBuilder in the future, and anyway this check might catch bugs.
// Probably we should just create a new Vector-like data structure if we want to allow building
// of arrays without knowing the final size in advance.
// We could safely remove this check if we assume that the disposer implementation doesn't
// need to know the original capacity, as is the case with HeapArrayDisposer since it frees with
// plain operator delete(), or if we created a custom disposer for ArrayBuilder which stores the capacity
// in a prefix. But that would make it hard to write cleverer heap allocators, and anyway this
// check might catch bugs. Probably people should use Vector if they want to build arrays
// without knowing the final size in advance.
KJ_IREQUIRE(pos == endPtr, "ArrayBuilder::finish() called prematurely.");
Array<T> result(reinterpret_cast<T*>(ptr), pos - ptr, _::HeapArrayDisposer::instance);
Array<T> result(reinterpret_cast<T*>(ptr), pos - ptr, *disposer);
ptr = nullptr;
pos = nullptr;
endPtr = nullptr;
@@ -246,5 +246,23 @@ TEST(Common, MinMax) {
EXPECT_EQ(1234567890123456789ll, kj::max('a', 1234567890123456789ll));
}
TEST(Common, Defer) {
uint i = 0;
uint j = 1;
bool k = false;
{
KJ_DEFER(++i);
KJ_DEFER(j += 3; k = true);
EXPECT_EQ(0, i);
EXPECT_EQ(1, j);
EXPECT_FALSE(k);
}
EXPECT_EQ(1, i);
EXPECT_EQ(4, j);
EXPECT_TRUE(k);
}
} // namespace
} // namespace kj
@@ -176,6 +176,12 @@ void inlineRequireFailure(
#define KJ_ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define KJ_CONCAT_(x, y) x##y
#define KJ_CONCAT(x, y) KJ_CONCAT_(x, y)
#define KJ_UNIQUE_NAME(prefix) KJ_CONCAT(prefix, __LINE__)
// Create a unique identifier name. We concatenate __LINE__ rather than __COUNTER__ so that
// the name can be used multiple times in the same macro.
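// For example (illustrative), KJ_UNIQUE_NAME(_kjDefer) on line 42 expands to `_kjDefer42`:
// repeated expansions on the same line name the same identifier, while uses on different lines
// produce distinct identifiers.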
// =======================================================================================
// Template metaprogramming helpers.
@@ -283,9 +289,9 @@ template<typename T> constexpr T&& mv(T& t) noexcept { return static_cast<T&&>(t
template<typename T> constexpr T&& fwd(NoInfer<T>& t) noexcept { return static_cast<T&&>(t); }
template <typename T, typename U>
auto min(T&& a, U&& b) -> decltype(a < b ? a : b) { return a < b ? a : b; }
inline constexpr auto min(T&& a, U&& b) -> decltype(a < b ? a : b) { return a < b ? a : b; }
template <typename T, typename U>
auto max(T&& a, U&& b) -> decltype(a > b ? a : b) { return a > b ? a : b; }
inline constexpr auto max(T&& a, U&& b) -> decltype(a > b ? a : b) { return a > b ? a : b; }
// =======================================================================================
// Manually invoking constructors and destructors
@@ -710,6 +716,29 @@ To& downcast(From& from) {
return static_cast<To&>(from);
}
// =======================================================================================
// Defer
namespace _ { // private
template <typename Func>
class Deferred {
public:
inline Deferred(Func func): func(func) {}
inline ~Deferred() { func(); }
private:
Func func;
};
template <typename Func>
Deferred<Decay<Func>> defer(Func&& func) {
return Deferred<Decay<Func>>(kj::fwd<Func>(func));
}
} // namespace _ (private)
#define KJ_DEFER(code) auto KJ_UNIQUE_NAME(_kjDefer) = ::kj::_::defer([&](){code;})
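// For example (illustrative), `KJ_DEFER(fclose(f));` on line 99 expands to
// `auto _kjDefer99 = ::kj::_::defer([&](){fclose(f);});`, so fclose(f) runs when the enclosing
// scope exits (see the Defer test in common-test.c++ above).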
} // namespace kj
#endif // KJ_COMMON_H_
@@ -140,11 +140,12 @@ namespace kj {
errorNumber, code, #__VA_ARGS__, ##__VA_ARGS__);; f.fatal())
#define KJ_CONTEXT(...) \
auto _kjContextFunc = [&]() -> ::kj::_::Debug::Context::Value { \
auto KJ_UNIQUE_NAME(_kjContextFunc) = [&]() -> ::kj::_::Debug::Context::Value { \
return ::kj::_::Debug::Context::Value(__FILE__, __LINE__, \
::kj::_::Debug::makeContextDescription(#__VA_ARGS__, ##__VA_ARGS__)); \
}; \
::kj::_::Debug::ContextImpl<decltype(_kjContextFunc)> _kjContext(_kjContextFunc)
::kj::_::Debug::ContextImpl<decltype(KJ_UNIQUE_NAME(_kjContextFunc))> \
KJ_UNIQUE_NAME(_kjContext)(KJ_UNIQUE_NAME(_kjContextFunc))
#ifdef NDEBUG
#define KJ_DLOG(...) do {} while (false)